From cc549272784bee8fe32bf2bf0995341d91a6cfeb Mon Sep 17 00:00:00 2001 From: Yandong Yao Date: Tue, 6 Nov 2018 11:04:22 +0800 Subject: [PATCH 001/330] Initial commit --- .gitignore | 52 ++++++++++++++++++++++++++++++++++++++++++++++++++++ README.md | 2 ++ 2 files changed, 54 insertions(+) create mode 100644 .gitignore create mode 100644 README.md diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000000..c6127b38c1a --- /dev/null +++ b/.gitignore @@ -0,0 +1,52 @@ +# Prerequisites +*.d + +# Object files +*.o +*.ko +*.obj +*.elf + +# Linker output +*.ilk +*.map +*.exp + +# Precompiled Headers +*.gch +*.pch + +# Libraries +*.lib +*.a +*.la +*.lo + +# Shared objects (inc. Windows DLLs) +*.dll +*.so +*.so.* +*.dylib + +# Executables +*.exe +*.out +*.app +*.i*86 +*.x86_64 +*.hex + +# Debug files +*.dSYM/ +*.su +*.idb +*.pdb + +# Kernel Module Compile Results +*.mod* +*.cmd +.tmp_versions/ +modules.order +Module.symvers +Mkfile.old +dkms.conf diff --git a/README.md b/README.md new file mode 100644 index 00000000000..1074a11be0d --- /dev/null +++ b/README.md @@ -0,0 +1,2 @@ +# diskquota +PostgreSQL disk quota extension From 58d426c7749bbe0e18d75ba1161e65f709812e2b Mon Sep 17 00:00:00 2001 From: Hubert Zhang Date: Tue, 6 Nov 2018 06:38:47 +0000 Subject: [PATCH 002/330] Implement diskquota extension 1. Implement Disk Quota feature as an extension 2. Support limit the disk usage of schema and role 3. Support UDF wrapper to set quota limit 4. Support in-transcation enforcement when quota limit exceeds Co-authored-by: Haozhou Wang Co-authored-by: Hubert Zhang Co-authored-by: Hao Wu --- Makefile | 15 + README.md | 212 +++++++++- activetable.c | 266 ++++++++++++ activetable.h | 30 ++ data/csmall.txt | 100 +++++ data/generate_insert.py | 6 + data/generate_table.py | 8 + diskquota--1.0.sql | 37 ++ diskquota.c | 771 ++++++++++++++++++++++++++++++++++ diskquota.control | 5 + diskquota.h | 33 ++ diskquota_schedule | 6 + enforcement.c | 95 +++++ quotamodel.c | 899 ++++++++++++++++++++++++++++++++++++++++ sql/clean.sql | 5 + sql/dummy.sql | 0 sql/empty.sql | 0 sql/prepare.sql | 15 + sql/test_column.sql | 19 + sql/test_copy.sql | 17 + sql/test_drop_table.sql | 20 + sql/test_partition.sql | 34 ++ sql/test_rename.sql | 48 +++ sql/test_reschema.sql | 19 + sql/test_role.sql | 32 ++ sql/test_schema.sql | 32 ++ sql/test_temp_role.sql | 22 + sql/test_toast.sql | 23 + sql/test_truncate.sql | 21 + sql/test_update.sql | 13 + sql/test_vacuum.sql | 23 + test_diskquota.conf | 5 + 32 files changed, 2829 insertions(+), 2 deletions(-) create mode 100644 Makefile create mode 100644 activetable.c create mode 100644 activetable.h create mode 100644 data/csmall.txt create mode 100644 data/generate_insert.py create mode 100644 data/generate_table.py create mode 100644 diskquota--1.0.sql create mode 100644 diskquota.c create mode 100644 diskquota.control create mode 100644 diskquota.h create mode 100644 diskquota_schedule create mode 100644 enforcement.c create mode 100644 quotamodel.c create mode 100644 sql/clean.sql create mode 100644 sql/dummy.sql create mode 100644 sql/empty.sql create mode 100644 sql/prepare.sql create mode 100644 sql/test_column.sql create mode 100644 sql/test_copy.sql create mode 100644 sql/test_drop_table.sql create mode 100644 sql/test_partition.sql create mode 100644 sql/test_rename.sql create mode 100644 sql/test_reschema.sql create mode 100644 sql/test_role.sql create mode 100644 sql/test_schema.sql create mode 100644 sql/test_temp_role.sql create mode 100644 sql/test_toast.sql create 
mode 100644 sql/test_truncate.sql
create mode 100644 sql/test_update.sql
create mode 100644 sql/test_vacuum.sql
create mode 100644 test_diskquota.conf

diff --git a/Makefile b/Makefile
new file mode 100644
index 00000000000..62fec5f42c4
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,15 @@
+# contrib/diskquota/Makefile
+
+MODULE_big = diskquota
+
+EXTENSION = diskquota
+DATA = diskquota--1.0.sql
+SRCDIR = ./
+FILES = $(shell find $(SRCDIR) -type f -name "*.c")
+OBJS = diskquota.o enforcement.o quotamodel.o activetable.o
+
+REGRESS = dummy
+REGRESS_OPTS = --temp-config=test_diskquota.conf --temp-instance=/tmp/pg_diskquota_test --schedule=diskquota_schedule
+PG_CONFIG = pg_config
+PGXS := $(shell $(PG_CONFIG) --pgxs)
+include $(PGXS)

diff --git a/README.md b/README.md
index 1074a11be0d..15f595fe6a1 100644
--- a/README.md
+++ b/README.md
@@ -1,2 +1,210 @@
-# diskquota
-PostgreSQL disk quota extension
+# Overview
+Diskquota is an extension that provides disk usage enforcement for database objects in PostgreSQL. It currently supports setting a quota limit on a schema or a role in a given database, restricting the amount of disk space that the schema or role can use.
+
+This project is inspired by Heikki's pg_quota project (https://github.com/hlinnaka/pg_quota) and enhances it to support the various kinds of DDL and DML that may change the disk usage of database objects.
+
+Diskquota imposes a soft limit on disk usage: there is some delay before a schema or role that exceeds its quota limit is detected. This 'soft limit' supports two kinds of enforcement: a query loading data into an out-of-quota schema/role is rejected before it starts running, and a query loading data into a schema/role that still has room is cancelled dynamically once the quota limit is reached while the query is running.
+
+# Design
+The diskquota extension is based on the background worker framework in PostgreSQL.
+There are two kinds of background workers: the diskquota launcher and the diskquota workers.
+
+There is only one launcher process per database cluster (i.e. one launcher per postmaster).
+The launcher process is responsible for managing the worker processes: it calls RegisterDynamicBackgroundWorker()
+to create new workers and keeps their handles, and it calls TerminateBackgroundWorker() to
+terminate workers that become disabled when a DBA modifies diskquota.monitor_databases.
+
+There are many worker processes, one for each database listed in diskquota.monitor_databases.
+Currently, at most 10 databases can be monitored at the same time.
+Worker processes are responsible for monitoring the disk usage of schemas and roles in the target database,
+and for doing quota enforcement. A worker periodically (the interval is set via diskquota.naptime) recalculates the size of active tables and updates the disk usage of their schemas and owners, then compares the usage against the quota limits of those schemas and roles. If a limit is exceeded, it puts the corresponding schema or role into the black list in shared memory. Schemas and roles in the black list are used for query enforcement, cancelling queries that plan to load data into them.
+
+## Active table
+Active tables are the tables whose size may have changed in the last quota check interval. We use hooks in smgrcreate(), smgrextend() and smgrtruncate() to detect active tables and store them (currently by relfilenode) in shared memory.
+The diskquota worker process periodically consumes the active tables from shared memory, converts each relfilenode to a relation oid, and calculates the table size by calling pg_total_relation_size(), which sums the sizes of the table's base relation, visibility map, free space map, toast table and indexes.
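+
+For example, the size a worker attributes to a single table can be reproduced by hand with the same built-in function (the table name below is illustrative):
+```
+-- total size of s1.a: base relation, VM, FSM, toast table and indexes
+select pg_total_relation_size('s1.a');
+```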
+
+## Enforcement
+Enforcement is implemented via hooks. There are two kinds of enforcement hooks: one that runs before a query starts and one that runs while a query is executing.
+The 'before query' check is implemented at ExecutorCheckPerms_hook in the function ExecCheckRTPerms().
+The 'during query' check is implemented at BufferExtendCheckPerms_hook in the function ReadBufferExtended(). Note that the implementation of BufferExtendCheckPerms_hook first checks whether the caller is requesting a new block; if not, the check is skipped.
+
+## Quota setting store
+The quota limit of a schema or a role is stored in the table 'quota_config' in the 'diskquota' schema of the monitored database, so each database stores and manages its own disk quota configuration. Note that although a role is a cluster-level database object, we keep the disk quota of a role database-specific. That is to say, a role may have different quota limits in different databases, and its disk usage is isolated between databases.
+
+# Install
+1. Compile and install diskquota.
+```
+cd contrib/diskquota;
+make;
+make install;
+```
+2. Configure postgresql.conf
+```
+# enable diskquota in preload library.
+shared_preload_libraries = 'diskquota'
+# set monitored databases
+diskquota.monitor_databases = 'postgres'
+# set naptime (in seconds) to refresh the disk quota stats periodically
+diskquota.naptime = 2
+```
+3. Create the diskquota extension in each monitored database.
+```
+create extension diskquota;
+```
+
+4. Reload the database configuration
+```
+# reset monitored database list in postgresql.conf
+diskquota.monitor_databases = 'postgres, postgres2'
+# reload configuration
+pg_ctl reload
+```
+
+# Usage
+1. Set/update/delete a schema quota limit using diskquota.set_schema_quota
+```
+create schema s1;
+select diskquota.set_schema_quota('s1', '1 MB');
+set search_path to s1;
+
+create table a(i int);
+# inserting a small amount of data succeeds
+insert into a select generate_series(1,100);
+# inserting a large amount of data fails
+insert into a select generate_series(1,10000000);
+# inserting a small amount of data now also fails
+insert into a select generate_series(1,100);
+
+# delete the quota configuration
+select diskquota.set_schema_quota('s1', '-1');
+# inserting a small amount of data succeeds again
+select pg_sleep(5);
+insert into a select generate_series(1,100);
+reset search_path;
+```
+
+2. Set/update/delete a role quota limit using diskquota.set_role_quota
+```
+create role u1 nologin;
+create table b (i int);
+alter table b owner to u1;
+select diskquota.set_role_quota('u1', '1 MB');
+
+# inserting a small amount of data succeeds
+insert into b select generate_series(1,100);
+# inserting a large amount of data fails
+insert into b select generate_series(1,10000000);
+# inserting a small amount of data now also fails
+insert into b select generate_series(1,100);
+
+# delete the quota configuration
+select diskquota.set_role_quota('u1', '-1');
+# inserting a small amount of data succeeds again
+select pg_sleep(5);
+insert into b select generate_series(1,100);
+```
+
+3. Show schema quota limits and current usage
+```
+select * from diskquota.show_schema_quota_view;
+```
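+
+The stored settings and the matching role view (both created by diskquota--1.0.sql) can be inspected directly as well:
+```
+select * from diskquota.quota_config;
+select * from diskquota.show_role_quota_view;
+```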
+
+# Test
+Run the regression tests.
+```
+cd contrib/diskquota;
+make installcheck
+```
+
+# Benchmark & Performance Test
+## Cost of the diskquota worker
+During each refresh interval, the diskquota worker needs to refresh the disk quota model.
+
+It takes less than 100ms with 100K user tables and no active tables.
+
+It takes less than 200ms with 100K user tables and 1K active tables.
+
+## Impact on OLTP queries
+We ran OLTP queries to measure the impact of enabling the diskquota feature, with the table count ranging from 2k to 10k.
+Each connection inserts 100 rows into each table, and the number of parallel connections ranges from 5 to 25. The number of active tables is around 1k.
+
+Without diskquota enabled (seconds)
+
+| connections | 2k | 4k | 6k | 8k | 10k |
+|:-: |:-: |:-: |:-: |:-: |--- |
+| 5 | 4.002 | 11.356 | 18.460 | 28.591 | 41.123 |
+| 10 | 4.832 | 11.988 | 21.113 | 32.829 | 45.832 |
+| 15 | 6.238 | 16.896 | 28.722 | 45.375 | 64.642 |
+| 20 | 8.036 | 21.711 | 38.499 | 61.763 | 87.875 |
+| 25 | 9.909 | 27.175 | 47.996 | 75.688 | 106.648 |
+
+With diskquota enabled (seconds)
+
+| connections | 2k | 4k | 6k | 8k | 10k |
+|:-: |:-: |:-: |:-: |:-: |--- |
+| 5 | 4.135 | 10.641 | 18.776 | 28.804 | 41.740 |
+| 10 | 4.773 | 12.407 | 22.351 | 34.243 | 47.568 |
+| 15 | 6.355 | 17.305 | 30.941 | 46.967 | 66.216 |
+| 20 | 9.451 | 22.231 | 40.645 | 61.758 | 88.309 |
+| 25 | 10.096 | 26.844 | 48.910 | 76.537 | 108.025 |
+
+The performance difference with and without diskquota enabled is less than 2-3% in most cases, so enabling diskquota causes no significant performance degradation.
+
+# Notes
+1. Dropping a database with diskquota enabled.
+
+If a DBA enables diskquota monitoring on a database, there will be a connection
+to this database from a diskquota worker process. The DBA needs to first remove the
+database from diskquota.monitor_databases in postgresql.conf and reload the
+configuration by calling `pg_ctl reload`. The database can then be dropped successfully.
+
+2. Temp tables.
+
+Diskquota supports limiting the disk usage of temp tables as well, but schemas and roles are treated differently.
+For a role, i.e. the owner of the temp table, diskquota treats temp tables the same as normal tables and counts their
+size against the owner's quota. For a schema, however, a temp table lives under the namespace 'pg_temp_backend_id',
+so its size does not count against the current schema's quota.
+
+# Known Issues
+
+1. Since PostgreSQL doesn't support the READ UNCOMMITTED isolation level,
+our implementation cannot detect a table newly created inside an
+uncommitted transaction (see the example below), so enforcement on
+that newly created table does not work. After the transaction commits,
+the diskquota worker process detects the newly created table
+and enforces its quota accordingly in later queries.
+```
+# suppose the quota of schema s1 is 1MB.
+set search_path to s1;
+create table b(i int);
+BEGIN;
+create table a(i int);
+# Issue: quota enforcement doesn't work on table a
+insert into a select generate_series(1,200000);
+# quota enforcement works on table b
+insert into b select generate_series(1,200000);
+# quota enforcement works on table a now,
+# since the quota limit of schema s1 has already been exceeded.
+insert into a select generate_series(1,200000);
+END;
+```
+
+One possible solution is to calculate the additional 'uncommitted data size'
+for schemas and roles in the worker process. Since pg_total_relation_size() needs to hold
+an AccessShareLock on the relation (and the worker process doesn't even know that the reloid exists),
+we would have to skip the lock and call stat() directly, tolerating concurrent file unlinks.
+Skipping locks is dangerous, so we plan to leave this as a known issue at the current stage.
+
+2. Out of shared memory
+
+The diskquota extension uses two kinds of shared memory: one stores the black list and the other stores the active table list.
+The black list shared memory can hold up to 1 Mi (1024 * 1024) database objects that exceed their quota limit.
+The active table list shared memory can hold up to 1 Mi active table entries by default; this can be changed via the GUC diskquota.max_active_tables.
+
+As the shared memory is pre-allocated, the database must be restarted for an updated value of this GUC to take effect.
+
+If the black list shared memory is full, it is possible to load data into schemas or roles whose quota limits have been reached.
+If the active table shared memory is full, the diskquota worker may fail to detect the corresponding disk usage changes in time.
+
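+
+For example, to double the default active table capacity, set the GUC in postgresql.conf (the value below is illustrative; it counts hash table entries, not bytes):
+```
+# takes effect only after a restart, since the shared hash table is sized at startup
+diskquota.max_active_tables = 2097152
+```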
diff --git a/activetable.c b/activetable.c
new file mode 100644
index 00000000000..e3c8e2dd78f
--- /dev/null
+++ b/activetable.c
@@ -0,0 +1,266 @@
+/* -------------------------------------------------------------------------
+ *
+ * activetable.c
+ *
+ * This code is responsible for detecting the active tables of a database.
+ *
+ * Copyright (C) 2013, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ *        contrib/diskquota/activetable.c
+ *
+ * -------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include "access/htup_details.h"
+#include "catalog/indexing.h"
+#include "catalog/pg_class.h"
+#include "catalog/pg_type.h"
+#include "executor/spi.h"
+#include "funcapi.h"
+#include "miscadmin.h"
+#include "storage/shmem.h"
+#include "storage/smgr.h"
+#include "utils/builtins.h"
+#include "utils/fmgroids.h"
+
+#include "activetable.h"
+#include "diskquota.h"
+
+HTAB *active_tables_map = NULL;
+static SmgrStat_hook_type prev_SmgrStat_hook = NULL;
+static ScanKeyData relfilenode_skey[2];
+
+static void report_active_table_SmgrStat(SMgrRelation reln);
+HTAB* get_active_tables(void);
+void init_active_table_hook(void);
+void init_shm_worker_active_tables(void);
+void init_lock_active_tables(void);
+void init_relfilenode_key(void);
+
+/*
+ * Register the smgr hook used to detect active tables.
+ */
+void
+init_active_table_hook(void)
+{
+    prev_SmgrStat_hook = SmgrStat_hook;
+    SmgrStat_hook = report_active_table_SmgrStat;
+}
+
+/*
+ * Init the active_tables_map hash table in shared memory.
+ */
+void
+init_shm_worker_active_tables(void)
+{
+    HASHCTL ctl;
+
+    memset(&ctl, 0, sizeof(ctl));
+
+    /* the map is keyed by the whole file entry (dbid, relfilenode, tablespace) */
+    ctl.keysize = sizeof(DiskQuotaActiveTableFileEntry);
+    ctl.entrysize = sizeof(DiskQuotaActiveTableFileEntry);
+    ctl.hash = tag_hash;
+
+    active_tables_map = ShmemInitHash("active_tables",
+                                      diskquota_max_active_tables,
+                                      diskquota_max_active_tables,
+                                      &ctl,
+                                      HASH_ELEM | HASH_FUNCTION);
+}
+
+/*
+ * Init the lock protecting the active table map.
+ */
+void
+init_lock_active_tables(void)
+{
+    bool found = false;
+
+    active_table_shm_lock = ShmemInitStruct("disk_quota_active_table_shm_lock",
+                                            sizeof(disk_quota_shared_state),
+                                            &found);
+
+    if (!found)
+    {
+        active_table_shm_lock->lock = &(GetNamedLWLockTranche("disk_quota_active_table_shm_lock"))->lock;
+    }
+}
+
+/*
+ * Init the relfilenode scan keys used to look up a table's oid in pg_class
+ * by its tablespace and relfilenode.
+ */
+void
+init_relfilenode_key(void)
+{
+    int i;
+
+    /* build skey */
+    MemSet(&relfilenode_skey, 0, sizeof(relfilenode_skey));
+
+    for (i = 0; i < 2; i++)
+    {
+        fmgr_info_cxt(F_OIDEQ,
+                      &relfilenode_skey[i].sk_func,
+                      CacheMemoryContext);
+        relfilenode_skey[i].sk_strategy = BTEqualStrategyNumber;
+        relfilenode_skey[i].sk_subtype = InvalidOid;
+        relfilenode_skey[i].sk_collation = InvalidOid;
+    }
+
+    relfilenode_skey[0].sk_attno = Anum_pg_class_reltablespace;
+    relfilenode_skey[1].sk_attno = Anum_pg_class_relfilenode;
+}
+
+/*
+ * Get the local active tables with table oid and table size info.
+ * This function first copies active table map from shared memory + * to local active table map with refilenode info. Then traverses + * the local map and find corresponding table oid and table file + * size. Finnaly stores them into local active table map and return. + */ +HTAB* get_active_tables() +{ + HASHCTL ctl; + HTAB *local_active_table_file_map = NULL; + HTAB *local_active_table_stats_map = NULL; + HASH_SEQ_STATUS iter; + DiskQuotaActiveTableFileEntry *active_table_file_entry; + DiskQuotaActiveTableEntry *active_table_entry; + + Relation relation; + HeapTuple tuple; + SysScanDesc relScan; + Oid relOid; + + memset(&ctl, 0, sizeof(ctl)); + ctl.keysize = sizeof(DiskQuotaActiveTableFileEntry); + ctl.entrysize = sizeof(DiskQuotaActiveTableFileEntry); + ctl.hcxt = CurrentMemoryContext; + ctl.hash = tag_hash; + + local_active_table_file_map = hash_create("local active table map with relfilenode info", + 1024, + &ctl, + HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); + + /* Move active table from shared memory to local active table map */ + LWLockAcquire(active_table_shm_lock->lock, LW_EXCLUSIVE); + + hash_seq_init(&iter, active_tables_map); + + while ((active_table_file_entry = (DiskQuotaActiveTableFileEntry *) hash_seq_search(&iter)) != NULL) + { + bool found; + DiskQuotaActiveTableFileEntry *entry; + + if (active_table_file_entry->dbid != MyDatabaseId) + { + continue; + } + + /* Add the active table entry into local hash table*/ + entry = hash_search(local_active_table_file_map, active_table_file_entry, HASH_ENTER, &found); + if (entry) + *entry = *active_table_file_entry; + hash_search(active_tables_map, active_table_file_entry, HASH_REMOVE, NULL); + } + + LWLockRelease(active_table_shm_lock->lock); + + memset(&ctl, 0, sizeof(ctl)); + ctl.keysize = sizeof(Oid); + ctl.entrysize = sizeof(DiskQuotaActiveTableEntry); + ctl.hcxt = CurrentMemoryContext; + ctl.hash = oid_hash; + + local_active_table_stats_map = hash_create("local active table map with relfilenode info", + 1024, + &ctl, + HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); + + relation = heap_open(RelationRelationId, AccessShareLock); + /* traverse local active table map and calculate their file size. 
*/ + hash_seq_init(&iter, local_active_table_file_map); + /* scan whole local map, get the oid of each table and calculate the size of them */ + while ((active_table_file_entry = (DiskQuotaActiveTableFileEntry *) hash_seq_search(&iter)) != NULL) + { + Size tablesize; + bool found; + ScanKeyData skey[2]; + Oid reltablespace; + + reltablespace = active_table_file_entry->tablespaceoid; + + /* pg_class will show 0 when the value is actually MyDatabaseTableSpace */ + if (reltablespace == MyDatabaseTableSpace) + reltablespace = 0; + + /* set scan arguments */ + memcpy(skey, relfilenode_skey, sizeof(skey)); + skey[0].sk_argument = ObjectIdGetDatum(reltablespace); + skey[1].sk_argument = ObjectIdGetDatum(active_table_file_entry->relfilenode); + relScan = systable_beginscan(relation, + ClassTblspcRelfilenodeIndexId, + true, + NULL, + 2, + skey); + + tuple = systable_getnext(relScan); + + if (!HeapTupleIsValid(tuple)) + { + systable_endscan(relScan); + continue; + } + relOid = HeapTupleGetOid(tuple); + + /* Call function directly to get size of table by oid */ + tablesize = (Size) DatumGetInt64(DirectFunctionCall1(pg_total_relation_size, ObjectIdGetDatum(relOid))); + + active_table_entry = hash_search(local_active_table_stats_map, &relOid, HASH_ENTER, &found); + if (active_table_entry) + { + active_table_entry->tableoid = relOid; + active_table_entry->tablesize = tablesize; + } + systable_endscan(relScan); + } + elog(DEBUG1, "active table number is:%ld", hash_get_num_entries(local_active_table_file_map)); + heap_close(relation, AccessShareLock); + hash_destroy(local_active_table_file_map); + return local_active_table_stats_map; +} + +/* + * Hook function in smgr to report the active table + * information and stroe them in active table shared memory + * diskquota worker will consuming these active tables and + * recalculate their file size to update diskquota model. 
+ */ +static void +report_active_table_SmgrStat(SMgrRelation reln) +{ + DiskQuotaActiveTableFileEntry *entry; + DiskQuotaActiveTableFileEntry item; + bool found = false; + + if (prev_SmgrStat_hook) + (*prev_SmgrStat_hook)(reln); + + MemSet(&item, 0, sizeof(DiskQuotaActiveTableFileEntry)); + item.dbid = reln->smgr_rnode.node.dbNode; + item.relfilenode = reln->smgr_rnode.node.relNode; + item.tablespaceoid = reln->smgr_rnode.node.spcNode; + + LWLockAcquire(active_table_shm_lock->lock, LW_EXCLUSIVE); + entry = hash_search(active_tables_map, &item, HASH_ENTER_NULL, &found); + if (entry && !found) + *entry = item; + LWLockRelease(active_table_shm_lock->lock); + + if (!found && entry == NULL) { + /* We may miss the file size change of this relation at current refresh interval.*/ + ereport(WARNING, (errmsg("Share memory is not enough for active tables."))); + } +} diff --git a/activetable.h b/activetable.h new file mode 100644 index 00000000000..64b721e3d07 --- /dev/null +++ b/activetable.h @@ -0,0 +1,30 @@ +#ifndef ACTIVE_TABLE_H +#define ACTIVE_TABLE_H + +#include "storage/lwlock.h" +#include "diskquota.h" + +/* Cache to detect the active table list */ +typedef struct DiskQuotaActiveTableFileEntry +{ + Oid dbid; + Oid relfilenode; + Oid tablespaceoid; +} DiskQuotaActiveTableFileEntry; + +typedef struct DiskQuotaActiveTableEntry +{ + Oid tableoid; + Size tablesize; +} DiskQuotaActiveTableEntry; + + +extern HTAB* get_active_tables(void); +extern void init_active_table_hook(void); +extern void init_relfilenode_key(void); +extern void init_shm_worker_active_tables(void); +extern void init_lock_active_tables(void); + +extern HTAB *active_tables_map; +extern disk_quota_shared_state *active_table_shm_lock; +#endif diff --git a/data/csmall.txt b/data/csmall.txt new file mode 100644 index 00000000000..f6d8fb48fba --- /dev/null +++ b/data/csmall.txt @@ -0,0 +1,100 @@ +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 diff --git a/data/generate_insert.py b/data/generate_insert.py new file mode 100644 index 00000000000..34111804af5 --- /dev/null +++ b/data/generate_insert.py @@ -0,0 +1,6 @@ +# generate insert statement +f = open('insertfile', 'w') +f.write("set search_path to perfs;\n"); +for i in range(1,2000): + f.write("insert into a"+str(i)+" select generate_series(1,3000);\n"); +f.close() diff --git a/data/generate_table.py b/data/generate_table.py new file mode 100644 index 00000000000..88a4db938cb --- /dev/null +++ b/data/generate_table.py @@ -0,0 +1,8 @@ +#generate create table statement +f = open('tablefile', 'w') +f.write("create schema perfs;\nset search_path to perfs;\n"); +sql = "create table a" +sql2 = " (i int);\n" +for i in range(1,100000): + f.write(sql+str(i)+sql2+ "insert into a"+str(i)+" values(2);\n"); +f.close() diff --git a/diskquota--1.0.sql b/diskquota--1.0.sql new file mode 100644 index 00000000000..925f2c2a41b --- /dev/null +++ b/diskquota--1.0.sql @@ -0,0 +1,37 @@ +/* contrib/diskquota/diskquota--1.0.sql */ + +-- complain if script is sourced in psql, rather than via CREATE EXTENSION +\echo Use "CREATE EXTENSION diskquota" to load this file. 
\quit + +CREATE SCHEMA diskquota; + +set search_path='diskquota'; + +-- Configuration table +create table diskquota.quota_config (targetOid oid, quotatype int, quotalimitMB int8, PRIMARY KEY(targetOid, quotatype)); + +SELECT pg_catalog.pg_extension_config_dump('diskquota.quota_config', ''); + +CREATE FUNCTION set_schema_quota(text, text) +RETURNS void STRICT +AS 'MODULE_PATHNAME' +LANGUAGE C; + +CREATE FUNCTION set_role_quota(text, text) +RETURNS void STRICT +AS 'MODULE_PATHNAME' +LANGUAGE C; + +CREATE VIEW show_schema_quota_view AS +SELECT pg_namespace.nspname as schema_name, pg_class.relnamespace as schema_oid, quota.quotalimitMB as quota_in_mb, sum(pg_total_relation_size(pg_class.oid)) as nspsize_in_bytes +FROM pg_namespace, pg_class, diskquota.quota_config as quota +WHERE pg_class.relnamespace = quota.targetoid and pg_class.relnamespace = pg_namespace.oid and quota.quotatype=0 +GROUP BY pg_class.relnamespace, pg_namespace.nspname, quota.quotalimitMB; + +CREATE VIEW show_role_quota_view AS +SELECT pg_roles.rolname as role_name, pg_class.relowner as role_oid, quota.quotalimitMB as quota_in_mb, sum(pg_total_relation_size(pg_class.oid)) as rolsize_in_bytes +FROM pg_roles, pg_class, diskquota.quota_config as quota +WHERE pg_class.relowner = quota.targetoid and pg_class.relowner = pg_roles.oid and quota.quotatype=1 +GROUP BY pg_class.relowner, pg_roles.rolname, quota.quotalimitMB; + +reset search_path; diff --git a/diskquota.c b/diskquota.c new file mode 100644 index 00000000000..546455fc640 --- /dev/null +++ b/diskquota.c @@ -0,0 +1,771 @@ +/* ------------------------------------------------------------------------- + * + * diskquota.c + * + * Diskquota is used to limit the amount of disk space that a schema or a role + * can use. Diskquota is based on background worker framework. It contains a + * launcher process which is responsible for starting/refreshing the diskquota + * worker processes which monitor given databases. + * + * Copyright (C) 2013, PostgreSQL Global Development Group + * + * IDENTIFICATION + * contrib/diskquota/diskquota.c + * + * ------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "catalog/namespace.h" +#include "catalog/pg_collation.h" +#include "executor/spi.h" +#include "miscadmin.h" +#include "pgstat.h" +#include "postmaster/bgworker.h" +#include "storage/ipc.h" +#include "tcop/utility.h" +#include "utils/builtins.h" +#include "utils/formatting.h" +#include "utils/numeric.h" +#include "utils/varlena.h" + +#include "activetable.h" +#include "diskquota.h" +PG_MODULE_MAGIC; + +/* disk quota helper function */ +PG_FUNCTION_INFO_V1(set_schema_quota); +PG_FUNCTION_INFO_V1(set_role_quota); + +/* flags set by signal handlers */ +static volatile sig_atomic_t got_sighup = false; +static volatile sig_atomic_t got_sigterm = false; + +/* GUC variables */ +int diskquota_naptime = 0; +char *diskquota_monitored_database_list = NULL; +int diskquota_max_active_tables = 0; + +typedef struct DiskQuotaWorkerEntry DiskQuotaWorkerEntry; + +/* disk quota worker info used by launcher to manage the worker processes. 
*/ +struct DiskQuotaWorkerEntry +{ + char dbname[NAMEDATALEN]; + BackgroundWorkerHandle *handle; +}; + +/* using hash table to support incremental update the table size entry.*/ +static HTAB *disk_quota_worker_map = NULL; + +/* functions of disk quota*/ +void _PG_init(void); +void _PG_fini(void); +void disk_quota_worker_main(Datum); +void disk_quota_launcher_main(Datum); + +static void disk_quota_sigterm(SIGNAL_ARGS); +static void disk_quota_sighup(SIGNAL_ARGS); +static List *get_database_list(void); +static int64 get_size_in_mb(char *str); +static void refresh_worker_list(void); +static void set_quota_internal(Oid targetoid, int64 quota_limit_mb, QuotaType type); +static int start_worker(char* dbname); + +/* + * Entrypoint of diskquota module. + * + * Init shared memory and hooks. + * Define GUCs. + * start diskquota launcher process. + */ +void +_PG_init(void) +{ + BackgroundWorker worker; + + init_disk_quota_shmem(); + init_disk_quota_enforcement(); + init_active_table_hook(); + + /* get the configuration */ + DefineCustomIntVariable("diskquota.naptime", + "Duration between each check (in seconds).", + NULL, + &diskquota_naptime, + 10, + 1, + INT_MAX, + PGC_SIGHUP, + 0, + NULL, + NULL, + NULL); + + if (!process_shared_preload_libraries_in_progress) + return; + + DefineCustomStringVariable("diskquota.monitor_databases", + gettext_noop("database list with disk quota monitored."), + NULL, + &diskquota_monitored_database_list, + "", + PGC_SIGHUP, GUC_LIST_INPUT, + NULL, + NULL, + NULL); + + DefineCustomIntVariable("diskquota.max_active_tables", + "max number of active tables monitored by disk-quota", + NULL, + &diskquota_max_active_tables, + 1 * 1024 * 1024, + 1, + INT_MAX, + PGC_SIGHUP, + 0, + NULL, + NULL, + NULL); + + /* set up common data for diskquota launcher worker */ + worker.bgw_flags = BGWORKER_SHMEM_ACCESS | + BGWORKER_BACKEND_DATABASE_CONNECTION; + worker.bgw_start_time = BgWorkerStart_RecoveryFinished; + worker.bgw_restart_time = BGW_NEVER_RESTART; + sprintf(worker.bgw_library_name, "diskquota"); + sprintf(worker.bgw_function_name, "disk_quota_launcher_main"); + worker.bgw_notify_pid = 0; + + snprintf(worker.bgw_name, BGW_MAXLEN, "disk quota launcher"); + + RegisterBackgroundWorker(&worker); +} + +void +_PG_fini(void) +{ +} + +/* + * Signal handler for SIGTERM + * Set a flag to let the main loop to terminate, and set our latch to wake + * it up. + */ +static void +disk_quota_sigterm(SIGNAL_ARGS) +{ + int save_errno = errno; + + got_sigterm = true; + if (MyProc) + SetLatch(&MyProc->procLatch); + + errno = save_errno; +} + +/* + * Signal handler for SIGHUP + * Set a flag to tell the main loop to reread the config file, and set + * our latch to wake it up. + */ +static void +disk_quota_sighup(SIGNAL_ARGS) +{ + int save_errno = errno; + + got_sighup = true; + if (MyProc) + SetLatch(&MyProc->procLatch); + + errno = save_errno; +} + + +/* ---- Functions for disk quota worker process ---- */ + +/* + * Disk quota worker process will refresh disk quota model periodically. + * Refresh logic is defined in quotamodel.c + */ +void +disk_quota_worker_main(Datum main_arg) +{ + char *dbname=MyBgworkerEntry->bgw_name; + elog(LOG,"start disk quota worker process to monitor database:%s", dbname); + + /* Establish signal handlers before unblocking signals. 
*/ + pqsignal(SIGHUP, disk_quota_sighup); + pqsignal(SIGTERM, disk_quota_sigterm); + + /* We're now ready to receive signals */ + BackgroundWorkerUnblockSignals(); + + /* Connect to our database */ + BackgroundWorkerInitializeConnection(dbname, NULL, 0); + + /* Initialize diskquota related local hash map and refresh model immediately*/ + init_disk_quota_model(); + refresh_disk_quota_model(true); + + /* + * Main loop: do this until the SIGTERM handler tells us to terminate + */ + while (!got_sigterm) + { + int rc; + + /* + * Background workers mustn't call usleep() or any direct equivalent: + * instead, they may wait on their process latch, which sleeps as + * necessary, but is awakened if postmaster dies. That way the + * background process goes away immediately in an emergency. + */ + rc = WaitLatch(&MyProc->procLatch, + WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, + diskquota_naptime * 1000L, PG_WAIT_EXTENSION); + ResetLatch(&MyProc->procLatch); + + /* Do the work */ + refresh_disk_quota_model(false); + + /* emergency bailout if postmaster has died */ + if (rc & WL_POSTMASTER_DEATH) + proc_exit(1); + + /* + * In case of a SIGHUP, just reload the configuration. + */ + if (got_sighup) + { + got_sighup = false; + ProcessConfigFile(PGC_SIGHUP); + } + } + + proc_exit(1); +} + +/* ---- Functions for lancher process ---- */ +/* + * Launcher process manages the worker processes based on + * GUC diskquota.monitor_databases in configuration file. + */ +void +disk_quota_launcher_main(Datum main_arg) +{ + List *dblist; + ListCell *cell; + HASHCTL hash_ctl; + int db_count = 0; + + /* Establish signal handlers before unblocking signals. */ + pqsignal(SIGHUP, disk_quota_sighup); + pqsignal(SIGTERM, disk_quota_sigterm); + + /* We're now ready to receive signals */ + BackgroundWorkerUnblockSignals(); + + /* Connect to our database */ + //BackgroundWorkerInitializeConnection("postgres", NULL, 0); + + memset(&hash_ctl, 0, sizeof(hash_ctl)); + hash_ctl.keysize = NAMEDATALEN; + hash_ctl.entrysize = sizeof(DiskQuotaWorkerEntry); + + disk_quota_worker_map = hash_create("disk quota worker map", + 1024, + &hash_ctl, + HASH_ELEM); + + dblist = get_database_list(); + + foreach(cell, dblist) + { + char *db_name; + + if (db_count >= 10) + break; + db_name = (char *)lfirst(cell); + if (db_name == NULL || *db_name == '\0') + { + elog(LOG, "invalid db name='%s' in diskquota.monitor_databases", db_name); + continue; + } + start_worker(db_name); + db_count++; + } + /* + * Main loop: do this until the SIGTERM handler tells us to terminate + */ + while (!got_sigterm) + { + int rc; + + /* + * Background workers mustn't call usleep() or any direct equivalent: + * instead, they may wait on their process latch, which sleeps as + * necessary, but is awakened if postmaster dies. That way the + * background process goes away immediately in an emergency. + */ + rc = WaitLatch(&MyProc->procLatch, + WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, + diskquota_naptime * 1000L, PG_WAIT_EXTENSION); + ResetLatch(&MyProc->procLatch); + + /* emergency bailout if postmaster has died */ + if (rc & WL_POSTMASTER_DEATH) + proc_exit(1); + + /* + * In case of a SIGHUP, just reload the configuration. 
+ */ + if (got_sighup) + { + got_sighup = false; + ProcessConfigFile(PGC_SIGHUP); + /* terminate not monitored worker process and start new worker process */ + refresh_worker_list(); + } + + } + + proc_exit(1); +} + +/* + * database list found in GUC diskquota.monitored_database_list + */ +static List * +get_database_list(void) +{ + List *dblist = NULL; + char *dbstr; + + dbstr = pstrdup(diskquota_monitored_database_list); + + if (!SplitIdentifierString(dbstr, ',', &dblist)) + { + elog(WARNING, "cann't get database list from guc:'%s'", diskquota_monitored_database_list); + return NULL; + } + return dblist; +} + +/* + * When launcher receive SIGHUP, it will call refresh_worker_list() + * to terminate worker processes whose connected database no longer need + * to be monitored, and start new worker processes to watch new database. + */ +static void +refresh_worker_list(void) +{ + List *monitor_dblist; + List *removed_workerlist; + ListCell *cell; + ListCell *removed_workercell; + bool flag = false; + bool found; + DiskQuotaWorkerEntry *hash_entry; + HASH_SEQ_STATUS status; + int db_count = 0; + + removed_workerlist = NIL; + monitor_dblist = get_database_list(); + /* + * refresh the worker process based on the configuration file change. + * step 1 is to terminate worker processes whose connected database + * not in monitor database list. + */ + elog(LOG,"Refresh monitored database list."); + hash_seq_init(&status, disk_quota_worker_map); + + while ((hash_entry = (DiskQuotaWorkerEntry*) hash_seq_search(&status)) != NULL) + { + flag = false; + foreach(cell, monitor_dblist) + { + char *db_name; + + if (db_count >= 10) + break; + db_name = (char *)lfirst(cell); + if (db_name == NULL || *db_name == '\0') + { + continue; + } + if (strcmp(db_name, hash_entry->dbname) == 0 ) + { + flag = true; + break; + } + } + if (!flag) + { + removed_workerlist = lappend(removed_workerlist, hash_entry->dbname); + } + } + + foreach(removed_workercell, removed_workerlist) + { + DiskQuotaWorkerEntry* workerentry; + char *db_name; + BackgroundWorkerHandle *handle; + + db_name = (char *)lfirst(removed_workercell); + + workerentry = (DiskQuotaWorkerEntry *)hash_search(disk_quota_worker_map, + (void *)db_name, + HASH_REMOVE, &found); + if(found) + { + handle = workerentry->handle; + TerminateBackgroundWorker(handle); + } + } + + /* step 2: start new worker which first appears in monitor database list. */ + db_count = 0; + foreach(cell, monitor_dblist) + { + DiskQuotaWorkerEntry* workerentry; + char *db_name; + pid_t pid; + + if (db_count >= 10) + break; + db_name = (char *)lfirst(cell); + if (db_name == NULL || *db_name == '\0') + { + continue; + } + workerentry = (DiskQuotaWorkerEntry *)hash_search(disk_quota_worker_map, + (void *)db_name, + HASH_FIND, &found); + if (found) + { + /* in case worker is not in BGWH_STARTED mode, restart it. */ + if (GetBackgroundWorkerPid(workerentry->handle, &pid) != BGWH_STARTED) + start_worker(db_name); + } + else + { + start_worker(db_name); + } + } +} + +/* + * Dynamically launch an disk quota worker process. 
+ */ +static int +start_worker(char* dbname) +{ + BackgroundWorker worker; + BackgroundWorkerHandle *handle; + BgwHandleStatus status; + pid_t pid; + bool found; + DiskQuotaWorkerEntry* workerentry; + + worker.bgw_flags = BGWORKER_SHMEM_ACCESS | + BGWORKER_BACKEND_DATABASE_CONNECTION; + worker.bgw_start_time = BgWorkerStart_RecoveryFinished; + worker.bgw_restart_time = BGW_NEVER_RESTART; + sprintf(worker.bgw_library_name, "diskquota"); + sprintf(worker.bgw_function_name, "disk_quota_worker_main"); + snprintf(worker.bgw_name, BGW_MAXLEN, "%s", dbname); + /* set bgw_notify_pid so that we can use WaitForBackgroundWorkerStartup */ + worker.bgw_notify_pid = MyProcPid; + + if (!RegisterDynamicBackgroundWorker(&worker, &handle)) + return -1; + + status = WaitForBackgroundWorkerStartup(handle, &pid); + + if (status == BGWH_STOPPED) + ereport(ERROR, + (errcode(ERRCODE_INSUFFICIENT_RESOURCES), + errmsg("could not start background process"), + errhint("More details may be available in the server log."))); + if (status == BGWH_POSTMASTER_DIED) + ereport(ERROR, + (errcode(ERRCODE_INSUFFICIENT_RESOURCES), + errmsg("cannot start background processes without postmaster"), + errhint("Kill all remaining database processes and restart the database."))); + Assert(status == BGWH_STARTED); + + /* put the worker handle into the worker map */ + workerentry = (DiskQuotaWorkerEntry *)hash_search(disk_quota_worker_map, + (void *)dbname, + HASH_ENTER, &found); + if (!found) + { + workerentry->handle = handle; + } + + return pid; +} + +/* ---- Help Functions to set quota limit. ---- */ +/* + * Set disk quota limit for role. + */ +Datum +set_role_quota(PG_FUNCTION_ARGS) +{ + Oid roleoid; + char *rolname; + char *sizestr; + int64 quota_limit_mb; + + if (!superuser()) + { + ereport(ERROR, + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + errmsg("must be superuser to set disk quota limit"))); + } + + rolname = text_to_cstring(PG_GETARG_TEXT_PP(0)); + rolname = str_tolower(rolname, strlen(rolname), DEFAULT_COLLATION_OID); + roleoid = get_role_oid(rolname, false); + + sizestr = text_to_cstring(PG_GETARG_TEXT_PP(1)); + sizestr = str_tolower(sizestr, strlen(sizestr), DEFAULT_COLLATION_OID); + quota_limit_mb = get_size_in_mb(sizestr); + + set_quota_internal(roleoid, quota_limit_mb, ROLE_QUOTA); + PG_RETURN_VOID(); +} + +/* + * Set disk quota limit for schema. + */ +Datum +set_schema_quota(PG_FUNCTION_ARGS) +{ + Oid namespaceoid; + char *nspname; + char *sizestr; + int64 quota_limit_mb; + if (!superuser()) + { + ereport(ERROR, + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + errmsg("must be superuser to set disk quota limit"))); + } + + nspname = text_to_cstring(PG_GETARG_TEXT_PP(0)); + nspname = str_tolower(nspname, strlen(nspname), DEFAULT_COLLATION_OID); + namespaceoid = get_namespace_oid(nspname, false); + + sizestr = text_to_cstring(PG_GETARG_TEXT_PP(1)); + sizestr = str_tolower(sizestr, strlen(sizestr), DEFAULT_COLLATION_OID); + quota_limit_mb = get_size_in_mb(sizestr); + + set_quota_internal(namespaceoid, quota_limit_mb, NAMESPACE_QUOTA); + PG_RETURN_VOID(); +} + +/* + * Write the quota limit info into quota_config table under + * 'diskquota' schema of the current database. 
+ */ +static void +set_quota_internal(Oid targetoid, int64 quota_limit_mb, QuotaType type) +{ + int ret; + StringInfoData buf; + + initStringInfo(&buf); + appendStringInfo(&buf, + "select * from diskquota.quota_config where targetoid = %u" + " and quotatype =%d", + targetoid, type); + + SPI_connect(); + + ret = SPI_execute(buf.data, false, 0); + if (ret != SPI_OK_SELECT) + elog(ERROR, "cannot select quota setting table: error code %d", ret); + + /* if the schema or role's quota has been set before*/ + if (SPI_processed == 0 && quota_limit_mb > 0) + { + resetStringInfo(&buf); + initStringInfo(&buf); + appendStringInfo(&buf, + "insert into diskquota.quota_config values(%u,%d,%ld);", + targetoid, type, quota_limit_mb); + ret = SPI_execute(buf.data, false, 0); + if (ret != SPI_OK_INSERT) + elog(ERROR, "cannot insert into quota setting table, error code %d", ret); + } + else if (SPI_processed > 0 && quota_limit_mb <= 0) + { + resetStringInfo(&buf); + initStringInfo(&buf); + appendStringInfo(&buf, + "delete from diskquota.quota_config where targetoid=%u" + " and quotatype=%d;", + targetoid, type); + ret = SPI_execute(buf.data, false, 0); + if (ret != SPI_OK_DELETE) + elog(ERROR, "cannot delete item from quota setting table, error code %d", ret); + } + else if(SPI_processed > 0 && quota_limit_mb > 0) + { + resetStringInfo(&buf); + initStringInfo(&buf); + appendStringInfo(&buf, + "update diskquota.quota_config set quotalimitMB = %ld where targetoid=%u" + " and quotatype=%d;", + quota_limit_mb, targetoid, type); + ret = SPI_execute(buf.data, false, 0); + if (ret != SPI_OK_UPDATE) + elog(ERROR, "cannot update quota setting table, error code %d", ret); + } + /* + * And finish our transaction. + */ + SPI_finish(); + return; +} + +/* + * Convert a human-readable size to a size in MB. + */ +static int64 +get_size_in_mb(char *str) +{ + char *strptr, *endptr; + char saved_char; + Numeric num; + int64 result; + bool have_digits = false; + + /* Skip leading whitespace */ + strptr = str; + while (isspace((unsigned char) *strptr)) + strptr++; + + /* Check that we have a valid number and determine where it ends */ + endptr = strptr; + + /* Part (1): sign */ + if (*endptr == '-' || *endptr == '+') + endptr++; + + /* Part (2): main digit string */ + if (isdigit((unsigned char) *endptr)) + { + have_digits = true; + do + endptr++; + while (isdigit((unsigned char) *endptr)); + } + + /* Part (3): optional decimal point and fractional digits */ + if (*endptr == '.') + { + endptr++; + if (isdigit((unsigned char) *endptr)) + { + have_digits = true; + do + endptr++; + while (isdigit((unsigned char) *endptr)); + } + } + + /* Complain if we don't have a valid number at this point */ + if (!have_digits) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("invalid size: \"%s\"", str))); + + /* Part (4): optional exponent */ + if (*endptr == 'e' || *endptr == 'E') + { + long exponent; + char *cp; + + /* + * Note we might one day support EB units, so if what follows 'E' + * isn't a number, just treat it all as a unit to be parsed. + */ + exponent = strtol(endptr + 1, &cp, 10); + (void) exponent; /* Silence -Wunused-result warnings */ + if (cp > endptr + 1) + endptr = cp; + } + + /* + * Parse the number, saving the next character, which may be the first + * character of the unit string. 
+ */ + saved_char = *endptr; + *endptr = '\0'; + + num = DatumGetNumeric(DirectFunctionCall3(numeric_in, + CStringGetDatum(strptr), + ObjectIdGetDatum(InvalidOid), + Int32GetDatum(-1))); + + *endptr = saved_char; + + /* Skip whitespace between number and unit */ + strptr = endptr; + while (isspace((unsigned char) *strptr)) + strptr++; + + /* Handle possible unit */ + if (*strptr != '\0') + { + int64 multiplier = 0; + + /* Trim any trailing whitespace */ + endptr = str + strlen(str) - 1; + + while (isspace((unsigned char) *endptr)) + endptr--; + + endptr++; + *endptr = '\0'; + + /* Parse the unit case-insensitively */ + if (pg_strcasecmp(strptr, "mb") == 0) + multiplier = ((int64) 1); + + else if (pg_strcasecmp(strptr, "gb") == 0) + multiplier = ((int64) 1024); + + else if (pg_strcasecmp(strptr, "tb") == 0) + multiplier = ((int64) 1024) * 1024 ; + else if (pg_strcasecmp(strptr, "pb") == 0) + multiplier = ((int64) 1024) * 1024 * 1024 ; + else + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("invalid size: \"%s\"", str), + errdetail("Invalid size unit: \"%s\".", strptr), + errhint("Valid units are \"MB\", \"GB\", \"TB\", and \"PB\"."))); + + if (multiplier > 1) + { + Numeric mul_num; + + mul_num = DatumGetNumeric(DirectFunctionCall1(int8_numeric, + Int64GetDatum(multiplier))); + + num = DatumGetNumeric(DirectFunctionCall2(numeric_mul, + NumericGetDatum(mul_num), + NumericGetDatum(num))); + } + } + + result = DatumGetInt64(DirectFunctionCall1(numeric_int8, + NumericGetDatum(num))); + + return result; +} diff --git a/diskquota.control b/diskquota.control new file mode 100644 index 00000000000..6c25e7cd945 --- /dev/null +++ b/diskquota.control @@ -0,0 +1,5 @@ +# diskquota extension +comment = 'Disk Quota Main Program' +default_version = '1.0' +module_pathname = '$libdir/diskquota' +relocatable = true diff --git a/diskquota.h b/diskquota.h new file mode 100644 index 00000000000..87b2fabf87e --- /dev/null +++ b/diskquota.h @@ -0,0 +1,33 @@ +#ifndef DISK_QUOTA_H +#define DISK_QUOTA_H + +#include "storage/lwlock.h" + +typedef enum +{ + NAMESPACE_QUOTA, + ROLE_QUOTA +} QuotaType; + +typedef struct +{ + LWLock *lock; /* protects shared memory of blackMap */ +} disk_quota_shared_state; + +/* enforcement interface*/ +extern void init_disk_quota_enforcement(void); + +/* quota model interface*/ +extern void init_disk_quota_shmem(void); +extern void init_disk_quota_model(void); +extern void refresh_disk_quota_model(bool force); +extern bool quota_check_common(Oid reloid); + +/* quotaspi interface */ +extern void init_disk_quota_hook(void); + +extern int diskquota_naptime; +extern char *diskquota_monitored_database_list; +extern int diskquota_max_active_tables; + +#endif diff --git a/diskquota_schedule b/diskquota_schedule new file mode 100644 index 00000000000..283ddc66bac --- /dev/null +++ b/diskquota_schedule @@ -0,0 +1,6 @@ +test: prepare +test: test_role test_schema test_drop_table test_column test_copy test_update test_toast test_truncate test_reschema test_temp_role test_rename +test: test_partition +test: test_vacuum +test: clean + diff --git a/enforcement.c b/enforcement.c new file mode 100644 index 00000000000..72af5ba1550 --- /dev/null +++ b/enforcement.c @@ -0,0 +1,95 @@ +/* ------------------------------------------------------------------------- + * + * enforcment.c + * + * This code registers enforcement hooks to cancle the query which exceeds + * the quota limit. 
+ * + * Copyright (C) 2013, PostgreSQL Global Development Group + * + * IDENTIFICATION + * contrib/diskquota/enforcement.c + * + * ------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "executor/executor.h" +#include "storage/bufmgr.h" + +#include "diskquota.h" + +static bool quota_check_ExecCheckRTPerms(List *rangeTable, bool ereport_on_violation); +static bool quota_check_ReadBufferExtendCheckPerms(Oid reloid, BlockNumber blockNum); + +static ExecutorCheckPerms_hook_type prev_ExecutorCheckPerms_hook; +static BufferExtendCheckPerms_hook_type prev_BufferExtendCheckPerms_hook; + +/* + * Initialize enforcement hooks. + */ +void +init_disk_quota_enforcement(void) +{ + /* enforcement hook before query is loading data */ + prev_ExecutorCheckPerms_hook = ExecutorCheckPerms_hook; + ExecutorCheckPerms_hook = quota_check_ExecCheckRTPerms; + + /* enforcement hook during query is loading data*/ + prev_BufferExtendCheckPerms_hook = BufferExtendCheckPerms_hook; + BufferExtendCheckPerms_hook = quota_check_ReadBufferExtendCheckPerms; +} + +/* + * Enformcent hook function before query is loading data. Throws an error if + * you try to INSERT, UPDATE or COPY into a table, and the quota has been exceeded. + */ +static bool +quota_check_ExecCheckRTPerms(List *rangeTable, bool ereport_on_violation) +{ + ListCell *l; + + foreach(l, rangeTable) + { + RangeTblEntry *rte = (RangeTblEntry *) lfirst(l); + + /* see ExecCheckRTEPerms() */ + if (rte->rtekind != RTE_RELATION) + continue; + + /* + * Only check quota on inserts. UPDATEs may well increase + * space usage too, but we ignore that for now. + */ + if ((rte->requiredPerms & ACL_INSERT) == 0 && (rte->requiredPerms & ACL_UPDATE) == 0) + continue; + + /* Perform the check as the relation's owner and namespace */ + quota_check_common(rte->relid); + + } + + return true; +} + +/* + * Enformcent hook function when query is loading data. Throws an error if + * you try to extend a buffer page, and the quota has been exceeded. + */ +static bool +quota_check_ReadBufferExtendCheckPerms(Oid reloid, BlockNumber blockNum) +{ + bool isExtend; + + isExtend = (blockNum == P_NEW); + /* if not buffer extend, we could skip quota limit check*/ + if (!isExtend) + { + return true; + } + + /* Perform the check as the relation's owner and namespace */ + quota_check_common(reloid); + return true; +} + diff --git a/quotamodel.c b/quotamodel.c new file mode 100644 index 00000000000..2f5a6f2fd98 --- /dev/null +++ b/quotamodel.c @@ -0,0 +1,899 @@ +/* ------------------------------------------------------------------------- + * + * quotamodel.c + * + * This code is responsible for init disk quota model and refresh disk quota + * model. 
+ * + * Copyright (C) 2013, PostgreSQL Global Development Group + * + * IDENTIFICATION + * contrib/diskquota/quotamodel.c + * + * ------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "access/heapam.h" +#include "access/htup_details.h" +#include "access/reloptions.h" +#include "access/transam.h" +#include "access/xact.h" +#include "catalog/namespace.h" +#include "catalog/pg_class.h" +#include "catalog/pg_database.h" +#include "catalog/pg_type.h" +#include "commands/dbcommands.h" +#include "executor/spi.h" +#include "funcapi.h" +#include "lib/stringinfo.h" +#include "miscadmin.h" +#include "nodes/makefuncs.h" +#include "storage/ipc.h" +#include "storage/latch.h" +#include "storage/lwlock.h" +#include "storage/shmem.h" +#include "utils/builtins.h" +#include "utils/fmgroids.h" +#include "utils/lsyscache.h" +#include "utils/snapmgr.h" +#include "utils/syscache.h" + +#include "activetable.h" +#include "diskquota.h" + +/* cluster level max size of black list */ +#define MAX_DISK_QUOTA_BLACK_ENTRIES (1024 * 1024) +/* cluster level init size of black list */ +#define INIT_DISK_QUOTA_BLACK_ENTRIES 8192 +/* per database level max size of black list */ +#define MAX_LOCAL_DISK_QUOTA_BLACK_ENTRIES 8192 + +typedef struct TableSizeEntry TableSizeEntry; +typedef struct NamespaceSizeEntry NamespaceSizeEntry; +typedef struct RoleSizeEntry RoleSizeEntry; +typedef struct QuotaLimitEntry QuotaLimitEntry; +typedef struct BlackMapEntry BlackMapEntry; +typedef struct LocalBlackMapEntry LocalBlackMapEntry; + +/* local cache of table disk size and corresponding schema and owner */ +struct TableSizeEntry +{ + Oid reloid; + Oid namespaceoid; + Oid owneroid; + int64 totalsize; + bool is_exist; /* flag used to check whether table is already dropped */ +}; + +/* local cache of namespace disk size */ +struct NamespaceSizeEntry +{ + Oid namespaceoid; + int64 totalsize; +}; + +/* local cache of role disk size */ +struct RoleSizeEntry +{ + Oid owneroid; + int64 totalsize; +}; + +/* local cache of disk quota limit */ +struct QuotaLimitEntry +{ + Oid targetoid; + int64 limitsize; +}; + +/* global blacklist for which exceed their quota limit */ +struct BlackMapEntry +{ + Oid targetoid; + Oid databaseoid; + uint32 targettype; +}; + +/* local blacklist for which exceed their quota limit */ +struct LocalBlackMapEntry +{ + BlackMapEntry keyitem; + bool isexceeded; +}; + +/* using hash table to support incremental update the table size entry.*/ +static HTAB *table_size_map = NULL; +static HTAB *namespace_size_map = NULL; +static HTAB *role_size_map = NULL; +static HTAB *namespace_quota_limit_map = NULL; +static HTAB *role_quota_limit_map = NULL; + +/* black list for database objects which exceed their quota limit */ +static HTAB *disk_quota_black_map = NULL; +static HTAB *local_disk_quota_black_map = NULL; + +static disk_quota_shared_state *black_map_shm_lock; +disk_quota_shared_state *active_table_shm_lock = NULL; + +static shmem_startup_hook_type prev_shmem_startup_hook = NULL; + +/* functions to refresh disk quota model*/ +static void refresh_disk_quota_usage(bool force); +static void calculate_table_disk_usage(bool force); +static void calculate_schema_disk_usage(void); +static void calculate_role_disk_usage(void); +static void flush_local_black_map(void); +static void reset_local_black_map(void); +static void check_disk_quota_by_oid(Oid targetOid, int64 current_usage, QuotaType type); +static void update_namespace_map(Oid namespaceoid, int64 updatesize); +static void 
update_role_map(Oid owneroid, int64 updatesize); +static void remove_namespace_map(Oid namespaceoid); +static void remove_role_map(Oid owneroid); +static bool load_quotas(void); + +static Size DiskQuotaShmemSize(void); +static void disk_quota_shmem_startup(void); + +/* + * DiskQuotaShmemSize + * Compute space needed for diskquota-related shared memory + */ +Size +DiskQuotaShmemSize(void) +{ + Size size; + + size = MAXALIGN(sizeof(disk_quota_shared_state)); + size = add_size(size, size); // two locks + size = add_size(size, hash_estimate_size(MAX_DISK_QUOTA_BLACK_ENTRIES, sizeof(BlackMapEntry))); + size = add_size(size, hash_estimate_size(diskquota_max_active_tables, sizeof(DiskQuotaActiveTableEntry))); + return size; +} + +/* + * DiskQuotaShmemInit + * Allocate and initialize diskquota-related shared memory + */ +void +disk_quota_shmem_startup(void) +{ + bool found; + HASHCTL hash_ctl; + + if (prev_shmem_startup_hook) + (*prev_shmem_startup_hook)(); + + black_map_shm_lock = NULL; + disk_quota_black_map = NULL; + + LWLockAcquire(AddinShmemInitLock, LW_EXCLUSIVE); + + black_map_shm_lock = ShmemInitStruct("disk_quota_black_map_shm_lock", + sizeof(disk_quota_shared_state), + &found); + + if (!found) + { + black_map_shm_lock->lock = &(GetNamedLWLockTranche("disk_quota_black_map_shm_lock"))->lock; + } + + init_lock_active_tables(); + + memset(&hash_ctl, 0, sizeof(hash_ctl)); + hash_ctl.keysize = sizeof(BlackMapEntry); + hash_ctl.entrysize = sizeof(BlackMapEntry); + hash_ctl.hash = tag_hash; + + disk_quota_black_map = ShmemInitHash("blackmap whose quota limitation is reached", + INIT_DISK_QUOTA_BLACK_ENTRIES, + MAX_DISK_QUOTA_BLACK_ENTRIES, + &hash_ctl, + HASH_ELEM | HASH_FUNCTION); + + init_shm_worker_active_tables(); + + LWLockRelease(AddinShmemInitLock); +} + +void +init_disk_quota_shmem(void) +{ + /* + * Request additional shared resources. (These are no-ops if we're not in + * the postmaster process.) We'll allocate or attach to the shared + * resources in pgss_shmem_startup(). + */ + RequestAddinShmemSpace(DiskQuotaShmemSize()); + RequestNamedLWLockTranche("disk_quota_black_map_shm_lock", 1); + RequestNamedLWLockTranche("disk_quota_active_table_shm_lock", 1); + + /* + * Install startup hook to initialize our shared memory. + */ + prev_shmem_startup_hook = shmem_startup_hook; + shmem_startup_hook = disk_quota_shmem_startup; +} + +/* + * Init disk quota model when the worker process firstly started. 
+ */ +void +init_disk_quota_model(void) +{ + HASHCTL hash_ctl; + + /* init hash table for table/schema/role etc.*/ + memset(&hash_ctl, 0, sizeof(hash_ctl)); + hash_ctl.keysize = sizeof(Oid); + hash_ctl.entrysize = sizeof(TableSizeEntry); + hash_ctl.hcxt = CurrentMemoryContext; + hash_ctl.hash = oid_hash; + + table_size_map = hash_create("TableSizeEntry map", + 1024, + &hash_ctl, + HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); + + memset(&hash_ctl, 0, sizeof(hash_ctl)); + hash_ctl.keysize = sizeof(Oid); + hash_ctl.entrysize = sizeof(NamespaceSizeEntry); + hash_ctl.hcxt = CurrentMemoryContext; + hash_ctl.hash = oid_hash; + + namespace_size_map = hash_create("NamespaceSizeEntry map", + 1024, + &hash_ctl, + HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); + + memset(&hash_ctl, 0, sizeof(hash_ctl)); + hash_ctl.keysize = sizeof(Oid); + hash_ctl.entrysize = sizeof(RoleSizeEntry); + hash_ctl.hcxt = CurrentMemoryContext; + hash_ctl.hash = oid_hash; + + role_size_map = hash_create("RoleSizeEntry map", + 1024, + &hash_ctl, + HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); + + memset(&hash_ctl, 0, sizeof(hash_ctl)); + hash_ctl.keysize = sizeof(Oid); + hash_ctl.entrysize = sizeof(QuotaLimitEntry); + hash_ctl.hcxt = CurrentMemoryContext; + hash_ctl.hash = oid_hash; + + namespace_quota_limit_map = hash_create("Namespace QuotaLimitEntry map", + 1024, + &hash_ctl, + HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); + + role_quota_limit_map = hash_create("Role QuotaLimitEntry map", + 1024, + &hash_ctl, + HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); + + memset(&hash_ctl, 0, sizeof(hash_ctl)); + hash_ctl.keysize = sizeof(BlackMapEntry); + hash_ctl.entrysize = sizeof(LocalBlackMapEntry); + hash_ctl.hcxt = CurrentMemoryContext; + hash_ctl.hash = tag_hash; + + local_disk_quota_black_map = hash_create("local blackmap whose quota limitation is reached", + MAX_LOCAL_DISK_QUOTA_BLACK_ENTRIES, + &hash_ctl, + HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); + init_relfilenode_key(); +} + +/* + * diskquota worker will refresh disk quota model + * periodically. It will reload quota setting and + * recalculate the changed disk usage. + */ +void +refresh_disk_quota_model(bool force) +{ + elog(DEBUG1,"check disk quota begin"); + StartTransactionCommand(); + SPI_connect(); + PushActiveSnapshot(GetTransactionSnapshot()); + /* skip refresh model when load_quotas failed */ + if (load_quotas()) + { + refresh_disk_quota_usage(force); + } + SPI_finish(); + PopActiveSnapshot(); + CommitTransactionCommand(); + elog(DEBUG1,"check disk quota end"); +} + +/* + * Update the disk usage of nameapsce and role. + * Put the exceeded namespace and role into shared black map. + */ +static void +refresh_disk_quota_usage(bool force) +{ + /* copy shared black map to local black map */ + reset_local_black_map(); + /* recalculate the disk usage of table, schema and role */ + calculate_table_disk_usage(force); + calculate_schema_disk_usage(); + calculate_role_disk_usage(); + /* copy local black map back to shared black map */ + flush_local_black_map(); +} + +/* + * Generate the new shared blacklist from the local_black_list which + * exceed the quota limit. + * local_balck_list is used to reduce the lock race. 
+ */ +static void +flush_local_black_map(void) +{ + HASH_SEQ_STATUS iter; + LocalBlackMapEntry* localblackentry; + BlackMapEntry* blackentry; + bool found; + + LWLockAcquire(black_map_shm_lock->lock, LW_EXCLUSIVE); + + hash_seq_init(&iter, local_disk_quota_black_map); + while ((localblackentry = hash_seq_search(&iter)) != NULL) + { + if (localblackentry->isexceeded) + { + blackentry = (BlackMapEntry*) hash_search(disk_quota_black_map, + (void *) &localblackentry->keyitem, + HASH_ENTER_NULL, &found); + if (blackentry == NULL) + { + elog(WARNING, "shared disk quota black map size limit reached."); + } + else + { + /* new db objects which exceed quota limit */ + if (!found) + { + blackentry->targetoid = localblackentry->keyitem.targetoid; + blackentry->databaseoid = MyDatabaseId; + blackentry->targettype = localblackentry->keyitem.targettype; + } + } + } + else + { + /* db objects are removed or under quota limit in the new loop */ + (void) hash_search(disk_quota_black_map, + (void *) &localblackentry->keyitem, + HASH_REMOVE, NULL); + } + } + LWLockRelease(black_map_shm_lock->lock); +} + +/* Fetch the new blacklist from shared blacklist at each refresh iteration. */ +static void +reset_local_black_map(void) +{ + HASH_SEQ_STATUS iter; + LocalBlackMapEntry* localblackentry; + BlackMapEntry* blackentry; + bool found; + /* clear entries in local black map*/ + hash_seq_init(&iter, local_disk_quota_black_map); + + while ((localblackentry = hash_seq_search(&iter)) != NULL) + { + (void) hash_search(local_disk_quota_black_map, + (void *) &localblackentry->keyitem, + HASH_REMOVE, NULL); + } + + /* get black map copy from shared black map */ + LWLockAcquire(black_map_shm_lock->lock, LW_SHARED); + hash_seq_init(&iter, disk_quota_black_map); + while ((blackentry = hash_seq_search(&iter)) != NULL) + { + /* only reset entries for current db */ + if (blackentry->databaseoid == MyDatabaseId) + { + localblackentry = (LocalBlackMapEntry*) hash_search(local_disk_quota_black_map, + (void *) blackentry, + HASH_ENTER, &found); + if (!found) + { + localblackentry->isexceeded = false; + } + } + } + LWLockRelease(black_map_shm_lock->lock); + +} + +/* + * Compare the disk quota limit and current usage of a database object. + * Put them into local blacklist if quota limit is exceeded. 
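+ * Note that usage is compared in whole megabytes: integer division
+ * rounds the current usage down before it is checked against the limit.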
+ */
+static void check_disk_quota_by_oid(Oid targetOid, int64 current_usage, QuotaType type)
+{
+    bool    found;
+    int32   quota_limit_mb;
+    int32   current_usage_mb;
+    LocalBlackMapEntry* localblackentry;
+    BlackMapEntry keyitem;
+
+    QuotaLimitEntry* quota_entry;
+    if (type == NAMESPACE_QUOTA)
+    {
+        quota_entry = (QuotaLimitEntry *) hash_search(namespace_quota_limit_map,
+                                                      &targetOid,
+                                                      HASH_FIND, &found);
+    }
+    else if (type == ROLE_QUOTA)
+    {
+        quota_entry = (QuotaLimitEntry *) hash_search(role_quota_limit_map,
+                                                      &targetOid,
+                                                      HASH_FIND, &found);
+    }
+    else
+    {
+        /* skip the check if it is neither a namespace nor a role quota */
+        return;
+    }
+
+    if (!found)
+    {
+        /* default is no limit */
+        return;
+    }
+
+    quota_limit_mb = quota_entry->limitsize;
+    current_usage_mb = current_usage / (1024 * 1024);
+    if (current_usage_mb >= quota_limit_mb)
+    {
+        memset(&keyitem, 0, sizeof(BlackMapEntry));
+        keyitem.targetoid = targetOid;
+        keyitem.databaseoid = MyDatabaseId;
+        keyitem.targettype = (uint32) type;
+        elog(DEBUG1, "Put object %u to blacklist with quota limit:%d, current usage:%d",
+             targetOid, quota_limit_mb, current_usage_mb);
+        localblackentry = (LocalBlackMapEntry*) hash_search(local_disk_quota_black_map,
+                                                            &keyitem,
+                                                            HASH_ENTER, &found);
+        localblackentry->isexceeded = true;
+    }
+
+}
+
+/*
+ * Remove a namespace from the local namespace_size_map
+ */
+static void
+remove_namespace_map(Oid namespaceoid)
+{
+    hash_search(namespace_size_map,
+                &namespaceoid,
+                HASH_REMOVE, NULL);
+}
+
+/*
+ * Update the current disk usage of a namespace in namespace_size_map.
+ */
+static void
+update_namespace_map(Oid namespaceoid, int64 updatesize)
+{
+    bool    found;
+    NamespaceSizeEntry* nsentry;
+    nsentry = (NamespaceSizeEntry *) hash_search(namespace_size_map,
+                                                 &namespaceoid,
+                                                 HASH_ENTER, &found);
+    if (!found)
+    {
+        nsentry->namespaceoid = namespaceoid;
+        nsentry->totalsize = updatesize;
+    }
+    else {
+        nsentry->totalsize += updatesize;
+    }
+
+}
+
+/*
+ * Remove a role from the local role_size_map
+ */
+static void
+remove_role_map(Oid owneroid)
+{
+    hash_search(role_size_map,
+                &owneroid,
+                HASH_REMOVE, NULL);
+}
+
+/*
+ * Update the current disk usage of a role in role_size_map.
+ */
+static void
+update_role_map(Oid owneroid, int64 updatesize)
+{
+    bool    found;
+    RoleSizeEntry* rolentry;
+    rolentry = (RoleSizeEntry *) hash_search(role_size_map,
+                                             &owneroid,
+                                             HASH_ENTER, &found);
+    if (!found)
+    {
+        rolentry->owneroid = owneroid;
+        rolentry->totalsize = updatesize;
+    }
+    else {
+        rolentry->totalsize += updatesize;
+    }
+
+}
+
+/*
+ * Incrementally update the disk usage of every database object.
+ * Recalculate a table's disk usage when it is a new table or an active table.
+ * Detect removed tables when they are no longer present in pg_class.
+ * If a change happens, whether a size change or an owner change,
+ * update namespace_size_map and role_size_map accordingly.
+ * Parameter 'force' is set to true at the initialization stage to
+ * recalculate the file size of all the tables.
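+ * When 'force' is false, only tables found in the active table map are
+ * re-sized; all other entries keep their previously cached totalsize.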
+ *
+ */
+static void
+calculate_table_disk_usage(bool force)
+{
+    bool    found;
+    bool    active_tbl_found;
+    Relation    classRel;
+    HeapTuple   tuple;
+    HeapScanDesc    relScan;
+    TableSizeEntry *tsentry;
+    Oid     relOid;
+    HASH_SEQ_STATUS iter;
+    HTAB    *local_active_table_stat_map;
+    DiskQuotaActiveTableEntry *active_table_entry;
+
+    classRel = heap_open(RelationRelationId, AccessShareLock);
+    relScan = heap_beginscan_catalog(classRel, 0, NULL);
+
+    local_active_table_stat_map = get_active_tables();
+
+    /* unset the is_exist flag for each tsentry in table_size_map */
+    hash_seq_init(&iter, table_size_map);
+    while ((tsentry = hash_seq_search(&iter)) != NULL)
+    {
+        tsentry->is_exist = false;
+    }
+
+    /*
+     * Scan pg_class to detect table events: drop, schema change and owner
+     * change. Calculate the file size of each active table and update
+     * namespace_size_map and role_size_map.
+     */
+    while ((tuple = heap_getnext(relScan, ForwardScanDirection)) != NULL)
+    {
+        Form_pg_class classForm = (Form_pg_class) GETSTRUCT(tuple);
+        found = false;
+        if (classForm->relkind != RELKIND_RELATION &&
+            classForm->relkind != RELKIND_MATVIEW)
+            continue;
+        relOid = HeapTupleGetOid(tuple);
+
+        /* ignore system tables */
+        if (relOid < FirstNormalObjectId)
+            continue;
+
+        tsentry = (TableSizeEntry *) hash_search(table_size_map,
+                                                 &relOid,
+                                                 HASH_ENTER, &found);
+        /* mark tsentry as still existing */
+        if (tsentry)
+            tsentry->is_exist = true;
+
+        active_table_entry = (DiskQuotaActiveTableEntry *) hash_search(local_active_table_stat_map, &relOid, HASH_FIND, &active_tbl_found);
+
+        /* skip recalculating tables which are neither in the active list nor scanned at the initialization stage */
+        if (active_tbl_found || force)
+        {
+
+            /* namespace and owner may have changed since the last check */
+            if (!found)
+            {
+                /* a new table */
+                tsentry->reloid = relOid;
+                tsentry->namespaceoid = classForm->relnamespace;
+                tsentry->owneroid = classForm->relowner;
+                if (!force)
+                {
+                    tsentry->totalsize = (int64) active_table_entry->tablesize;
+                }
+                else
+                {
+                    tsentry->totalsize = DatumGetInt64(DirectFunctionCall1(pg_total_relation_size,
+                                                       ObjectIdGetDatum(relOid)));
+                }
+                update_namespace_map(tsentry->namespaceoid, tsentry->totalsize);
+                update_role_map(tsentry->owneroid, tsentry->totalsize);
+            }
+            else
+            {
+                /* an existing entry in table_size_map: update the size only when the table is in the active table list */
+                if (active_tbl_found)
+                {
+                    int64 oldtotalsize = tsentry->totalsize;
+                    tsentry->totalsize = (int64) active_table_entry->tablesize;
+                    update_namespace_map(tsentry->namespaceoid, tsentry->totalsize - oldtotalsize);
+                    update_role_map(tsentry->owneroid, tsentry->totalsize - oldtotalsize);
+                }
+            }
+        }
+
+        /* if the schema changed, transfer the file size */
+        if (tsentry->namespaceoid != classForm->relnamespace)
+        {
+            update_namespace_map(tsentry->namespaceoid, -1 * tsentry->totalsize);
+            tsentry->namespaceoid = classForm->relnamespace;
+            update_namespace_map(tsentry->namespaceoid, tsentry->totalsize);
+        }
+        /* if the owner changed, transfer the file size */
+        if (tsentry->owneroid != classForm->relowner)
+        {
+            update_role_map(tsentry->owneroid, -1 * tsentry->totalsize);
+            tsentry->owneroid = classForm->relowner;
+            update_role_map(tsentry->owneroid, tsentry->totalsize);
+        }
+    }
+
+    heap_endscan(relScan);
+    heap_close(classRel, AccessShareLock);
+    hash_destroy(local_active_table_stat_map);
+
+    /* process removed tables */
+    hash_seq_init(&iter, table_size_map);
+    while ((tsentry = hash_seq_search(&iter)) != NULL)
+    {
+        if (tsentry->is_exist == false)
+        {
+            update_role_map(tsentry->owneroid, -1 * tsentry->totalsize);
+            update_namespace_map(tsentry->namespaceoid, -1 * tsentry->totalsize);
+
+            hash_search(table_size_map,
+                        &tsentry->reloid,
+                        HASH_REMOVE, NULL);
+            continue;
+        }
+    }
+}
+
+/*
+ * Check the namespace quota limit against the current usage.
+ * Remove dropped namespaces from namespace_size_map.
+ */
+static void calculate_schema_disk_usage(void)
+{
+    HeapTuple   tuple;
+    HASH_SEQ_STATUS iter;
+    NamespaceSizeEntry* nsentry;
+    hash_seq_init(&iter, namespace_size_map);
+
+    while ((nsentry = hash_seq_search(&iter)) != NULL)
+    {
+        /* check if the namespace has already been dropped */
+        tuple = SearchSysCache1(NAMESPACEOID, ObjectIdGetDatum(nsentry->namespaceoid));
+        if (!HeapTupleIsValid(tuple))
+        {
+            remove_namespace_map(nsentry->namespaceoid);
+            continue;
+        }
+        ReleaseSysCache(tuple);
+        check_disk_quota_by_oid(nsentry->namespaceoid, nsentry->totalsize, NAMESPACE_QUOTA);
+    }
+}
+
+/*
+ * Check the role quota limit against the current usage.
+ * Remove dropped roles from role_size_map.
+ */
+static void calculate_role_disk_usage(void)
+{
+    HeapTuple   tuple;
+    HASH_SEQ_STATUS iter;
+    RoleSizeEntry* rolentry;
+    hash_seq_init(&iter, role_size_map);
+
+    while ((rolentry = hash_seq_search(&iter)) != NULL)
+    {
+        /* check if the role has already been dropped */
+        tuple = SearchSysCache1(AUTHOID, ObjectIdGetDatum(rolentry->owneroid));
+        if (!HeapTupleIsValid(tuple))
+        {
+            remove_role_map(rolentry->owneroid);
+            continue;
+        }
+        ReleaseSysCache(tuple);
+        check_disk_quota_by_oid(rolentry->owneroid, rolentry->totalsize, ROLE_QUOTA);
+    }
+}
+
+/*
+ * Load quotas from the diskquota configuration table (quota_config).
+ */
+static bool
+load_quotas(void)
+{
+    int     ret;
+    TupleDesc   tupdesc;
+    int     i;
+    bool    found;
+    QuotaLimitEntry* quota_entry;
+    HASH_SEQ_STATUS iter;
+
+    RangeVar   *rv;
+    Relation    rel;
+
+    rv = makeRangeVar("diskquota", "quota_config", -1);
+    rel = heap_openrv_extended(rv, AccessShareLock, true);
+    if (!rel)
+    {
+        /* configuration table is missing.
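+         * This can happen when the diskquota extension has been dropped
+         * while the worker is still running; skip this refresh cycle.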
+         */
+        elog(LOG, "configuration table \"quota_config\" is missing in database \"%s\","
+             " please recreate diskquota extension",
+             get_database_name(MyDatabaseId));
+        return false;
+    }
+    heap_close(rel, NoLock);
+
+    /* clear entries in the quota limit maps */
+    hash_seq_init(&iter, namespace_quota_limit_map);
+    while ((quota_entry = hash_seq_search(&iter)) != NULL)
+    {
+        (void) hash_search(namespace_quota_limit_map,
+                           (void *) &quota_entry->targetoid,
+                           HASH_REMOVE, NULL);
+    }
+
+    hash_seq_init(&iter, role_quota_limit_map);
+    while ((quota_entry = hash_seq_search(&iter)) != NULL)
+    {
+        (void) hash_search(role_quota_limit_map,
+                           (void *) &quota_entry->targetoid,
+                           HASH_REMOVE, NULL);
+    }
+
+    ret = SPI_execute("select targetoid, quotatype, quotalimitMB from diskquota.quota_config", true, 0);
+    if (ret != SPI_OK_SELECT)
+        elog(FATAL, "SPI_execute failed: error code %d", ret);
+
+    tupdesc = SPI_tuptable->tupdesc;
+    if (tupdesc->natts != 3 ||
+        TupleDescAttr(tupdesc, 0)->atttypid != OIDOID ||
+        TupleDescAttr(tupdesc, 1)->atttypid != INT4OID ||
+        TupleDescAttr(tupdesc, 2)->atttypid != INT8OID)
+    {
+        elog(LOG, "configuration table \"quota_config\" is corrupted in database \"%s\","
+             " please recreate diskquota extension",
+             get_database_name(MyDatabaseId));
+        return false;
+    }
+
+    for (i = 0; i < SPI_processed; i++)
+    {
+        HeapTuple   tup = SPI_tuptable->vals[i];
+        Datum       dat;
+        Oid         targetOid;
+        int64       quota_limit_mb;
+        QuotaType   quotatype;
+        bool        isnull;
+
+        dat = SPI_getbinval(tup, tupdesc, 1, &isnull);
+        if (isnull)
+            continue;
+        targetOid = DatumGetObjectId(dat);
+
+        dat = SPI_getbinval(tup, tupdesc, 2, &isnull);
+        if (isnull)
+            continue;
+        quotatype = (QuotaType) DatumGetInt32(dat);
+
+        dat = SPI_getbinval(tup, tupdesc, 3, &isnull);
+        if (isnull)
+            continue;
+        quota_limit_mb = DatumGetInt64(dat);
+
+        if (quotatype == NAMESPACE_QUOTA)
+        {
+            quota_entry = (QuotaLimitEntry *) hash_search(namespace_quota_limit_map,
+                                                          &targetOid,
+                                                          HASH_ENTER, &found);
+            quota_entry->limitsize = quota_limit_mb;
+        }
+        else if (quotatype == ROLE_QUOTA)
+        {
+            quota_entry = (QuotaLimitEntry *) hash_search(role_quota_limit_map,
+                                                          &targetOid,
+                                                          HASH_ENTER, &found);
+            quota_entry->limitsize = quota_limit_mb;
+        }
+    }
+    return true;
+}
+
+/*
+ * Given a table oid, look up its owner and namespace.
+ */
+static void
+get_rel_owner_schema(Oid relid, Oid *ownerOid, Oid *nsOid)
+{
+    HeapTuple   tp;
+
+    tp = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
+    if (HeapTupleIsValid(tp))
+    {
+        Form_pg_class reltup = (Form_pg_class) GETSTRUCT(tp);
+        *ownerOid = reltup->relowner;
+        *nsOid = reltup->relnamespace;
+        ReleaseSysCache(tp);
+    }
+    return;
+}
+
+/*
+ * Given a table oid, check whether the quota limit of the table's schema
+ * or the table's owner has been reached.
+ * Report an error when the quota is exceeded.
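+ * Returns true when neither the schema nor the owner is present in the
+ * shared black map; otherwise an ERRCODE_DISK_FULL error is raised.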
+ */ +bool +quota_check_common(Oid reloid) +{ + Oid ownerOid = InvalidOid; + Oid nsOid = InvalidOid; + bool found; + BlackMapEntry keyitem; + memset(&keyitem, 0, sizeof(BlackMapEntry)); + get_rel_owner_schema(reloid, &ownerOid, &nsOid); + LWLockAcquire(black_map_shm_lock->lock, LW_SHARED); + + if ( nsOid != InvalidOid) + { + keyitem.targetoid = nsOid; + keyitem.databaseoid = MyDatabaseId; + keyitem.targettype = NAMESPACE_QUOTA; + hash_search(disk_quota_black_map, + &keyitem, + HASH_FIND, &found); + if (found) + { + ereport(ERROR, + (errcode(ERRCODE_DISK_FULL), + errmsg("schema's disk space quota exceeded with name:%s", get_namespace_name(nsOid)))); + return false; + } + + } + + if ( ownerOid != InvalidOid) + { + keyitem.targetoid = ownerOid; + keyitem.databaseoid = MyDatabaseId; + keyitem.targettype = ROLE_QUOTA; + hash_search(disk_quota_black_map, + &keyitem, + HASH_FIND, &found); + if (found) + { + ereport(ERROR, + (errcode(ERRCODE_DISK_FULL), + errmsg("role's disk space quota exceeded with name:%s", GetUserNameFromId(ownerOid, false)))); + return false; + } + } + LWLockRelease(black_map_shm_lock->lock); + return true; +} + diff --git a/sql/clean.sql b/sql/clean.sql new file mode 100644 index 00000000000..b999009fb50 --- /dev/null +++ b/sql/clean.sql @@ -0,0 +1,5 @@ +drop table badquota.t1; +drop role testbody; +drop schema badquota; + +drop extension diskquota; diff --git a/sql/dummy.sql b/sql/dummy.sql new file mode 100644 index 00000000000..e69de29bb2d diff --git a/sql/empty.sql b/sql/empty.sql new file mode 100644 index 00000000000..e69de29bb2d diff --git a/sql/prepare.sql b/sql/prepare.sql new file mode 100644 index 00000000000..0392c339b75 --- /dev/null +++ b/sql/prepare.sql @@ -0,0 +1,15 @@ +create extension diskquota; +select pg_sleep(1); +\! pg_ctl -D /tmp/pg_diskquota_test/data reload +\! 
cp data/csmall.txt /tmp/csmall.txt +select pg_sleep(5); + +-- prepare a schema that has reached quota limit +create schema badquota; +select diskquota.set_schema_quota('badquota', '1 MB'); +create role testbody; +create table badquota.t1(i int); +alter table badquota.t1 owner to testbody; +insert into badquota.t1 select generate_series(0, 50000); +select pg_sleep(5); +insert into badquota.t1 select generate_series(0, 10); diff --git a/sql/test_column.sql b/sql/test_column.sql new file mode 100644 index 00000000000..62f0f0770d8 --- /dev/null +++ b/sql/test_column.sql @@ -0,0 +1,19 @@ +-- Test alter table add column +create schema scolumn; +select diskquota.set_schema_quota('scolumn', '1 MB'); +set search_path to scolumn; +select pg_sleep(5); + +create table a2(i int); +insert into a2 select generate_series(1,20000); +insert into a2 select generate_series(1,10); +ALTER TABLE a2 ADD COLUMN j varchar(50); +update a2 set j = 'add value for column j'; +select pg_sleep(5); +-- expect insert failed after add column +insert into a2 select generate_series(1,10); + +drop table a2; +reset search_path; +drop schema scolumn; + diff --git a/sql/test_copy.sql b/sql/test_copy.sql new file mode 100644 index 00000000000..07a525601d6 --- /dev/null +++ b/sql/test_copy.sql @@ -0,0 +1,17 @@ +-- Test copy +create schema s3; +select diskquota.set_schema_quota('s3', '1 MB'); +set search_path to s3; + +create table c (i int); +copy c from '/tmp/csmall.txt'; +-- expect failed +insert into c select generate_series(1,100000000); +select pg_sleep(5); +-- select pg_total_table_size('c'); +-- expect copy fail +copy c from '/tmp/csmall.txt'; + +drop table c; +reset search_path; +drop schema s3; diff --git a/sql/test_drop_table.sql b/sql/test_drop_table.sql new file mode 100644 index 00000000000..21147c38675 --- /dev/null +++ b/sql/test_drop_table.sql @@ -0,0 +1,20 @@ +-- Test Drop table +create schema sdrtbl; +select diskquota.set_schema_quota('sdrtbl', '1 MB'); +set search_path to sdrtbl; +create table a(i int); +create table a2(i int); +insert into a select generate_series(1,100); +-- expect insert fail +insert into a select generate_series(1,100000000); +-- expect insert fail +insert into a2 select generate_series(1,100); +drop table a; +select pg_sleep(5); +insert into a2 select generate_series(1,100); + +drop table a2; +reset search_path; +drop schema sdrtbl; + + diff --git a/sql/test_partition.sql b/sql/test_partition.sql new file mode 100644 index 00000000000..e9eae44be63 --- /dev/null +++ b/sql/test_partition.sql @@ -0,0 +1,34 @@ +-- Test partition table +create schema s8; +select diskquota.set_schema_quota('s8', '1 MB'); +set search_path to s8; +CREATE TABLE measurement ( + city_id int not null, + logdate date not null, + peaktemp int, + unitsales int +)PARTITION BY RANGE (logdate); +CREATE TABLE measurement_y2006m02 PARTITION OF measurement + FOR VALUES FROM ('2006-02-01') TO ('2006-03-01'); + +CREATE TABLE measurement_y2006m03 PARTITION OF measurement + FOR VALUES FROM ('2006-03-01') TO ('2006-04-01'); +insert into measurement select generate_series(1,15000), '2006-02-01' ,1,1; +select pg_sleep(5); +insert into measurement select 1, '2006-02-01' ,1,1; +-- expect insert fail +insert into measurement select generate_series(1,100000000), '2006-03-02' ,1,1; +-- expect insert fail +insert into measurement select 1, '2006-02-01' ,1,1; +-- expect insert fail +insert into measurement select 1, '2006-03-03' ,1,1; +delete from measurement where logdate='2006-03-02'; +vacuum full measurement; +select pg_sleep(5); +insert 
into measurement select 1, '2006-02-01' ,1,1; +insert into measurement select 1, '2006-03-03' ,1,1; + +drop table measurement; +reset search_path; +drop schema s8; + diff --git a/sql/test_rename.sql b/sql/test_rename.sql new file mode 100644 index 00000000000..3516bb06f17 --- /dev/null +++ b/sql/test_rename.sql @@ -0,0 +1,48 @@ +-- test rename schema +create schema srs1; +select diskquota.set_schema_quota('srs1', '1 MB'); +set search_path to srs1; +create table a(i int); +-- expect insert fail +insert into a select generate_series(1,100000000); +-- expect insert fail +insert into a select generate_series(1,10); +alter schema srs1 rename to srs2; +set search_path to srs2; + +-- expect insert fail +insert into a select generate_series(1,10); +-- test rename table +alter table a rename to a2; +-- expect insert fail +insert into a2 select generate_series(1,10); + +drop table a2; +reset search_path; +drop schema srs2; + +-- test rename role +create schema srr1; +create role srerole nologin; +select diskquota.set_role_quota('srerole', '1MB'); +set search_path to srr1; +create table a(i int); +alter table a owner to srerole; + +-- expect insert fail +insert into a select generate_series(1,100000000); +-- expect insert fail +insert into a select generate_series(1,10); +alter role srerole rename to srerole2; +-- expect insert fail +insert into a select generate_series(1,10); +-- test rename table +alter table a rename to a2; +-- expect insert fail +insert into a2 select generate_series(1,10); + +drop table a2; +drop role srerole2; +reset search_path; +drop schema srr1; + diff --git a/sql/test_reschema.sql b/sql/test_reschema.sql new file mode 100644 index 00000000000..66b690341cf --- /dev/null +++ b/sql/test_reschema.sql @@ -0,0 +1,19 @@ +-- Test re-set_schema_quota +create schema srE; +select diskquota.set_schema_quota('srE', '1 MB'); +set search_path to srE; +create table a(i int); +-- expect insert fail +insert into a select generate_series(1,1000000000); +-- expect insert fail when exceed quota limit +insert into a select generate_series(1,1000); +-- set schema quota larger +select diskquota.set_schema_quota('srE', '1 GB'); +select pg_sleep(5); +-- expect insert succeed +insert into a select generate_series(1,1000); + +drop table a; +reset search_path; +drop schema srE; + diff --git a/sql/test_role.sql b/sql/test_role.sql new file mode 100644 index 00000000000..8a482f7fb6d --- /dev/null +++ b/sql/test_role.sql @@ -0,0 +1,32 @@ +-- Test role quota + +create schema srole; +set search_path to srole; + +CREATE role u1 NOLOGIN; +CREATE role u2 NOLOGIN; +CREATE TABLE b (t text); +ALTER TABLE b OWNER TO u1; +CREATE TABLE b2 (t text); +ALTER TABLE b2 OWNER TO u1; + +select diskquota.set_role_quota('u1', '1 MB'); + +insert into b select generate_series(1,100); +-- expect insert fail +insert into b select generate_series(1,100000000); +-- expect insert fail +insert into b select generate_series(1,100); +-- expect insert fail +insert into b2 select generate_series(1,100); +alter table b owner to u2; +select pg_sleep(5); +-- expect insert succeed +insert into b select generate_series(1,100); +-- expect insert succeed +insert into b2 select generate_series(1,100); + +drop table b, b2; +drop role u1, u2; +reset search_path; +drop schema srole; diff --git a/sql/test_schema.sql b/sql/test_schema.sql new file mode 100644 index 00000000000..392e898d8d6 --- /dev/null +++ b/sql/test_schema.sql @@ -0,0 +1,32 @@ +-- Test schema +create schema s1; +select diskquota.set_schema_quota('s1', '1 MB'); +set search_path to 
s1; + +create table a(i int); +insert into a select generate_series(1,100); +-- expect insert fail +insert into a select generate_series(1,100000000); +-- expect insert fail +insert into a select generate_series(1,100); +create table a2(i int); +-- expect insert fail +insert into a2 select generate_series(1,100); + +-- Test alter table set schema +create schema s2; +alter table s1.a set schema s2; +select pg_sleep(5); +-- expect insert succeed +insert into a2 select generate_series(1,20000); +-- expect insert succeed +insert into s2.a select generate_series(1,20000); + +alter table s2.a set schema badquota; +-- expect failed +insert into badquota.a select generate_series(0, 100); + +reset search_path; +drop table s1.a2, badquota.a; +drop schema s1, s2; + diff --git a/sql/test_temp_role.sql b/sql/test_temp_role.sql new file mode 100644 index 00000000000..8dc082f455f --- /dev/null +++ b/sql/test_temp_role.sql @@ -0,0 +1,22 @@ +-- Test temp table restrained by role id +create schema strole; +create role u3temp nologin; +set search_path to strole; + +select diskquota.set_role_quota('u3temp', '1MB'); +create table a(i int); +alter table a owner to u3temp; +create temp table ta(i int); +alter table ta owner to u3temp; + +-- expected failed: fill temp table +insert into ta select generate_series(1,100000000); +-- expected failed: +insert into a select generate_series(1,100); +drop table ta; +select pg_sleep(5); +insert into a select generate_series(1,100); + +drop table a; +reset search_path; +drop schema strole; diff --git a/sql/test_toast.sql b/sql/test_toast.sql new file mode 100644 index 00000000000..7ee6e3666ee --- /dev/null +++ b/sql/test_toast.sql @@ -0,0 +1,23 @@ +-- Test toast +create schema s5; +select diskquota.set_schema_quota('s5', '1 MB'); +set search_path to s5; +CREATE TABLE a5 (message text); +INSERT INTO a5 +SELECT (SELECT + string_agg(chr(floor(random() * 26)::int + 65), '') + FROM generate_series(1,10000)) +FROM generate_series(1,10); + +select pg_sleep(5); +-- expect insert toast fail +INSERT INTO a5 +SELECT (SELECT + string_agg(chr(floor(random() * 26)::int + 65), '') + FROM generate_series(1,100000)) +FROM generate_series(1,1000000); + +drop table a5; +reset search_path; +drop schema s5; + diff --git a/sql/test_truncate.sql b/sql/test_truncate.sql new file mode 100644 index 00000000000..d19c10777d5 --- /dev/null +++ b/sql/test_truncate.sql @@ -0,0 +1,21 @@ +-- Test truncate +create schema s7; +select diskquota.set_schema_quota('s7', '1 MB'); +set search_path to s7; +create table a (i int); +create table b (i int); +insert into a select generate_series(1,50000); +select pg_sleep(5); +-- expect insert fail +insert into a select generate_series(1,30); +insert into b select generate_series(1,30); +truncate table a; +select pg_sleep(5); +-- expect insert succeed +insert into a select generate_series(1,30); +insert into b select generate_series(1,30); + +drop table a, b; +reset search_path; +drop schema s7; + diff --git a/sql/test_update.sql b/sql/test_update.sql new file mode 100644 index 00000000000..506cf4022cd --- /dev/null +++ b/sql/test_update.sql @@ -0,0 +1,13 @@ +-- Test Update +create schema s4; +select diskquota.set_schema_quota('s4', '1 MB'); +set search_path to s4; +create table a(i int); +insert into a select generate_series(1,50000); +select pg_sleep(5); +-- expect update fail. 
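+-- (update writes new row versions, so it is also blocked by the quota check)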
+update a set i = 100;
+drop table a;
+reset search_path;
+drop schema s4;
+
diff --git a/sql/test_vacuum.sql b/sql/test_vacuum.sql
new file mode 100644
index 00000000000..2f651d22af0
--- /dev/null
+++ b/sql/test_vacuum.sql
@@ -0,0 +1,23 @@
+-- Test vacuum full
+create schema s6;
+select diskquota.set_schema_quota('s6', '1 MB');
+set search_path to s6;
+create table a (i int);
+create table b (i int);
+insert into a select generate_series(1,50000);
+select pg_sleep(5);
+-- expect insert fail
+insert into a select generate_series(1,10);
+-- expect insert fail
+insert into b select generate_series(1,10);
+delete from a where i > 10;
+vacuum full a;
+select pg_sleep(5);
+-- expect insert succeed
+insert into a select generate_series(1,10);
+insert into b select generate_series(1,10);
+
+drop table a, b;
+reset search_path;
+drop schema s6;
+
diff --git a/test_diskquota.conf b/test_diskquota.conf
new file mode 100644
index 00000000000..03b04262ee2
--- /dev/null
+++ b/test_diskquota.conf
@@ -0,0 +1,5 @@
+autovacuum = off
+fsync = on
+shared_preload_libraries = 'diskquota'
+diskquota.monitor_databases = 'contrib_regression'
+diskquota.naptime = 2

From e25f6e22a5ba219bea367d1742c35b8ae34b00f7 Mon Sep 17 00:00:00 2001
From: Hubert Zhang
Date: Tue, 6 Nov 2018 07:04:11 +0000
Subject: [PATCH 003/330] Add patch to add hook functions in postgres.

The hook function patch is not merged into postgres yet, so users need
to apply the patch manually to the PostgreSQL source code.

Co-authored-by: Haozhou Wang
Co-authored-by: Hubert Zhang
Co-authored-by: Hao Wu
---
 README.md            |  22 +++++--
 patch/pg_hooks.patch | 141 +++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 157 insertions(+), 6 deletions(-)
 create mode 100644 patch/pg_hooks.patch

diff --git a/README.md b/README.md
index 15f595fe6a1..03918732227 100644
--- a/README.md
+++ b/README.md
@@ -32,27 +32,37 @@ The 'during query' one is implemented at BufferExtendCheckPerms_hook in function
 Quota limit of a schema or a role is stored in table 'quota_config' in 'diskquota' schema in monitored database. So each database stores and manages its own disk quota configuration. Note that although role is a db object in cluster level, we limit the diskquota of a role to be database specific. That is to say, a role may has different quota limit on different databases and their disk usage is isolated between databases.
+
 # Install
-1. Compile and install disk quota.
+1. Add hook functions to Postgres by applying the patch. This is required
+because diskquota needs some new hook functions in the PostgreSQL core.
+This step can be skipped once the patch has been merged into PostgreSQL.
+```
+# install patch into postgres_src and rebuild postgres.
+cd postgres_src;
+git apply $diskquota_src/patch/pg_hooks.patch;
+make;
+make install;
+```
+2. Compile and install disk quota.
 ```
-cd contrib/diskquota;
+cd $diskquota_src;
 make;
 make install;
 ```
-2. Config postgresql.conf
+3. Config postgresql.conf
 ```
 # enable diskquota in preload library.
 shared_preload_libraries = 'diskquota'
 # set monitored databases
 diskquota.monitor_databases = 'postgres'
-# set naptime (second) to refresh the disk quota stats periodically
+# set naptime (second) to refresh the disk quota stats periodically
 diskquota.naptime = 2
 ```
-3. Create diskquota extension in monitored database.
+4. Create diskquota extension in monitored database.
 ```
 create extension diskquota;
 ```
-4. Reload database configuraion
+5.
Reload database configuraion ``` # reset monitored database list in postgresql.conf diskquota.monitor_databases = 'postgres, postgres2' diff --git a/patch/pg_hooks.patch b/patch/pg_hooks.patch new file mode 100644 index 00000000000..0f67ec41cb5 --- /dev/null +++ b/patch/pg_hooks.patch @@ -0,0 +1,141 @@ +From ff8686c23badd5602bfb997c4fe761c19fa66f9e Mon Sep 17 00:00:00 2001 +From: Hubert Zhang +Date: Tue, 6 Nov 2018 06:51:22 +0000 +Subject: [PATCH] Add hooks for diskquota extension. + +Add BufferExtendCheckPerms_hook to support quota enforcement +Add SmgrStat_hook to detect active relfilenodes. + +Co-authored-by: Haozhou Wang +Co-authored-by: Hubert Zhang +Co-authored-by: Hao Wu +--- + src/backend/storage/buffer/bufmgr.c | 14 ++++++++++++++ + src/backend/storage/smgr/smgr.c | 21 ++++++++++++++++++++- + src/include/storage/bufmgr.h | 8 ++++++++ + src/include/storage/smgr.h | 6 ++++++ + 4 files changed, 48 insertions(+), 1 deletion(-) + +diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c +index 01eabe5..d977350 100644 +--- a/src/backend/storage/buffer/bufmgr.c ++++ b/src/backend/storage/buffer/bufmgr.c +@@ -104,6 +104,13 @@ typedef struct CkptTsStatus + int index; + } CkptTsStatus; + ++/* ++ * Hook for plugins to check permissions when doing a buffer extend. ++ * One example is to check whether there is additional disk quota for ++ * the table to be inserted. ++ */ ++BufferExtendCheckPerms_hook_type BufferExtendCheckPerms_hook = NULL; ++ + /* GUC variables */ + bool zero_damaged_pages = false; + int bgwriter_lru_maxpages = 100; +@@ -661,6 +668,13 @@ ReadBufferExtended(Relation reln, ForkNumber forkNum, BlockNumber blockNum, + * miss. + */ + pgstat_count_buffer_read(reln); ++ ++ /* check permissions when doing a buffer extend */ ++ if (blockNum == P_NEW && BufferExtendCheckPerms_hook) ++ { ++ (*BufferExtendCheckPerms_hook)(reln->rd_id, blockNum); ++ } ++ + buf = ReadBuffer_common(reln->rd_smgr, reln->rd_rel->relpersistence, + forkNum, blockNum, mode, strategy, &hit); + if (hit) +diff --git a/src/backend/storage/smgr/smgr.c b/src/backend/storage/smgr/smgr.c +index 189342e..c5b218e 100644 +--- a/src/backend/storage/smgr/smgr.c ++++ b/src/backend/storage/smgr/smgr.c +@@ -90,7 +90,11 @@ static const f_smgr smgrsw[] = { + + static const int NSmgr = lengthof(smgrsw); + +- ++/* ++ * Hook for plugins to collect statistics from smgr functions ++ * One example is to record the active relfilenode information. ++ */ ++SmgrStat_hook_type SmgrStat_hook = NULL; + /* + * Each backend has a hashtable that stores all extant SMgrRelation objects. + * In addition, "unowned" SMgrRelation objects are chained together in a list. +@@ -411,6 +415,11 @@ smgrcreate(SMgrRelation reln, ForkNumber forknum, bool isRedo) + isRedo); + + smgrsw[reln->smgr_which].smgr_create(reln, forknum, isRedo); ++ ++ if (SmgrStat_hook) ++ { ++ (*SmgrStat_hook)(reln); ++ } + } + + /* +@@ -617,6 +626,11 @@ smgrextend(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, + { + smgrsw[reln->smgr_which].smgr_extend(reln, forknum, blocknum, + buffer, skipFsync); ++ ++ if (SmgrStat_hook) ++ { ++ (*SmgrStat_hook)(reln); ++ } + } + + /* +@@ -720,6 +734,11 @@ smgrtruncate(SMgrRelation reln, ForkNumber forknum, BlockNumber nblocks) + * Do the truncation. 
+ */ + smgrsw[reln->smgr_which].smgr_truncate(reln, forknum, nblocks); ++ ++ if (SmgrStat_hook) ++ { ++ (*SmgrStat_hook)(reln); ++ } + } + + /* +diff --git a/src/include/storage/bufmgr.h b/src/include/storage/bufmgr.h +index 3cce390..153a7d3 100644 +--- a/src/include/storage/bufmgr.h ++++ b/src/include/storage/bufmgr.h +@@ -160,6 +160,14 @@ extern PGDLLIMPORT int32 *LocalRefCount; + #define BufferGetPage(buffer) ((Page)BufferGetBlock(buffer)) + + /* ++ * Hook for plugins to check permissions when doing a buffer extend. ++ * One example is to check whether there is additional disk quota for ++ * the table to be inserted. ++ */ ++typedef bool (*BufferExtendCheckPerms_hook_type) (Oid, BlockNumber); ++extern PGDLLIMPORT BufferExtendCheckPerms_hook_type BufferExtendCheckPerms_hook; ++ ++/* + * prototypes for functions in bufmgr.c + */ + extern bool ComputeIoConcurrency(int io_concurrency, double *target); +diff --git a/src/include/storage/smgr.h b/src/include/storage/smgr.h +index c843bbc..918e590 100644 +--- a/src/include/storage/smgr.h ++++ b/src/include/storage/smgr.h +@@ -144,5 +144,11 @@ extern void RememberFsyncRequest(RelFileNode rnode, ForkNumber forknum, + extern void ForgetRelationFsyncRequests(RelFileNode rnode, ForkNumber forknum); + extern void ForgetDatabaseFsyncRequests(Oid dbid); + extern void DropRelationFiles(RelFileNode *delrels, int ndelrels, bool isRedo); ++/* ++ * Hook for plugins to collect statistics from smgr functions ++ * One example is to record the active relfilenode information. ++ */ ++typedef void (*SmgrStat_hook_type)(SMgrRelation sreln); ++extern PGDLLIMPORT SmgrStat_hook_type SmgrStat_hook; + + #endif /* SMGR_H */ +-- +1.8.3.1 + From d745b67809d171e5e82ac77aa619bb2f7aed0bf7 Mon Sep 17 00:00:00 2001 From: Hubert Zhang Date: Tue, 13 Nov 2018 16:26:05 +0800 Subject: [PATCH 004/330] Update README.md --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 03918732227..f4d6ac0f701 100644 --- a/README.md +++ b/README.md @@ -56,6 +56,8 @@ shared_preload_libraries = 'diskquota' diskquota.monitor_databases = 'postgres' # set naptime (second) to refresh the disk quota stats periodically diskquota.naptime = 2 +# restart database to load preload library. +pg_ctl restart ``` 4. Create diskquota extension in monitored database. ``` From 77bd5ad126f68a4961bc692b6dcdd7d74aa7d618 Mon Sep 17 00:00:00 2001 From: Hubert Zhang Date: Wed, 21 Nov 2018 10:32:01 +0800 Subject: [PATCH 005/330] Fix gitignore to allow push answer file. --- .gitignore | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index c6127b38c1a..c0b8768012f 100644 --- a/.gitignore +++ b/.gitignore @@ -30,7 +30,7 @@ # Executables *.exe -*.out +a.out *.app *.i*86 *.x86_64 From b40117111d17322100f323da3f64fde299b4cc17 Mon Sep 17 00:00:00 2001 From: Hubert Zhang Date: Thu, 22 Nov 2018 14:33:23 +0800 Subject: [PATCH 006/330] Refactor active table logic. Put all the active table code into one file activetable.c Replace relfilenode convert to Oid code by built-in function Add answer files. 
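(The built-in function used here is RelidByRelfilenode() from
utils/relfilenodemap.h.)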
Co-authored-by: Haozhou Wang Co-authored-by: Hubert Zhang --- activetable.c | 145 ++++++++++++++++++++--------------- activetable.h | 4 +- data/generate_insert.py | 6 -- data/generate_table.py | 8 -- diskquota.c | 9 +-- enforcement.c | 2 - expected/clean.out | 4 + expected/dummy.out | 0 expected/empty.out | 0 expected/prepare.out | 36 +++++++++ expected/test_column.out | 32 ++++++++ expected/test_copy.out | 27 +++++++ expected/test_drop_table.out | 29 +++++++ expected/test_partition.out | 49 ++++++++++++ expected/test_rename.out | 60 +++++++++++++++ expected/test_reschema.out | 34 ++++++++ expected/test_role.out | 40 ++++++++++ expected/test_schema.out | 41 ++++++++++ expected/test_temp_role.out | 31 ++++++++ expected/test_toast.out | 31 ++++++++ expected/test_truncate.out | 36 +++++++++ expected/test_update.out | 23 ++++++ expected/test_vacuum.out | 38 +++++++++ quotamodel.c | 81 ++++--------------- 24 files changed, 611 insertions(+), 155 deletions(-) delete mode 100644 data/generate_insert.py delete mode 100644 data/generate_table.py create mode 100644 expected/clean.out create mode 100644 expected/dummy.out create mode 100644 expected/empty.out create mode 100644 expected/prepare.out create mode 100644 expected/test_column.out create mode 100644 expected/test_copy.out create mode 100644 expected/test_drop_table.out create mode 100644 expected/test_partition.out create mode 100644 expected/test_rename.out create mode 100644 expected/test_reschema.out create mode 100644 expected/test_role.out create mode 100644 expected/test_schema.out create mode 100644 expected/test_temp_role.out create mode 100644 expected/test_toast.out create mode 100644 expected/test_truncate.out create mode 100644 expected/test_update.out create mode 100644 expected/test_vacuum.out diff --git a/activetable.c b/activetable.c index e3c8e2dd78f..9748eef52d6 100644 --- a/activetable.c +++ b/activetable.c @@ -6,8 +6,6 @@ * * Copyright (C) 2013, PostgreSQL Global Development Group * - * IDENTIFICATION - * contrib/diskquota/activetable.c * * ------------------------------------------------------------------------- */ @@ -24,20 +22,21 @@ #include "storage/smgr.h" #include "utils/builtins.h" #include "utils/fmgroids.h" +#include "utils/relfilenodemap.h" #include "activetable.h" -#include "diskquota.h" HTAB *active_tables_map = NULL; static SmgrStat_hook_type prev_SmgrStat_hook = NULL; -static ScanKeyData relfilenode_skey[2]; static void report_active_table_SmgrStat(SMgrRelation reln); -HTAB* get_active_tables(void); +static HTAB* get_active_tables_stats(void); +static HTAB* get_all_tables_stats(void); + void init_active_table_hook(void); void init_shm_worker_active_tables(void); void init_lock_active_tables(void); -void init_relfilenode_key(void); +HTAB* pg_fetch_active_tables(bool); /* * Register smgr hook to detect active table. @@ -86,31 +85,75 @@ void init_lock_active_tables(void) } /* - * Init relfilenode key to index search table oid - * given relfilenode and tablespace. + * Fetch active table file size statistics. + * If force is true, then fetch all the tables. 
*/ -void -init_relfilenode_key(void) +HTAB* pg_fetch_active_tables(bool force) +{ + if (force) + { + return get_all_tables_stats(); + } + else + { + return get_active_tables_stats(); + } +} + +/* + * Get the table size statistics for all the tables + */ +static HTAB* +get_all_tables_stats() { - int i; + HTAB *local_table_stats_map = NULL; + HASHCTL ctl; + HeapTuple tuple; + Relation classRel; + HeapScanDesc relScan; + + memset(&ctl, 0, sizeof(ctl)); + ctl.keysize = sizeof(Oid); + ctl.entrysize = sizeof(DiskQuotaActiveTableEntry); + ctl.hcxt = CurrentMemoryContext; + ctl.hash = oid_hash; - /* build skey */ - MemSet(&relfilenode_skey, 0, sizeof(relfilenode_skey)); + local_table_stats_map = hash_create("local table map with table size info", + 1024, + &ctl, + HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); + + classRel = heap_open(RelationRelationId, AccessShareLock); + relScan = heap_beginscan_catalog(classRel, 0, NULL); - for (i = 0; i < 2; i++) + while ((tuple = heap_getnext(relScan, ForwardScanDirection)) != NULL) { - fmgr_info_cxt(F_OIDEQ, - &relfilenode_skey[i].sk_func, - CacheMemoryContext); - relfilenode_skey[i].sk_strategy = BTEqualStrategyNumber; - relfilenode_skey[i].sk_subtype = InvalidOid; - relfilenode_skey[i].sk_collation = InvalidOid; + Oid relOid; + DiskQuotaActiveTableEntry *entry; + + Form_pg_class classForm = (Form_pg_class) GETSTRUCT(tuple); + if (classForm->relkind != RELKIND_RELATION && + classForm->relkind != RELKIND_MATVIEW) + continue; + relOid = HeapTupleGetOid(tuple); + + /* ignore system table*/ + if (relOid < FirstNormalObjectId) + continue; + + entry = (DiskQuotaActiveTableEntry *) hash_search(local_table_stats_map, &relOid, HASH_ENTER, NULL); + + entry->tableoid = relOid; + entry->tablesize = (Size) DatumGetInt64(DirectFunctionCall1(pg_total_relation_size, + ObjectIdGetDatum(relOid))); + } - relfilenode_skey[0].sk_attno = Anum_pg_class_reltablespace; - relfilenode_skey[1].sk_attno = Anum_pg_class_relfilenode; -} + heap_endscan(relScan); + heap_close(classRel, AccessShareLock); + return local_table_stats_map; +} /* * Get local active table with table oid and table size info. * This function first copies active table map from shared memory @@ -118,7 +161,8 @@ init_relfilenode_key(void) * the local map and find corresponding table oid and table file * size. Finnaly stores them into local active table map and return. */ -HTAB* get_active_tables() +static HTAB* +get_active_tables_stats() { HASHCTL ctl; HTAB *local_active_table_file_map = NULL; @@ -127,9 +171,6 @@ HTAB* get_active_tables() DiskQuotaActiveTableFileEntry *active_table_file_entry; DiskQuotaActiveTableEntry *active_table_entry; - Relation relation; - HeapTuple tuple; - SysScanDesc relScan; Oid relOid; memset(&ctl, 0, sizeof(ctl)); @@ -178,7 +219,6 @@ HTAB* get_active_tables() &ctl, HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); - relation = heap_open(RelationRelationId, AccessShareLock); /* traverse local active table map and calculate their file size. 
*/ hash_seq_init(&iter, local_active_table_file_map); /* scan whole local map, get the oid of each table and calculate the size of them */ @@ -186,48 +226,27 @@ HTAB* get_active_tables() { Size tablesize; bool found; - ScanKeyData skey[2]; - Oid reltablespace; - reltablespace = active_table_file_entry->tablespaceoid; - - /* pg_class will show 0 when the value is actually MyDatabaseTableSpace */ - if (reltablespace == MyDatabaseTableSpace) - reltablespace = 0; - - /* set scan arguments */ - memcpy(skey, relfilenode_skey, sizeof(skey)); - skey[0].sk_argument = ObjectIdGetDatum(reltablespace); - skey[1].sk_argument = ObjectIdGetDatum(active_table_file_entry->relfilenode); - relScan = systable_beginscan(relation, - ClassTblspcRelfilenodeIndexId, - true, - NULL, - 2, - skey); - - tuple = systable_getnext(relScan); - - if (!HeapTupleIsValid(tuple)) + relOid = RelidByRelfilenode(active_table_file_entry->tablespaceoid, active_table_file_entry->relfilenode); + + //TODO replace DirectFunctionCall1 by a new total relation size function, which could handle Invalid relOid + /* avoid to generate ERROR if relOid is not existed (i.e. table has been droped) */ + PG_TRY(); { - systable_endscan(relScan); - continue; + tablesize = (Size) DatumGetInt64(DirectFunctionCall1(pg_total_relation_size, + ObjectIdGetDatum(relOid))); } - relOid = HeapTupleGetOid(tuple); - - /* Call function directly to get size of table by oid */ - tablesize = (Size) DatumGetInt64(DirectFunctionCall1(pg_total_relation_size, ObjectIdGetDatum(relOid))); - - active_table_entry = hash_search(local_active_table_stats_map, &relOid, HASH_ENTER, &found); - if (active_table_entry) + PG_CATCH(); { - active_table_entry->tableoid = relOid; - active_table_entry->tablesize = tablesize; + FlushErrorState(); + tablesize = 0; } - systable_endscan(relScan); + PG_END_TRY(); + active_table_entry = hash_search(local_active_table_stats_map, &relOid, HASH_ENTER, &found); + active_table_entry->tableoid = relOid; + active_table_entry->tablesize = tablesize; } elog(DEBUG1, "active table number is:%ld", hash_get_num_entries(local_active_table_file_map)); - heap_close(relation, AccessShareLock); hash_destroy(local_active_table_file_map); return local_active_table_stats_map; } diff --git a/activetable.h b/activetable.h index 64b721e3d07..76577b04de8 100644 --- a/activetable.h +++ b/activetable.h @@ -1,7 +1,6 @@ #ifndef ACTIVE_TABLE_H #define ACTIVE_TABLE_H -#include "storage/lwlock.h" #include "diskquota.h" /* Cache to detect the active table list */ @@ -19,9 +18,8 @@ typedef struct DiskQuotaActiveTableEntry } DiskQuotaActiveTableEntry; -extern HTAB* get_active_tables(void); +extern HTAB* pg_fetch_active_tables(bool); extern void init_active_table_hook(void); -extern void init_relfilenode_key(void); extern void init_shm_worker_active_tables(void); extern void init_lock_active_tables(void); diff --git a/data/generate_insert.py b/data/generate_insert.py deleted file mode 100644 index 34111804af5..00000000000 --- a/data/generate_insert.py +++ /dev/null @@ -1,6 +0,0 @@ -# generate insert statement -f = open('insertfile', 'w') -f.write("set search_path to perfs;\n"); -for i in range(1,2000): - f.write("insert into a"+str(i)+" select generate_series(1,3000);\n"); -f.close() diff --git a/data/generate_table.py b/data/generate_table.py deleted file mode 100644 index 88a4db938cb..00000000000 --- a/data/generate_table.py +++ /dev/null @@ -1,8 +0,0 @@ -#generate create table statement -f = open('tablefile', 'w') -f.write("create schema perfs;\nset search_path to perfs;\n"); 
-sql = "create table a" -sql2 = " (i int);\n" -for i in range(1,100000): - f.write(sql+str(i)+sql2+ "insert into a"+str(i)+" values(2);\n"); -f.close() diff --git a/diskquota.c b/diskquota.c index 546455fc640..802a753cb78 100644 --- a/diskquota.c +++ b/diskquota.c @@ -9,8 +9,6 @@ * * Copyright (C) 2013, PostgreSQL Global Development Group * - * IDENTIFICATION - * contrib/diskquota/diskquota.c * * ------------------------------------------------------------------------- */ @@ -266,9 +264,6 @@ disk_quota_launcher_main(Datum main_arg) /* We're now ready to receive signals */ BackgroundWorkerUnblockSignals(); - /* Connect to our database */ - //BackgroundWorkerInitializeConnection("postgres", NULL, 0); - memset(&hash_ctl, 0, sizeof(hash_ctl)); hash_ctl.keysize = NAMEDATALEN; hash_ctl.entrysize = sizeof(DiskQuotaWorkerEntry); @@ -279,7 +274,7 @@ disk_quota_launcher_main(Datum main_arg) HASH_ELEM); dblist = get_database_list(); - + elog(LOG,"diskquota launcher started"); foreach(cell, dblist) { char *db_name; @@ -468,6 +463,7 @@ start_worker(char* dbname) bool found; DiskQuotaWorkerEntry* workerentry; + memset(&worker, 0, sizeof(BackgroundWorker)); worker.bgw_flags = BGWORKER_SHMEM_ACCESS | BGWORKER_BACKEND_DATABASE_CONNECTION; worker.bgw_start_time = BgWorkerStart_RecoveryFinished; @@ -477,6 +473,7 @@ start_worker(char* dbname) snprintf(worker.bgw_name, BGW_MAXLEN, "%s", dbname); /* set bgw_notify_pid so that we can use WaitForBackgroundWorkerStartup */ worker.bgw_notify_pid = MyProcPid; + worker.bgw_main_arg = (Datum) 0; if (!RegisterDynamicBackgroundWorker(&worker, &handle)) return -1; diff --git a/enforcement.c b/enforcement.c index 72af5ba1550..e9737b7f5c2 100644 --- a/enforcement.c +++ b/enforcement.c @@ -7,8 +7,6 @@ * * Copyright (C) 2013, PostgreSQL Global Development Group * - * IDENTIFICATION - * contrib/diskquota/enforcement.c * * ------------------------------------------------------------------------- */ diff --git a/expected/clean.out b/expected/clean.out new file mode 100644 index 00000000000..b8578682904 --- /dev/null +++ b/expected/clean.out @@ -0,0 +1,4 @@ +drop table badquota.t1; +drop role testbody; +drop schema badquota; +drop extension diskquota; diff --git a/expected/dummy.out b/expected/dummy.out new file mode 100644 index 00000000000..e69de29bb2d diff --git a/expected/empty.out b/expected/empty.out new file mode 100644 index 00000000000..e69de29bb2d diff --git a/expected/prepare.out b/expected/prepare.out new file mode 100644 index 00000000000..647fb8dafd8 --- /dev/null +++ b/expected/prepare.out @@ -0,0 +1,36 @@ +create extension diskquota; +select pg_sleep(1); + pg_sleep +---------- + +(1 row) + +\! pg_ctl -D /tmp/pg_diskquota_test/data reload +server signaled +\! 
cp data/csmall.txt /tmp/csmall.txt +select pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- prepare a schema that has reached quota limit +create schema badquota; +select diskquota.set_schema_quota('badquota', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +create role testbody; +create table badquota.t1(i int); +alter table badquota.t1 owner to testbody; +insert into badquota.t1 select generate_series(0, 50000); +select pg_sleep(5); + pg_sleep +---------- + +(1 row) + +insert into badquota.t1 select generate_series(0, 10); +ERROR: schema's disk space quota exceeded with name:badquota diff --git a/expected/test_column.out b/expected/test_column.out new file mode 100644 index 00000000000..5a6762b737e --- /dev/null +++ b/expected/test_column.out @@ -0,0 +1,32 @@ +-- Test alter table add column +create schema scolumn; +select diskquota.set_schema_quota('scolumn', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +set search_path to scolumn; +select pg_sleep(5); + pg_sleep +---------- + +(1 row) + +create table a2(i int); +insert into a2 select generate_series(1,20000); +insert into a2 select generate_series(1,10); +ALTER TABLE a2 ADD COLUMN j varchar(50); +update a2 set j = 'add value for column j'; +select pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- expect insert failed after add column +insert into a2 select generate_series(1,10); +ERROR: schema's disk space quota exceeded with name:scolumn +drop table a2; +reset search_path; +drop schema scolumn; diff --git a/expected/test_copy.out b/expected/test_copy.out new file mode 100644 index 00000000000..5c00c476016 --- /dev/null +++ b/expected/test_copy.out @@ -0,0 +1,27 @@ +-- Test copy +create schema s3; +select diskquota.set_schema_quota('s3', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +set search_path to s3; +create table c (i int); +copy c from '/tmp/csmall.txt'; +-- expect failed +insert into c select generate_series(1,100000000); +ERROR: schema's disk space quota exceeded with name:s3 +select pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- select pg_total_table_size('c'); +-- expect copy fail +copy c from '/tmp/csmall.txt'; +ERROR: schema's disk space quota exceeded with name:s3 +drop table c; +reset search_path; +drop schema s3; diff --git a/expected/test_drop_table.out b/expected/test_drop_table.out new file mode 100644 index 00000000000..354be6bee55 --- /dev/null +++ b/expected/test_drop_table.out @@ -0,0 +1,29 @@ +-- Test Drop table +create schema sdrtbl; +select diskquota.set_schema_quota('sdrtbl', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +set search_path to sdrtbl; +create table a(i int); +create table a2(i int); +insert into a select generate_series(1,100); +-- expect insert fail +insert into a select generate_series(1,100000000); +ERROR: schema's disk space quota exceeded with name:sdrtbl +-- expect insert fail +insert into a2 select generate_series(1,100); +ERROR: schema's disk space quota exceeded with name:sdrtbl +drop table a; +select pg_sleep(5); + pg_sleep +---------- + +(1 row) + +insert into a2 select generate_series(1,100); +drop table a2; +reset search_path; +drop schema sdrtbl; diff --git a/expected/test_partition.out b/expected/test_partition.out new file mode 100644 index 00000000000..cea8f4cf21e --- /dev/null +++ b/expected/test_partition.out @@ -0,0 +1,49 @@ +-- Test partition table +create schema s8; +select diskquota.set_schema_quota('s8', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +set search_path to s8; +CREATE TABLE 
measurement ( + city_id int not null, + logdate date not null, + peaktemp int, + unitsales int +)PARTITION BY RANGE (logdate); +CREATE TABLE measurement_y2006m02 PARTITION OF measurement + FOR VALUES FROM ('2006-02-01') TO ('2006-03-01'); +CREATE TABLE measurement_y2006m03 PARTITION OF measurement + FOR VALUES FROM ('2006-03-01') TO ('2006-04-01'); +insert into measurement select generate_series(1,15000), '2006-02-01' ,1,1; +select pg_sleep(5); + pg_sleep +---------- + +(1 row) + +insert into measurement select 1, '2006-02-01' ,1,1; +-- expect insert fail +insert into measurement select generate_series(1,100000000), '2006-03-02' ,1,1; +ERROR: schema's disk space quota exceeded with name:s8 +-- expect insert fail +insert into measurement select 1, '2006-02-01' ,1,1; +ERROR: schema's disk space quota exceeded with name:s8 +-- expect insert fail +insert into measurement select 1, '2006-03-03' ,1,1; +ERROR: schema's disk space quota exceeded with name:s8 +delete from measurement where logdate='2006-03-02'; +vacuum full measurement; +select pg_sleep(5); + pg_sleep +---------- + +(1 row) + +insert into measurement select 1, '2006-02-01' ,1,1; +insert into measurement select 1, '2006-03-03' ,1,1; +drop table measurement; +reset search_path; +drop schema s8; diff --git a/expected/test_rename.out b/expected/test_rename.out new file mode 100644 index 00000000000..d089dfdb77d --- /dev/null +++ b/expected/test_rename.out @@ -0,0 +1,60 @@ +-- test rename schema +create schema srs1; +select diskquota.set_schema_quota('srs1', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +set search_path to srs1; +create table a(i int); +-- expect insert fail +insert into a select generate_series(1,100000000); +ERROR: schema's disk space quota exceeded with name:srs1 +-- expect insert fail +insert into a select generate_series(1,10); +ERROR: schema's disk space quota exceeded with name:srs1 +alter schema srs1 rename to srs2; +set search_path to srs2; +-- expect insert fail +insert into a select generate_series(1,10); +ERROR: schema's disk space quota exceeded with name:srs2 +-- test rename table +alter table a rename to a2; +-- expect insert fail +insert into a2 select generate_series(1,10); +ERROR: schema's disk space quota exceeded with name:srs2 +drop table a2; +reset search_path; +drop schema srs2; +-- test rename role +create schema srr1; +create role srerole nologin; +select diskquota.set_role_quota('srerole', '1MB'); + set_role_quota +---------------- + +(1 row) + +set search_path to srr1; +create table a(i int); +alter table a owner to srerole; +-- expect insert fail +insert into a select generate_series(1,100000000); +ERROR: role's disk space quota exceeded with name:srerole +-- expect insert fail +insert into a select generate_series(1,10); +ERROR: role's disk space quota exceeded with name:srerole +alter role srerole rename to srerole2; +-- expect insert fail +insert into a select generate_series(1,10); +ERROR: role's disk space quota exceeded with name:srerole2 +-- test rename table +alter table a rename to a2; +-- expect insert fail +insert into a2 select generate_series(1,10); +ERROR: role's disk space quota exceeded with name:srerole2 +drop table a2; +drop role srerole2; +reset search_path; +drop schema srr1; diff --git a/expected/test_reschema.out b/expected/test_reschema.out new file mode 100644 index 00000000000..2c2c0faf792 --- /dev/null +++ b/expected/test_reschema.out @@ -0,0 +1,34 @@ +-- Test re-set_schema_quota +create schema srE; +select diskquota.set_schema_quota('srE', '1 MB'); + 
set_schema_quota +------------------ + +(1 row) + +set search_path to srE; +create table a(i int); +-- expect insert fail +insert into a select generate_series(1,1000000000); +ERROR: schema's disk space quota exceeded with name:sre +-- expect insert fail when exceed quota limit +insert into a select generate_series(1,1000); +ERROR: schema's disk space quota exceeded with name:sre +-- set schema quota larger +select diskquota.set_schema_quota('srE', '1 GB'); + set_schema_quota +------------------ + +(1 row) + +select pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- expect insert succeed +insert into a select generate_series(1,1000); +drop table a; +reset search_path; +drop schema srE; diff --git a/expected/test_role.out b/expected/test_role.out new file mode 100644 index 00000000000..4e998a6fc84 --- /dev/null +++ b/expected/test_role.out @@ -0,0 +1,40 @@ +-- Test role quota +create schema srole; +set search_path to srole; +CREATE role u1 NOLOGIN; +CREATE role u2 NOLOGIN; +CREATE TABLE b (t text); +ALTER TABLE b OWNER TO u1; +CREATE TABLE b2 (t text); +ALTER TABLE b2 OWNER TO u1; +select diskquota.set_role_quota('u1', '1 MB'); + set_role_quota +---------------- + +(1 row) + +insert into b select generate_series(1,100); +-- expect insert fail +insert into b select generate_series(1,100000000); +ERROR: role's disk space quota exceeded with name:u1 +-- expect insert fail +insert into b select generate_series(1,100); +ERROR: role's disk space quota exceeded with name:u1 +-- expect insert fail +insert into b2 select generate_series(1,100); +ERROR: role's disk space quota exceeded with name:u1 +alter table b owner to u2; +select pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- expect insert succeed +insert into b select generate_series(1,100); +-- expect insert succeed +insert into b2 select generate_series(1,100); +drop table b, b2; +drop role u1, u2; +reset search_path; +drop schema srole; diff --git a/expected/test_schema.out b/expected/test_schema.out new file mode 100644 index 00000000000..a512464fb47 --- /dev/null +++ b/expected/test_schema.out @@ -0,0 +1,41 @@ +-- Test schema +create schema s1; +select diskquota.set_schema_quota('s1', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +set search_path to s1; +create table a(i int); +insert into a select generate_series(1,100); +-- expect insert fail +insert into a select generate_series(1,100000000); +ERROR: schema's disk space quota exceeded with name:s1 +-- expect insert fail +insert into a select generate_series(1,100); +ERROR: schema's disk space quota exceeded with name:s1 +create table a2(i int); +-- expect insert fail +insert into a2 select generate_series(1,100); +ERROR: schema's disk space quota exceeded with name:s1 +-- Test alter table set schema +create schema s2; +alter table s1.a set schema s2; +select pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- expect insert succeed +insert into a2 select generate_series(1,20000); +-- expect insert succeed +insert into s2.a select generate_series(1,20000); +alter table s2.a set schema badquota; +-- expect failed +insert into badquota.a select generate_series(0, 100); +ERROR: schema's disk space quota exceeded with name:badquota +reset search_path; +drop table s1.a2, badquota.a; +drop schema s1, s2; diff --git a/expected/test_temp_role.out b/expected/test_temp_role.out new file mode 100644 index 00000000000..2b800d2b055 --- /dev/null +++ b/expected/test_temp_role.out @@ -0,0 +1,31 @@ +-- Test temp table restrained by role id +create schema strole; +create role u3temp 
nologin; +set search_path to strole; +select diskquota.set_role_quota('u3temp', '1MB'); + set_role_quota +---------------- + +(1 row) + +create table a(i int); +alter table a owner to u3temp; +create temp table ta(i int); +alter table ta owner to u3temp; +-- expected failed: fill temp table +insert into ta select generate_series(1,100000000); +ERROR: role's disk space quota exceeded with name:u3temp +-- expected failed: +insert into a select generate_series(1,100); +ERROR: role's disk space quota exceeded with name:u3temp +drop table ta; +select pg_sleep(5); + pg_sleep +---------- + +(1 row) + +insert into a select generate_series(1,100); +drop table a; +reset search_path; +drop schema strole; diff --git a/expected/test_toast.out b/expected/test_toast.out new file mode 100644 index 00000000000..454cf3f50aa --- /dev/null +++ b/expected/test_toast.out @@ -0,0 +1,31 @@ +-- Test toast +create schema s5; +select diskquota.set_schema_quota('s5', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +set search_path to s5; +CREATE TABLE a5 (message text); +INSERT INTO a5 +SELECT (SELECT + string_agg(chr(floor(random() * 26)::int + 65), '') + FROM generate_series(1,10000)) +FROM generate_series(1,10); +select pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- expect insert toast fail +INSERT INTO a5 +SELECT (SELECT + string_agg(chr(floor(random() * 26)::int + 65), '') + FROM generate_series(1,100000)) +FROM generate_series(1,1000000); +ERROR: schema's disk space quota exceeded with name:s5 +drop table a5; +reset search_path; +drop schema s5; diff --git a/expected/test_truncate.out b/expected/test_truncate.out new file mode 100644 index 00000000000..4c1ad13606f --- /dev/null +++ b/expected/test_truncate.out @@ -0,0 +1,36 @@ +-- Test truncate +create schema s7; +select diskquota.set_schema_quota('s7', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +set search_path to s7; +create table a (i int); +create table b (i int); +insert into a select generate_series(1,50000); +select pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- expect insert fail +insert into a select generate_series(1,30); +ERROR: schema's disk space quota exceeded with name:s7 +insert into b select generate_series(1,30); +ERROR: schema's disk space quota exceeded with name:s7 +truncate table a; +select pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- expect insert succeed +insert into a select generate_series(1,30); +insert into b select generate_series(1,30); +drop table a, b; +reset search_path; +drop schema s7; diff --git a/expected/test_update.out b/expected/test_update.out new file mode 100644 index 00000000000..cffde58595d --- /dev/null +++ b/expected/test_update.out @@ -0,0 +1,23 @@ +-- Test Update +create schema s4; +select diskquota.set_schema_quota('s4', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +set search_path to s4; +create table a(i int); +insert into a select generate_series(1,50000); +select pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- expect update fail. 
+update a set i = 100; +ERROR: schema's disk space quota exceeded with name:s4 +drop table a; +reset search_path; +drop schema s4; diff --git a/expected/test_vacuum.out b/expected/test_vacuum.out new file mode 100644 index 00000000000..d5256b0a640 --- /dev/null +++ b/expected/test_vacuum.out @@ -0,0 +1,38 @@ +-- Test vacuum full +create schema s6; +select diskquota.set_schema_quota('s6', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +set search_path to s6; +create table a (i int); +create table b (i int); +insert into a select generate_series(1,50000); +select pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- expect insert fail +insert into a select generate_series(1,10); +ERROR: schema's disk space quota exceeded with name:s6 +-- expect insert fail +insert into b select generate_series(1,10); +ERROR: schema's disk space quota exceeded with name:s6 +delete from a where i > 10; +vacuum full a; +select pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- expect insert succeed +insert into a select generate_series(1,10); +insert into b select generate_series(1,10); +drop table a, b; +reset search_path; +drop schema s6; diff --git a/quotamodel.c b/quotamodel.c index 2f5a6f2fd98..38f868b977f 100644 --- a/quotamodel.c +++ b/quotamodel.c @@ -7,8 +7,6 @@ * * Copyright (C) 2013, PostgreSQL Global Development Group * - * IDENTIFICATION - * contrib/diskquota/quotamodel.c * * ------------------------------------------------------------------------- */ @@ -124,7 +122,6 @@ static void calculate_table_disk_usage(bool force); static void calculate_schema_disk_usage(void); static void calculate_role_disk_usage(void); static void flush_local_black_map(void); -static void reset_local_black_map(void); static void check_disk_quota_by_oid(Oid targetOid, int64 current_usage, QuotaType type); static void update_namespace_map(Oid namespaceoid, int64 updatesize); static void update_role_map(Oid owneroid, int64 updatesize); @@ -231,7 +228,7 @@ init_disk_quota_model(void) hash_ctl.hash = oid_hash; table_size_map = hash_create("TableSizeEntry map", - 1024, + 1024 * 8, &hash_ctl, HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); @@ -283,7 +280,6 @@ init_disk_quota_model(void) MAX_LOCAL_DISK_QUOTA_BLACK_ENTRIES, &hash_ctl, HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); - init_relfilenode_key(); } /* @@ -316,8 +312,6 @@ refresh_disk_quota_model(bool force) static void refresh_disk_quota_usage(bool force) { - /* copy shared black map to local black map */ - reset_local_black_map(); /* recalculate the disk usage of table, schema and role */ calculate_table_disk_usage(force); calculate_schema_disk_usage(); @@ -363,6 +357,7 @@ flush_local_black_map(void) blackentry->targettype = localblackentry->keyitem.targettype; } } + localblackentry->isexceeded = false; } else { @@ -370,50 +365,14 @@ flush_local_black_map(void) (void) hash_search(disk_quota_black_map, (void *) &localblackentry->keyitem, HASH_REMOVE, NULL); + (void) hash_search(local_disk_quota_black_map, + (void *) &localblackentry->keyitem, + HASH_REMOVE, NULL); } } LWLockRelease(black_map_shm_lock->lock); } -/* Fetch the new blacklist from shared blacklist at each refresh iteration. 
*/ -static void -reset_local_black_map(void) -{ - HASH_SEQ_STATUS iter; - LocalBlackMapEntry* localblackentry; - BlackMapEntry* blackentry; - bool found; - /* clear entries in local black map*/ - hash_seq_init(&iter, local_disk_quota_black_map); - - while ((localblackentry = hash_seq_search(&iter)) != NULL) - { - (void) hash_search(local_disk_quota_black_map, - (void *) &localblackentry->keyitem, - HASH_REMOVE, NULL); - } - - /* get black map copy from shared black map */ - LWLockAcquire(black_map_shm_lock->lock, LW_SHARED); - hash_seq_init(&iter, disk_quota_black_map); - while ((blackentry = hash_seq_search(&iter)) != NULL) - { - /* only reset entries for current db */ - if (blackentry->databaseoid == MyDatabaseId) - { - localblackentry = (LocalBlackMapEntry*) hash_search(local_disk_quota_black_map, - (void *) blackentry, - HASH_ENTER, &found); - if (!found) - { - localblackentry->isexceeded = false; - } - } - } - LWLockRelease(black_map_shm_lock->lock); - -} - /* * Compare the disk quota limit and current usage of a database object. * Put them into local blacklist if quota limit is exceeded. @@ -549,11 +508,11 @@ static void calculate_table_disk_usage(bool force) { bool found; - bool active_tbl_found; + bool active_tbl_found = false; Relation classRel; HeapTuple tuple; HeapScanDesc relScan; - TableSizeEntry *tsentry; + TableSizeEntry *tsentry = NULL; Oid relOid; HASH_SEQ_STATUS iter; HTAB *local_active_table_stat_map; @@ -562,7 +521,7 @@ calculate_table_disk_usage(bool force) classRel = heap_open(RelationRelationId, AccessShareLock); relScan = heap_beginscan_catalog(classRel, 0, NULL); - local_active_table_stat_map = get_active_tables(); + local_active_table_stat_map = pg_fetch_active_tables(force); /* unset is_exist flag for tsentry in table_size_map*/ hash_seq_init(&iter, table_size_map); @@ -599,7 +558,7 @@ calculate_table_disk_usage(bool force) active_table_entry = (DiskQuotaActiveTableEntry *) hash_search(local_active_table_stat_map, &relOid, HASH_FIND, &active_tbl_found); /* skip to recalculate the tables which are not in active list and not at initializatio stage*/ - if(active_tbl_found || force) + if(active_tbl_found) { /* namespace and owner may be changed since last check*/ @@ -609,28 +568,17 @@ calculate_table_disk_usage(bool force) tsentry->reloid = relOid; tsentry->namespaceoid = classForm->relnamespace; tsentry->owneroid = classForm->relowner; - if (!force) - { - tsentry->totalsize = (int64) active_table_entry->tablesize; - } - else - { - tsentry->totalsize = DatumGetInt64(DirectFunctionCall1(pg_total_relation_size, - ObjectIdGetDatum(relOid))); - } + tsentry->totalsize = (int64) active_table_entry->tablesize; update_namespace_map(tsentry->namespaceoid, tsentry->totalsize); update_role_map(tsentry->owneroid, tsentry->totalsize); } else { /* if not new table in table_size_map, it must be in active table list */ - if (active_tbl_found) - { - int64 oldtotalsize = tsentry->totalsize; - tsentry->totalsize = (int64) active_table_entry->tablesize; - update_namespace_map(tsentry->namespaceoid, tsentry->totalsize - oldtotalsize); - update_role_map(tsentry->owneroid, tsentry->totalsize - oldtotalsize); - } + int64 oldtotalsize = tsentry->totalsize; + tsentry->totalsize = (int64) active_table_entry->tablesize; + update_namespace_map(tsentry->namespaceoid, tsentry->totalsize - oldtotalsize); + update_role_map(tsentry->owneroid, tsentry->totalsize - oldtotalsize); } } @@ -896,4 +844,3 @@ quota_check_common(Oid reloid) LWLockRelease(black_map_shm_lock->lock); return true; } - From 
475d5cc2e1dd2bed599189d75909e1f14ec53261 Mon Sep 17 00:00:00 2001 From: Haozhou Wang Date: Mon, 26 Nov 2018 14:57:27 +0800 Subject: [PATCH 007/330] Update hook function points in postgres (#1) 1. update hook function points, add smgrdounlinkall hook point 2. refactor code for pg master branch --- activetable.c | 56 ++++++++++++++-- enforcement.c | 26 ++++---- patch/pg_hooks.patch | 148 +++++++++++++++++++++++++++++-------------- quotamodel.c | 2 +- 4 files changed, 161 insertions(+), 71 deletions(-) diff --git a/activetable.c b/activetable.c index 9748eef52d6..6da44b364e4 100644 --- a/activetable.c +++ b/activetable.c @@ -27,7 +27,20 @@ #include "activetable.h" HTAB *active_tables_map = NULL; -static SmgrStat_hook_type prev_SmgrStat_hook = NULL; +static smgrcreate_hook_type prev_smgrcreate_hook = NULL; +static smgrextend_hook_type prev_smgrextend_hook = NULL; +static smgrtruncate_hook_type prev_smgrtruncate_hook = NULL; +static void active_table_hook_smgrcreate(SMgrRelation reln, + ForkNumber forknum, + bool isRedo); +static void active_table_hook_smgrextend(SMgrRelation reln, + ForkNumber forknum, + BlockNumber blocknum, + char *buffer, + bool skipFsync); +static void active_table_hook_smgrtruncate(SMgrRelation reln, + ForkNumber forknum, + BlockNumber blocknum); static void report_active_table_SmgrStat(SMgrRelation reln); static HTAB* get_active_tables_stats(void); @@ -44,8 +57,40 @@ HTAB* pg_fetch_active_tables(bool); void init_active_table_hook(void) { - prev_SmgrStat_hook = SmgrStat_hook; - SmgrStat_hook = report_active_table_SmgrStat; + prev_smgrcreate_hook = smgrcreate_hook; + smgrcreate_hook = active_table_hook_smgrcreate; + + prev_smgrextend_hook = smgrextend_hook; + smgrextend_hook = active_table_hook_smgrextend; + + prev_smgrtruncate_hook = smgrtruncate_hook; + smgrtruncate_hook = active_table_hook_smgrtruncate; +} + +static void +active_table_hook_smgrcreate(SMgrRelation reln, + pg_attribute_unused() ForkNumber forknum, + pg_attribute_unused() bool isRedo) +{ + report_active_table_SmgrStat(reln); +} + +static void +active_table_hook_smgrextend(SMgrRelation reln, + pg_attribute_unused() ForkNumber forknum, + pg_attribute_unused() BlockNumber blocknum, + pg_attribute_unused() char *buffer, + pg_attribute_unused() bool skipFsync) +{ + report_active_table_SmgrStat(reln); +} + +static void +active_table_hook_smgrtruncate(SMgrRelation reln, + pg_attribute_unused() ForkNumber forknum, + pg_attribute_unused() BlockNumber blocknum) +{ + report_active_table_SmgrStat(reln); } /* @@ -135,7 +180,7 @@ get_all_tables_stats() if (classForm->relkind != RELKIND_RELATION && classForm->relkind != RELKIND_MATVIEW) continue; - relOid = HeapTupleGetOid(tuple); + relOid = classForm->oid; /* ignore system table*/ if (relOid < FirstNormalObjectId) @@ -264,9 +309,6 @@ report_active_table_SmgrStat(SMgrRelation reln) DiskQuotaActiveTableFileEntry item; bool found = false; - if (prev_SmgrStat_hook) - (*prev_SmgrStat_hook)(reln); - MemSet(&item, 0, sizeof(DiskQuotaActiveTableFileEntry)); item.dbid = reln->smgr_rnode.node.dbNode; item.relfilenode = reln->smgr_rnode.node.relNode; diff --git a/enforcement.c b/enforcement.c index e9737b7f5c2..02cadf355b0 100644 --- a/enforcement.c +++ b/enforcement.c @@ -14,14 +14,17 @@ #include "executor/executor.h" #include "storage/bufmgr.h" +#include "utils/rel.h" #include "diskquota.h" static bool quota_check_ExecCheckRTPerms(List *rangeTable, bool ereport_on_violation); -static bool quota_check_ReadBufferExtendCheckPerms(Oid reloid, BlockNumber blockNum); +static bool 
quota_check_ReadBufferExtendCheckPerms(Relation reln, ForkNumber forkNum, + BlockNumber blockNum, ReadBufferMode mode, + BufferAccessStrategy strategy); static ExecutorCheckPerms_hook_type prev_ExecutorCheckPerms_hook; -static BufferExtendCheckPerms_hook_type prev_BufferExtendCheckPerms_hook; +static ReadBufferExtended_hook_type prev_ReadBufferExtended_hook; /* * Initialize enforcement hooks. @@ -34,8 +37,8 @@ init_disk_quota_enforcement(void) ExecutorCheckPerms_hook = quota_check_ExecCheckRTPerms; /* enforcement hook during query is loading data*/ - prev_BufferExtendCheckPerms_hook = BufferExtendCheckPerms_hook; - BufferExtendCheckPerms_hook = quota_check_ReadBufferExtendCheckPerms; + prev_ReadBufferExtended_hook = ReadBufferExtended_hook; + ReadBufferExtended_hook = quota_check_ReadBufferExtendCheckPerms; } /* @@ -75,19 +78,14 @@ quota_check_ExecCheckRTPerms(List *rangeTable, bool ereport_on_violation) * you try to extend a buffer page, and the quota has been exceeded. */ static bool -quota_check_ReadBufferExtendCheckPerms(Oid reloid, BlockNumber blockNum) +quota_check_ReadBufferExtendCheckPerms(Relation reln, pg_attribute_unused() ForkNumber forkNum, + pg_attribute_unused() BlockNumber blockNum, + pg_attribute_unused() ReadBufferMode mode, + pg_attribute_unused() BufferAccessStrategy strategy) { - bool isExtend; - - isExtend = (blockNum == P_NEW); - /* if not buffer extend, we could skip quota limit check*/ - if (!isExtend) - { - return true; - } /* Perform the check as the relation's owner and namespace */ - quota_check_common(reloid); + quota_check_common(reln->rd_id); return true; } diff --git a/patch/pg_hooks.patch b/patch/pg_hooks.patch index 0f67ec41cb5..61dacc784ed 100644 --- a/patch/pg_hooks.patch +++ b/patch/pg_hooks.patch @@ -1,23 +1,24 @@ -From ff8686c23badd5602bfb997c4fe761c19fa66f9e Mon Sep 17 00:00:00 2001 +From 3d275c78b304b308d288bd227f6dcab45dc5f595 Mon Sep 17 00:00:00 2001 From: Hubert Zhang Date: Tue, 6 Nov 2018 06:51:22 +0000 Subject: [PATCH] Add hooks for diskquota extension. -Add BufferExtendCheckPerms_hook to support quota enforcement -Add SmgrStat_hook to detect active relfilenodes. +Add ReadBufferExtend_hook() and smgr*_hook() +hook points to extend logic of storage +management. Co-authored-by: Haozhou Wang Co-authored-by: Hubert Zhang Co-authored-by: Hao Wu --- src/backend/storage/buffer/bufmgr.c | 14 ++++++++++++++ - src/backend/storage/smgr/smgr.c | 21 ++++++++++++++++++++- - src/include/storage/bufmgr.h | 8 ++++++++ - src/include/storage/smgr.h | 6 ++++++ - 4 files changed, 48 insertions(+), 1 deletion(-) + src/backend/storage/smgr/smgr.c | 33 +++++++++++++++++++++++++++++++++ + src/include/storage/bufmgr.h | 10 ++++++++++ + src/include/storage/smgr.h | 18 ++++++++++++++++++ + 4 files changed, 75 insertions(+) diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c -index 01eabe5..d977350 100644 +index 01eabe5706..5499495506 100644 --- a/src/backend/storage/buffer/bufmgr.c +++ b/src/backend/storage/buffer/bufmgr.c @@ -104,6 +104,13 @@ typedef struct CkptTsStatus @@ -29,7 +30,7 @@ index 01eabe5..d977350 100644 + * One example is to check whether there is additional disk quota for + * the table to be inserted. 
+ */ -+BufferExtendCheckPerms_hook_type BufferExtendCheckPerms_hook = NULL; ++ReadBufferExtended_hook_type ReadBufferExtended_hook = NULL; + /* GUC variables */ bool zero_damaged_pages = false; @@ -39,103 +40,152 @@ index 01eabe5..d977350 100644 */ pgstat_count_buffer_read(reln); + -+ /* check permissions when doing a buffer extend */ -+ if (blockNum == P_NEW && BufferExtendCheckPerms_hook) ++ /* hook function for doing a buffer extend */ ++ if (blockNum == P_NEW && ReadBufferExtended_hook) + { -+ (*BufferExtendCheckPerms_hook)(reln->rd_id, blockNum); ++ (*ReadBufferExtended_hook)(reln, forkNum, blockNum, mode, strategy); + } + buf = ReadBuffer_common(reln->rd_smgr, reln->rd_rel->relpersistence, forkNum, blockNum, mode, strategy, &hit); if (hit) diff --git a/src/backend/storage/smgr/smgr.c b/src/backend/storage/smgr/smgr.c -index 189342e..c5b218e 100644 +index 189342ef86..fa36a18e15 100644 --- a/src/backend/storage/smgr/smgr.c +++ b/src/backend/storage/smgr/smgr.c -@@ -90,7 +90,11 @@ static const f_smgr smgrsw[] = { +@@ -90,6 +90,16 @@ static const f_smgr smgrsw[] = { static const int NSmgr = lengthof(smgrsw); -- +/* -+ * Hook for plugins to collect statistics from smgr functions -+ * One example is to record the active relfilenode information. ++ * Hook for plugins to extend smgr functions. ++ * for example, collect statistics from smgr functions ++ * via recording the active relfilenode information. + */ -+SmgrStat_hook_type SmgrStat_hook = NULL; ++smgrcreate_hook_type smgrcreate_hook = NULL; ++smgrextend_hook_type smgrextend_hook = NULL; ++smgrtruncate_hook_type smgrtruncate_hook = NULL; ++smgrdounlinkall_hook_type smgrdounlinkall_hook = NULL; ++ + /* * Each backend has a hashtable that stores all extant SMgrRelation objects. - * In addition, "unowned" SMgrRelation objects are chained together in a list. -@@ -411,6 +415,11 @@ smgrcreate(SMgrRelation reln, ForkNumber forknum, bool isRedo) +@@ -397,6 +407,11 @@ smgrcreate(SMgrRelation reln, ForkNumber forknum, bool isRedo) + if (isRedo && reln->md_num_open_segs[forknum] > 0) + return; + ++ if (smgrcreate_hook) ++ { ++ (*smgrcreate_hook)(reln, forknum, isRedo); ++ } ++ + /* + * We may be using the target table space for the first time in this + * database, so create a per-database subdirectory if needed. 
+@@ -411,6 +426,7 @@ smgrcreate(SMgrRelation reln, ForkNumber forknum, bool isRedo) isRedo); smgrsw[reln->smgr_which].smgr_create(reln, forknum, isRedo); + -+ if (SmgrStat_hook) -+ { -+ (*SmgrStat_hook)(reln); -+ } } /* -@@ -617,6 +626,11 @@ smgrextend(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, +@@ -492,6 +508,11 @@ smgrdounlinkall(SMgrRelation *rels, int nrels, bool isRedo) + if (nrels == 0) + return; + ++ if (smgrdounlinkall_hook) ++ { ++ (*smgrdounlinkall_hook)(rels, nrels, isRedo); ++ } ++ + /* + * create an array which contains all relations to be dropped, and close + * each relation's forks at the smgr level while at it +@@ -615,8 +636,14 @@ void + smgrextend(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, + char *buffer, bool skipFsync) { ++ if (smgrextend_hook) ++ { ++ (*smgrextend_hook)(reln, forknum, blocknum, buffer, skipFsync); ++ } ++ smgrsw[reln->smgr_which].smgr_extend(reln, forknum, blocknum, buffer, skipFsync); + -+ if (SmgrStat_hook) -+ { -+ (*SmgrStat_hook)(reln); -+ } } /* -@@ -720,6 +734,11 @@ smgrtruncate(SMgrRelation reln, ForkNumber forknum, BlockNumber nblocks) +@@ -698,6 +725,11 @@ smgrnblocks(SMgrRelation reln, ForkNumber forknum) + void + smgrtruncate(SMgrRelation reln, ForkNumber forknum, BlockNumber nblocks) + { ++ if (smgrtruncate_hook) ++ { ++ (*smgrtruncate_hook)(reln, forknum, nblocks); ++ } ++ + /* + * Get rid of any buffers for the about-to-be-deleted blocks. bufmgr will + * just drop them without bothering to write the contents. +@@ -720,6 +752,7 @@ smgrtruncate(SMgrRelation reln, ForkNumber forknum, BlockNumber nblocks) * Do the truncation. */ smgrsw[reln->smgr_which].smgr_truncate(reln, forknum, nblocks); + -+ if (SmgrStat_hook) -+ { -+ (*SmgrStat_hook)(reln); -+ } } /* diff --git a/src/include/storage/bufmgr.h b/src/include/storage/bufmgr.h -index 3cce390..153a7d3 100644 +index 3cce3906a0..f1dcc77bf7 100644 --- a/src/include/storage/bufmgr.h +++ b/src/include/storage/bufmgr.h -@@ -160,6 +160,14 @@ extern PGDLLIMPORT int32 *LocalRefCount; +@@ -159,6 +159,16 @@ extern PGDLLIMPORT int32 *LocalRefCount; + */ #define BufferGetPage(buffer) ((Page)BufferGetBlock(buffer)) - /* -+ * Hook for plugins to check permissions when doing a buffer extend. ++/* ++ * Hook for plugins to add external logic when doing a buffer extend. + * One example is to check whether there is additional disk quota for + * the table to be inserted. 
+ */ -+typedef bool (*BufferExtendCheckPerms_hook_type) (Oid, BlockNumber); -+extern PGDLLIMPORT BufferExtendCheckPerms_hook_type BufferExtendCheckPerms_hook; ++typedef bool (*ReadBufferExtended_hook_type) (Relation reln, ++ ForkNumber forkNum, BlockNumber blockNum, ++ ReadBufferMode mode, BufferAccessStrategy strategy); ++extern PGDLLIMPORT ReadBufferExtended_hook_type ReadBufferExtended_hook; + -+/* + /* * prototypes for functions in bufmgr.c */ - extern bool ComputeIoConcurrency(int io_concurrency, double *target); diff --git a/src/include/storage/smgr.h b/src/include/storage/smgr.h -index c843bbc..918e590 100644 +index c843bbc969..d070b3d573 100644 --- a/src/include/storage/smgr.h +++ b/src/include/storage/smgr.h -@@ -144,5 +144,11 @@ extern void RememberFsyncRequest(RelFileNode rnode, ForkNumber forknum, +@@ -144,5 +144,23 @@ extern void RememberFsyncRequest(RelFileNode rnode, ForkNumber forknum, extern void ForgetRelationFsyncRequests(RelFileNode rnode, ForkNumber forknum); extern void ForgetDatabaseFsyncRequests(Oid dbid); extern void DropRelationFiles(RelFileNode *delrels, int ndelrels, bool isRedo); +/* -+ * Hook for plugins to collect statistics from smgr functions -+ * One example is to record the active relfilenode information. ++ * Hook for plugins to extend smgr functions. ++ * for example, collect statistics from smgr functions ++ * via recording the active relfilenode information. + */ -+typedef void (*SmgrStat_hook_type)(SMgrRelation sreln); -+extern PGDLLIMPORT SmgrStat_hook_type SmgrStat_hook; ++typedef void (*smgrcreate_hook_type)(SMgrRelation reln, ForkNumber forknum, ++ bool isRedo); ++extern PGDLLIMPORT smgrcreate_hook_type smgrcreate_hook; ++typedef void (*smgrextend_hook_type)(SMgrRelation reln, ForkNumber forknum, ++ BlockNumber blocknum, ++ char *buffer, bool skipFsync); ++extern PGDLLIMPORT smgrextend_hook_type smgrextend_hook; ++typedef void (*smgrtruncate_hook_type)(SMgrRelation reln, ForkNumber forknum, ++ BlockNumber nblocks); ++extern PGDLLIMPORT smgrtruncate_hook_type smgrtruncate_hook; ++typedef void (*smgrdounlinkall_hook_type)(SMgrRelation *rels, int nrels, ++ bool isRedo); ++extern PGDLLIMPORT smgrdounlinkall_hook_type smgrdounlinkall_hook; #endif /* SMGR_H */ -- -1.8.3.1 +2.16.2 diff --git a/quotamodel.c b/quotamodel.c index 38f868b977f..40fb2603612 100644 --- a/quotamodel.c +++ b/quotamodel.c @@ -542,7 +542,7 @@ calculate_table_disk_usage(bool force) if (classForm->relkind != RELKIND_RELATION && classForm->relkind != RELKIND_MATVIEW) continue; - relOid = HeapTupleGetOid(tuple); + relOid = classForm->oid; /* ignore system table*/ if(relOid < FirstNormalObjectId) From 99f41f3ffaae1e4670d77bd303b323fa5426b680 Mon Sep 17 00:00:00 2001 From: Hubert Zhang Date: Wed, 12 Dec 2018 16:47:45 +0800 Subject: [PATCH 008/330] Add license file. --- LICENSE | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) create mode 100644 LICENSE diff --git a/LICENSE b/LICENSE new file mode 100644 index 00000000000..b23675b99ba --- /dev/null +++ b/LICENSE @@ -0,0 +1,30 @@ +Copyright (c) 2004-2018 Pivotal Software, Inc. + +diskquota is licensed under the PostgreSQL license, the same license +as PostgreSQL. It contains parts of PostgreSQL source code. 
A copy of
+the license is below:
+
+--------------
+PostgreSQL Database Management System
+(formerly known as Postgres, then as Postgres95)
+
+Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group
+
+Portions Copyright (c) 1994, The Regents of the University of California
+
+Permission to use, copy, modify, and distribute this software and its
+documentation for any purpose, without fee, and without a written agreement
+is hereby granted, provided that the above copyright notice and this
+paragraph and the following two paragraphs appear in all copies.
+
+IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR
+DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING
+LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS
+DOCUMENTATION, EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES,
+INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
+ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS TO
+PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.

From 5be741f6a802ce4ca70a2dc66150395f0683abd3 Mon Sep 17 00:00:00 2001
From: Hubert Zhang
Date: Thu, 20 Dec 2018 07:02:15 +0000
Subject: [PATCH 009/330] Support diskquota in gpdb based on pg diskquota.

For how the MPP feature is supported, please read the design wiki:
https://github.com/greenplum-db/gpdb/wiki/Greenplum-Diskquota-Design
It shows the differences between gpdiskquota and pgdiskquota.
Major changes include:
1. Using SPI to fetch active tables from each segment instead of reading
active tables from shared memory directly. The change is in file
gp_activetable.c.
2. Using a hook in the dispatcher to do enforcement while a query is running.
3. Support AO/CO format.
4. API compatible with gpdb (PG 9.4 vs.
PG master) Co-authored-by: Haozhou Wang Co-authored-by: Hao Wu Co-authored-by: Hubert Zhang --- .gitignore | 52 +- Makefile | 20 +- README.md | 212 ++++---- activetable.c | 327 ------------ diskquota--1.0.sql | 6 + diskquota.c | 352 +++++++------ diskquota.h | 19 +- diskquota_schedule | 4 +- enforcement.c | 87 +++- expected/clean.out | 8 +- expected/empty.out | 0 expected/fini.out | 9 + expected/init.out | 26 + expected/prepare.out | 39 +- expected/test_column.out | 35 +- expected/test_copy.out | 25 +- expected/test_delete_quota.out | 40 ++ expected/test_drop_table.out | 32 +- expected/test_mistake.out | 8 + expected/test_partition.out | 49 +- expected/test_rename.out | 63 +-- expected/test_reschema.out | 26 +- expected/test_role.out | 44 +- expected/test_schema.out | 14 +- expected/test_temp_role.out | 38 +- expected/test_toast.out | 16 +- expected/test_truncate.out | 37 +- expected/test_update.out | 23 +- expected/test_vacuum.out | 39 +- gp_activetable.c | 797 ++++++++++++++++++++++++++++++ activetable.h => gp_activetable.h | 23 +- patch/pg_hooks.patch | 191 ------- quotamodel.c | 327 ++++++------ sql/clean.sql | 8 +- sql/empty.sql | 0 sql/fini.sql | 8 + sql/init.sql | 26 + sql/prepare.sql | 27 +- sql/test_column.sql | 30 +- sql/test_copy.sql | 23 +- sql/test_delete_quota.sql | 19 + sql/test_drop_table.sql | 28 +- sql/test_mistake.sql | 3 + sql/test_partition.sql | 45 +- sql/test_rename.sql | 58 +-- sql/test_reschema.sql | 24 +- sql/test_role.sql | 38 +- sql/test_schema.sql | 6 +- sql/test_temp_role.sql | 33 +- sql/test_toast.sql | 14 +- sql/test_truncate.sql | 32 +- sql/test_update.sql | 20 +- sql/test_vacuum.sql | 34 +- test_diskquota.conf | 5 - 54 files changed, 2061 insertions(+), 1408 deletions(-) delete mode 100644 activetable.c delete mode 100644 expected/empty.out create mode 100644 expected/fini.out create mode 100644 expected/init.out create mode 100644 expected/test_delete_quota.out create mode 100644 expected/test_mistake.out create mode 100644 gp_activetable.c rename activetable.h => gp_activetable.h (52%) delete mode 100644 patch/pg_hooks.patch delete mode 100644 sql/empty.sql create mode 100644 sql/fini.sql create mode 100644 sql/init.sql create mode 100644 sql/test_delete_quota.sql create mode 100644 sql/test_mistake.sql delete mode 100644 test_diskquota.conf diff --git a/.gitignore b/.gitignore index c0b8768012f..23c8e91e16f 100644 --- a/.gitignore +++ b/.gitignore @@ -1,52 +1,6 @@ -# Prerequisites -*.d - -# Object files *.o -*.ko -*.obj -*.elf - -# Linker output -*.ilk -*.map -*.exp - -# Precompiled Headers -*.gch -*.pch - -# Libraries -*.lib -*.a -*.la -*.lo - -# Shared objects (inc. 
Windows DLLs)
-*.dll
*.so
-*.so.*
-*.dylib
-
-# Executables
-*.exe
-a.out
-*.app
-*.i*86
-*.x86_64
-*.hex
-
-# Debug files
-*.dSYM/
-*.su
-*.idb
-*.pdb
-# Kernel Module Compile Results
-*.mod*
-*.cmd
-.tmp_versions/
-modules.order
-Module.symvers
-Mkfile.old
-dkms.conf
+regression.out
+regression.diffs
+/results/
diff --git a/Makefile b/Makefile
index 62fec5f42c4..3a63e6a638e 100644
--- a/Makefile
+++ b/Makefile
@@ -5,11 +5,21 @@ MODULE_big = diskquota
EXTENSION = diskquota
DATA = diskquota--1.0.sql
SRCDIR = ./
-FILES = $(shell find $(SRCDIR) -type f -name "*.c")
-OBJS = diskquota.o enforcement.o quotamodel.o activetable.o
+FILES = diskquota.c enforcement.c quotamodel.c gp_activetable.c
+OBJS = diskquota.o enforcement.o quotamodel.o gp_activetable.o
+PG_CPPFLAGS = -I$(libpq_srcdir)
+SHLIB_LINK = $(libpq)
+SHLIB_PREREQS = submake-libpq
REGRESS = dummy
-REGRESS_OPTS = --temp-config=test_diskquota.conf --temp-instance=/tmp/pg_diskquota_test --schedule=diskquota_schedule
-PG_CONFIG = pg_config
-PGXS := $(shell $(PG_CONFIG) --pgxs)
+REGRESS_OPTS = --schedule=diskquota_schedule
+
+ifdef USE_PGXS
+PGXS := $(shell pg_config --pgxs)
include $(PGXS)
+else
+subdir = gpcontrib/gp_diskquota
+top_builddir = ../..
+include $(top_builddir)/src/Makefile.global
+include $(top_srcdir)/contrib/contrib-global.mk
+endif
diff --git a/README.md b/README.md
index f4d6ac0f701..a472086c631 100644
--- a/README.md
+++ b/README.md
@@ -1,64 +1,112 @@
# Overview
-Diskquota is an extension that provides disk usage enforcement for database objects in Postgresql. Currently it supports to set quota limit on schema and role in a given database and limit the amount of disk space that a schema or a role can use.
+Diskquota is an extension that provides disk usage enforcement for database
+objects in Greenplum DB. Currently it supports setting quota limits on schemas
+and roles in a given database, limiting the amount of disk space that a schema
+or a role can use.

-This project is inspired by Heikki's pg_quota project (link: https://github.com/hlinnaka/pg_quota) and enhance it to support different kinds of DDL and DML which may change the disk usage of database objects.
+This project is inspired by Heikki's
+[pg_quota project](https://github.com/hlinnaka/pg_quota) and enhances it in
+two aspects:

-Diskquota is a soft limit of disk uages. It has some delay to detect the schemas or roles whose quota limit is exceeded. Here 'soft limit' supports two kinds of encforcement: Query loading data into out-of-quota schema/role will be forbidden before query is running. Query loading data into schema/role with rooms will be cancelled when the quota limit is reached dynamically during the query is running.
+1. To support different kinds of DDL and DML which may change the disk usage
+of database objects.
+
+2. To support the diskquota extension on an MPP architecture.
+
+Diskquota is a soft limit on disk usage. On one hand, it has some delay in
+detecting the schemas or roles whose quota limit is exceeded. On the other
+hand, 'soft limit' supports two kinds of enforcement: a query loading data
+into an out-of-quota schema/role will be forbidden before the query starts
+running, and a query loading data into a schema/role that still has room will
+be cancelled when the quota limit is reached dynamically while the query is
+running.

# Design
-Diskquota extension is based on background worker framework in Postgresql.
+The diskquota extension is based on the background worker framework in
+Greenplum (bgworker needs pg_version >= 9.4, which is supported in Greenplum 6
+and later).
There are two kinds of background workers: diskquota launcher and diskquota worker.
-There is only one laucher process per database cluster(i.e. one laucher per postmaster).
-Launcher process is reponsible for manage worker processes: Calling RegisterDynamicBackgroundWorker()
-to create new workers and keep their handle. Calling TerminateBackgroundWorker() to
-terminate workers which are disabled when DBA modify diskquota.monitor_databases
-
-There are many worker processes, one for each database which is listed in diskquota.monitor_databases.
-Currently, we support to monitor at most 10 databases at the same time.
-Worker processes are responsible for monitoring the disk usage of schemas and roles for the target database,
-and do quota enfocement. It will periodically (can be set via diskquota.naptime) recalcualte the table size of active tables, and update their corresponding schema or owner's disk usage. Then compare with quota limit for those schemas or roles. If exceeds the limit, put the corresponding schemas or roles into the blacklist in shared memory. Schemas or roles in blacklist are used to do query enforcement to cancel queries which plan to load data into these schemas or roles.
+There is only one launcher process per database master. There is no launcher
+process for segments.
+The launcher process is responsible for managing the worker processes: it
+calls RegisterDynamicBackgroundWorker() to create new workers and keeps their
+handles, and it calls TerminateBackgroundWorker() to terminate workers which
+are disabled when the DBA modifies the GUC diskquota.monitor_databases.
+
+There are many worker processes, one for each database listed
+in diskquota.monitor_databases. Like the launcher process, worker processes
+only run on the master node. Since each worker process needs to call SPI to
+fetch active table sizes, we currently support monitoring at most 10 databases
+at the same time, to limit the total cost of the worker processes. Worker
+processes are responsible for monitoring the disk usage of schemas and roles
+for the target database, and for doing quota enforcement. Each worker will
+periodically (the interval can be set via diskquota.naptime) recalculate the
+table size of active tables and update the corresponding schema's or owner's
+disk usage, then compare it with the quota limit for those schemas or roles.
+If a limit is exceeded, the corresponding schemas or roles are put into the
+blacklist in shared memory. Schemas or roles in the blacklist are used during
+query enforcement to cancel queries which plan to load data into these
+schemas or roles.
+
+From the MPP perspective, the diskquota launcher and worker processes all run
+on the master side. This master-only design saves memory resources on the
+segments and simplifies the communication from master to segments by issuing
+SPI queries periodically. Segments are used to detect the active tables and
+calculate the active table sizes; the master aggregates the table sizes from
+each segment and maintains the disk quota model.

## Active table
-Active tables are the tables whose table size may change in the last quota check interval. We use hooks in smgecreate(), smgrextend() and smgrtruncate() to detect active tables and store them(currently relfilenode) in the shared memory. Diskquota worker process will periodically consuming active table in shared memories, convert relfilenode to relaton oid, and calcualte table size by calling pg_total_relation_size(), which will sum the size of table(including: base, vm, fsm, toast and index).
+Active tables are the tables whose table size may change in the last quota
+check interval. Active tables are detected at the segment (QE) side: hooks in
+smgrcreate(), smgrextend() and smgrtruncate() are used to detect active tables
+and store them (currently as relfilenodes) in the shared memory. The diskquota
+worker process will periodically dispatch queries to all the segments,
+consume the active tables in their shared memories, convert each relfilenode
+to a relation oid, and calculate the table size by calling
+pg_total_relation_size(), which will sum the size of the table (including:
+base, vm, fsm, toast and index) in each segment.

## Enforcement
-Enforcement is implemented as hooks. There are two kinds of enforcement hooks: enforcement before query is running and
-enforcement during query is running.
-The 'before query' one is implemented at ExecutorCheckPerms_hook in function ExecCheckRTPerms()
-The 'during query' one is implemented at BufferExtendCheckPerms_hook in function ReadBufferExtended(). Note that the implementation of BufferExtendCheckPerms_hook will firstly check whether function request a new block, if not skip directyly.
+Enforcement is implemented as hooks. There are two kinds of enforcement hooks:
+enforcement before a query runs and enforcement while a query is running.
+The 'before query' one is implemented at ExecutorCheckPerms_hook in function
+ExecCheckRTPerms().
+The 'during query' one is implemented at DispatcherCheckPerms_hook in function
+checkDispatchResult(). For queries loading a huge amount of data, the
+dispatcher will poll the connection with a poll timeout. The hook will be
+called at every poll timeout with waitMode == DISPATCH_WAIT_NONE. Currently
+only the async dispatcher supports 'during query' quota enforcement.

## Quota setting store
-Quota limit of a schema or a role is stored in table 'quota_config' in 'diskquota' schema in monitored database. So each database stores and manages its own disk quota configuration. Note that although role is a db object in cluster level, we limit the diskquota of a role to be database specific. That is to say, a role may has different quota limit on different databases and their disk usage is isolated between databases.
+The quota limit of a schema or a role is stored in the table 'quota_config' in
+the 'diskquota' schema in each monitored database. So each database stores and
+manages its own disk quota configuration. Note that although a role is a
+cluster-level database object, we limit the diskquota of a role to be database
+specific. That is to say, a role may have different quota limits on different
+databases, and its disk usage is isolated between databases.

# Install
-1. Add hook functions to Postgres by applying patch. It's required
-since disk quota need to add some new hook functions in postgres core.
-This step would be skipped after patch is merged into postgres in future.
-```
-# install patch into postgres_src and rebuild postgres.
-cd postgres_src;
-git apply $diskquota_src/patch/pg_hooks.patch;
-make;
-make install;
-```
-2. Compile and install disk quota.
+1. Compile gpdb; disk quota is enabled by default.
```
-cd $diskquota_src;
+cd $gpdb_src;
make;
make install;
```
-3. Config postgresql.conf
+
+2. Enable diskquota as a preload library (in the future, we may set diskquota
+in shared_preload_libraries by default).
```
# enable diskquota in preload library.
-shared_preload_libraries = 'diskquota'
+gpconfig -c shared_preload_libraries -v 'diskquota'
+# restart database.
+gpstop -ar
+```
+
+3. Configure the diskquota GUCs.
+```
# set monitored databases
-diskquota.monitor_databases = 'postgres'
-# set naptime (second) to refresh the disk quota stats periodically
-diskquota.naptime = 2
-# restart database to load preload library.
-pg_ctl restart
+gpconfig -c diskquota.monitor_databases -v 'postgres'
+# set naptime (in seconds) to refresh the disk quota stats periodically
+gpconfig -c diskquota.naptime -v 2
```

4. Create diskquota extension in monitored database.
```
create extension diskquota;
@@ -66,10 +114,10 @@ create extension diskquota;

5. Reload database configuration
```
-# reset monitored database list in postgresql.conf
-diskquota.monitor_databases = 'postgres, postgres2'
+# reset monitored database list
+gpconfig -c diskquota.monitor_databases -v 'postgres, postgres2'
# reload configuration
-pg_ctl reload
+gpstop -u
```

# Usage
@@ -126,64 +174,42 @@ select * from diskquota.show_schema_quota_view;

# Test
Run regression tests.
```
-cd contrib/diskquota;
+cd gpcontrib/gp_diskquota;
make installcheck
```

+# HA
+Not implemented yet. One solution would be: start the launcher process on the
+standby and enable it to fork worker processes when switching from standby
+master to master.
+
# Benchmark & Performance Test
## Cost of diskquota worker.
-During each refresh interval, the disk quota worker need to refresh the disk quota model.
-
-It take less than 100ms under 100K user tables with no avtive tables.
-
-It take less than 200ms under 100K user tables with 1K active tables.
+To be added.

## Impact on OLTP queries
-We test OLTP queries to measure the impact of enabling diskquota feature. The range is from 2k tables to 10k tables.
-Each connection will insert 100 rows into each table. And the parallel connections range is from 5 to 25. Number of active tables will be around 1k.
-
-Without diskquota enabled (seconds)
-
-| | 2k | 4k | 6k | 8k | 10k |
-|:-: |:-: |:-: |:-: |:-: |--- |
-| 5 | 4.002 | 11.356 | 18.460 | 28.591 | 41.123 |
-| 10 | 4.832 | 11.988 | 21.113 | 32.829 | 45.832 |
-| 15 | 6.238 | 16.896 | 28.722 | 45.375 | 64.642 |
-| 20 | 8.036 | 21.711 | 38.499 | 61.763 | 87.875 |
-| 25 | 9.909 | 27.175 | 47.996 | 75.688 | 106.648 |
-
-With diskquota enabled (seconds)
-
-| | 2k | 4k | 6k | 8k | 10k |
-|:-: |:-: |:-: |:-: |:-: |--- |
-| 5 | 4.135 | 10.641 | 18.776 | 28.804 | 41.740 |
-| 10 | 4.773 | 12.407 | 22.351 | 34.243 | 47.568 |
-| 15 | 6.355 | 17.305 | 30.941 | 46.967 | 66.216 |
-| 20 | 9.451 | 22.231 | 40.645 | 61.758 | 88.309 |
-| 25 | 10.096 | 26.844 | 48.910 | 76.537 | 108.025 |
-
-The performance difference between with/without diskquota enabled are less then 2-3% in most case. Therefore, there is no significant performance downgrade when diskquota is enabled.
+To be added.

# Notes
1. Drop database with diskquota enabled.
If the DBA enables monitoring diskquota on a database, there will be a
connection to this database from a diskquota worker process. The DBA needs to
first remove this
-database from diskquota.monitor_databases in postgres.conf, and reload
-configuration by call `pg_ctl reload`. Then database could be dropped successfully.
+database from the GUC diskquota.monitor_databases, and reload the
+configuration by calling `gpstop -u`. Then the database can be dropped
+successfully.

2. Temp table.
-Diskquota supports to limit the disk usage of temp table as well. But schema and role are different.
-For role, i.e. the owner of the temp table, diakquota will treat it the same as normal tables and sum its
-table size to its owner's quota.
While for schema, temp table is located under namespace 'pg_temp_backend_id',
-so temp table size will not sum to the current schema's qouta.
+Diskquota supports limiting the disk usage of temp tables as well.
+But schema and role are treated differently. For a role, i.e. the owner of the
+temp table, diskquota will treat it the same as normal tables and sum its
+table size to its owner's quota. For a schema, however, a temp table is
+located under the namespace 'pg_temp_backend_id', so its size will not count
+against the current schema's quota.

# Known Issue.
-1. Since Postgresql doesn't support READ UNCOMMITTED isolation level,
+1. Since Greenplum doesn't support READ UNCOMMITTED isolation level,
our implementation cannot detect a newly created table inside an
-uncommitted transaction(See below example). Hence enforcement on
+uncommitted transaction (see the example below). Hence enforcement on
that newly created table will not work. After the transaction commits, the
diskquota worker process can detect the newly created table and do
enforcement accordingly in later queries.
@@ -203,20 +229,32 @@ insert into a select generate_series(1,200000);
END;
```

+The 'Create Table As' command has a similar problem.
+
One solution direction is that we calculate the additional 'uncommitted data
size' for schema and role in the worker process. Since pg_total_relation_size
needs to hold
-AccessShareLock to relation(And worker process don't even know this reloid exists),
+AccessShareLock on the relation (and the worker process doesn't even know this
+reloid exists),
we need to skip it and call stat() directly, tolerating file unlink. Skipping
the lock is dangerous, and we plan to leave this as a known issue at the
current stage.

+2. Missing empty schema or role in show_schema_quota_view and show_role_quota_view
+Currently, if there is no table in a specific schema, or no table is owned by
+a specific role, that schema or role will not be listed in
+show_schema_quota_view or show_role_quota_view.
+
+3. Out of shared memory
-Diskquota extension uses two kinds of shared memories. One is used to save black list and another one is
-to save active table list. The black list shared memory can support up to 1 MiB database objects which exceed quota limit.
-The active table list shared memory can support up to 1 MiB active tables in default, and user could reset it in GUC diskquota_max_active_tables.
+The diskquota extension uses two kinds of shared memory: one saves the black
+list and the other saves the active table list. The black list shared memory
+can hold up to 1 MiB of database objects which exceed their quota limit.
+The active table list shared memory can hold up to 1 MiB of active tables by
+default, and the user can change this via the GUC diskquota_max_active_tables.

-As shared memory is pre-allocated, user needs to restart DB if they updated this GUC value.
+As shared memory is pre-allocated, the user needs to restart the DB after
+updating this GUC value.

-If black list shared memory is full, it's possible to load data into some schemas or roles which quota limit are reached.
-If active table shared memory is full, disk quota worker may failed to detect the corresponding disk usage change in time.
+If the black list shared memory is full, it is possible to load data into some
+schemas or roles whose quota limits are already reached.
+If the active table shared memory is full, the disk quota worker may fail to
+detect the corresponding disk usage changes in time.
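
A side note on the hook mechanism used for active-table detection above: when
several extensions install the same smgr hook, the usual PostgreSQL convention
is to save the previous hook pointer at load time and invoke it from the new
handler, so hooks chain rather than overwrite each other. Below is a minimal
sketch of that pattern against the smgrextend_hook extension point introduced
by the pg_hooks patch earlier in this series; record_active_relfilenode() is a
hypothetical stand-in for the extension's shared-memory bookkeeping:

```
#include "postgres.h"
#include "storage/smgr.h"

/* hypothetical helper that stores the relfilenode in shared memory */
extern void record_active_relfilenode(RelFileNode node);

/* saved pointer to whatever hook was installed before ours */
static smgrextend_hook_type prev_smgrextend_hook = NULL;

static void
chained_smgrextend_hook(SMgrRelation reln, ForkNumber forknum,
						BlockNumber blocknum, char *buffer, bool skipFsync)
{
	/* let any earlier extension observe the event first */
	if (prev_smgrextend_hook)
		(*prev_smgrextend_hook)(reln, forknum, blocknum, buffer, skipFsync);

	/* then record this relation as active */
	record_active_relfilenode(reln->smgr_rnode.node);
}

void
_PG_init(void)
{
	prev_smgrextend_hook = smgrextend_hook;
	smgrextend_hook = chained_smgrextend_hook;
}
```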
diff --git a/activetable.c b/activetable.c deleted file mode 100644 index 6da44b364e4..00000000000 --- a/activetable.c +++ /dev/null @@ -1,327 +0,0 @@ -/* ------------------------------------------------------------------------- - * - * activetable.c - * - * This code is responsible for detecting active table for databases - * - * Copyright (C) 2013, PostgreSQL Global Development Group - * - * - * ------------------------------------------------------------------------- - */ -#include "postgres.h" - -#include "access/htup_details.h" -#include "catalog/indexing.h" -#include "catalog/pg_class.h" -#include "catalog/pg_type.h" -#include "executor/spi.h" -#include "funcapi.h" -#include "miscadmin.h" -#include "storage/shmem.h" -#include "storage/smgr.h" -#include "utils/builtins.h" -#include "utils/fmgroids.h" -#include "utils/relfilenodemap.h" - -#include "activetable.h" - -HTAB *active_tables_map = NULL; -static smgrcreate_hook_type prev_smgrcreate_hook = NULL; -static smgrextend_hook_type prev_smgrextend_hook = NULL; -static smgrtruncate_hook_type prev_smgrtruncate_hook = NULL; -static void active_table_hook_smgrcreate(SMgrRelation reln, - ForkNumber forknum, - bool isRedo); -static void active_table_hook_smgrextend(SMgrRelation reln, - ForkNumber forknum, - BlockNumber blocknum, - char *buffer, - bool skipFsync); -static void active_table_hook_smgrtruncate(SMgrRelation reln, - ForkNumber forknum, - BlockNumber blocknum); - -static void report_active_table_SmgrStat(SMgrRelation reln); -static HTAB* get_active_tables_stats(void); -static HTAB* get_all_tables_stats(void); - -void init_active_table_hook(void); -void init_shm_worker_active_tables(void); -void init_lock_active_tables(void); -HTAB* pg_fetch_active_tables(bool); - -/* - * Register smgr hook to detect active table. 
- */ -void -init_active_table_hook(void) -{ - prev_smgrcreate_hook = smgrcreate_hook; - smgrcreate_hook = active_table_hook_smgrcreate; - - prev_smgrextend_hook = smgrextend_hook; - smgrextend_hook = active_table_hook_smgrextend; - - prev_smgrtruncate_hook = smgrtruncate_hook; - smgrtruncate_hook = active_table_hook_smgrtruncate; -} - -static void -active_table_hook_smgrcreate(SMgrRelation reln, - pg_attribute_unused() ForkNumber forknum, - pg_attribute_unused() bool isRedo) -{ - report_active_table_SmgrStat(reln); -} - -static void -active_table_hook_smgrextend(SMgrRelation reln, - pg_attribute_unused() ForkNumber forknum, - pg_attribute_unused() BlockNumber blocknum, - pg_attribute_unused() char *buffer, - pg_attribute_unused() bool skipFsync) -{ - report_active_table_SmgrStat(reln); -} - -static void -active_table_hook_smgrtruncate(SMgrRelation reln, - pg_attribute_unused() ForkNumber forknum, - pg_attribute_unused() BlockNumber blocknum) -{ - report_active_table_SmgrStat(reln); -} - -/* - * Init active_tables_map shared memory - */ -void -init_shm_worker_active_tables(void) -{ - HASHCTL ctl; - memset(&ctl, 0, sizeof(ctl)); - - ctl.keysize = sizeof(DiskQuotaActiveTableEntry); - ctl.entrysize = sizeof(DiskQuotaActiveTableEntry); - ctl.hash = tag_hash; - - active_tables_map = ShmemInitHash ("active_tables", - diskquota_max_active_tables, - diskquota_max_active_tables, - &ctl, - HASH_ELEM | HASH_FUNCTION); -} - -/* - * Init lock of active table map - */ -void init_lock_active_tables(void) -{ - bool found = false; - active_table_shm_lock = ShmemInitStruct("disk_quota_active_table_shm_lock", - sizeof(disk_quota_shared_state), - &found); - - if (!found) - { - active_table_shm_lock->lock = &(GetNamedLWLockTranche("disk_quota_active_table_shm_lock"))->lock; - } -} - -/* - * Fetch active table file size statistics. - * If force is true, then fetch all the tables. - */ -HTAB* pg_fetch_active_tables(bool force) -{ - if (force) - { - return get_all_tables_stats(); - } - else - { - return get_active_tables_stats(); - } -} - -/* - * Get the table size statistics for all the tables - */ -static HTAB* -get_all_tables_stats() -{ - HTAB *local_table_stats_map = NULL; - HASHCTL ctl; - HeapTuple tuple; - Relation classRel; - HeapScanDesc relScan; - - memset(&ctl, 0, sizeof(ctl)); - ctl.keysize = sizeof(Oid); - ctl.entrysize = sizeof(DiskQuotaActiveTableEntry); - ctl.hcxt = CurrentMemoryContext; - ctl.hash = oid_hash; - - local_table_stats_map = hash_create("local table map with table size info", - 1024, - &ctl, - HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); - - classRel = heap_open(RelationRelationId, AccessShareLock); - relScan = heap_beginscan_catalog(classRel, 0, NULL); - - while ((tuple = heap_getnext(relScan, ForwardScanDirection)) != NULL) - { - Oid relOid; - DiskQuotaActiveTableEntry *entry; - - Form_pg_class classForm = (Form_pg_class) GETSTRUCT(tuple); - if (classForm->relkind != RELKIND_RELATION && - classForm->relkind != RELKIND_MATVIEW) - continue; - relOid = classForm->oid; - - /* ignore system table*/ - if (relOid < FirstNormalObjectId) - continue; - - entry = (DiskQuotaActiveTableEntry *) hash_search(local_table_stats_map, &relOid, HASH_ENTER, NULL); - - entry->tableoid = relOid; - entry->tablesize = (Size) DatumGetInt64(DirectFunctionCall1(pg_total_relation_size, - ObjectIdGetDatum(relOid))); - - } - - heap_endscan(relScan); - heap_close(classRel, AccessShareLock); - - return local_table_stats_map; -} -/* - * Get local active table with table oid and table size info. 
- * This function first copies active table map from shared memory - * to local active table map with refilenode info. Then traverses - * the local map and find corresponding table oid and table file - * size. Finnaly stores them into local active table map and return. - */ -static HTAB* -get_active_tables_stats() -{ - HASHCTL ctl; - HTAB *local_active_table_file_map = NULL; - HTAB *local_active_table_stats_map = NULL; - HASH_SEQ_STATUS iter; - DiskQuotaActiveTableFileEntry *active_table_file_entry; - DiskQuotaActiveTableEntry *active_table_entry; - - Oid relOid; - - memset(&ctl, 0, sizeof(ctl)); - ctl.keysize = sizeof(DiskQuotaActiveTableFileEntry); - ctl.entrysize = sizeof(DiskQuotaActiveTableFileEntry); - ctl.hcxt = CurrentMemoryContext; - ctl.hash = tag_hash; - - local_active_table_file_map = hash_create("local active table map with relfilenode info", - 1024, - &ctl, - HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); - - /* Move active table from shared memory to local active table map */ - LWLockAcquire(active_table_shm_lock->lock, LW_EXCLUSIVE); - - hash_seq_init(&iter, active_tables_map); - - while ((active_table_file_entry = (DiskQuotaActiveTableFileEntry *) hash_seq_search(&iter)) != NULL) - { - bool found; - DiskQuotaActiveTableFileEntry *entry; - - if (active_table_file_entry->dbid != MyDatabaseId) - { - continue; - } - - /* Add the active table entry into local hash table*/ - entry = hash_search(local_active_table_file_map, active_table_file_entry, HASH_ENTER, &found); - if (entry) - *entry = *active_table_file_entry; - hash_search(active_tables_map, active_table_file_entry, HASH_REMOVE, NULL); - } - - LWLockRelease(active_table_shm_lock->lock); - - memset(&ctl, 0, sizeof(ctl)); - ctl.keysize = sizeof(Oid); - ctl.entrysize = sizeof(DiskQuotaActiveTableEntry); - ctl.hcxt = CurrentMemoryContext; - ctl.hash = oid_hash; - - local_active_table_stats_map = hash_create("local active table map with relfilenode info", - 1024, - &ctl, - HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); - - /* traverse local active table map and calculate their file size. */ - hash_seq_init(&iter, local_active_table_file_map); - /* scan whole local map, get the oid of each table and calculate the size of them */ - while ((active_table_file_entry = (DiskQuotaActiveTableFileEntry *) hash_seq_search(&iter)) != NULL) - { - Size tablesize; - bool found; - - relOid = RelidByRelfilenode(active_table_file_entry->tablespaceoid, active_table_file_entry->relfilenode); - - //TODO replace DirectFunctionCall1 by a new total relation size function, which could handle Invalid relOid - /* avoid to generate ERROR if relOid is not existed (i.e. table has been droped) */ - PG_TRY(); - { - tablesize = (Size) DatumGetInt64(DirectFunctionCall1(pg_total_relation_size, - ObjectIdGetDatum(relOid))); - } - PG_CATCH(); - { - FlushErrorState(); - tablesize = 0; - } - PG_END_TRY(); - active_table_entry = hash_search(local_active_table_stats_map, &relOid, HASH_ENTER, &found); - active_table_entry->tableoid = relOid; - active_table_entry->tablesize = tablesize; - } - elog(DEBUG1, "active table number is:%ld", hash_get_num_entries(local_active_table_file_map)); - hash_destroy(local_active_table_file_map); - return local_active_table_stats_map; -} - -/* - * Hook function in smgr to report the active table - * information and stroe them in active table shared memory - * diskquota worker will consuming these active tables and - * recalculate their file size to update diskquota model. 
- */ -static void -report_active_table_SmgrStat(SMgrRelation reln) -{ - DiskQuotaActiveTableFileEntry *entry; - DiskQuotaActiveTableFileEntry item; - bool found = false; - - MemSet(&item, 0, sizeof(DiskQuotaActiveTableFileEntry)); - item.dbid = reln->smgr_rnode.node.dbNode; - item.relfilenode = reln->smgr_rnode.node.relNode; - item.tablespaceoid = reln->smgr_rnode.node.spcNode; - - LWLockAcquire(active_table_shm_lock->lock, LW_EXCLUSIVE); - entry = hash_search(active_tables_map, &item, HASH_ENTER_NULL, &found); - if (entry && !found) - *entry = item; - LWLockRelease(active_table_shm_lock->lock); - - if (!found && entry == NULL) { - /* We may miss the file size change of this relation at current refresh interval.*/ - ereport(WARNING, (errmsg("Share memory is not enough for active tables."))); - } -} diff --git a/diskquota--1.0.sql b/diskquota--1.0.sql index 925f2c2a41b..0fad6e46e50 100644 --- a/diskquota--1.0.sql +++ b/diskquota--1.0.sql @@ -34,4 +34,10 @@ FROM pg_roles, pg_class, diskquota.quota_config as quota WHERE pg_class.relowner = quota.targetoid and pg_class.relowner = pg_roles.oid and quota.quotatype=1 GROUP BY pg_class.relowner, pg_roles.rolname, quota.quotalimitMB; +CREATE TYPE diskquota_active_table_type AS ("TABLE_OID" oid, "TABLE_SIZE" int8); + +CREATE OR REPLACE FUNCTION diskquota_fetch_table_stat(int4, oid[]) RETURNS setof diskquota_active_table_type +AS 'MODULE_PATHNAME', 'diskquota_fetch_table_stat' +LANGUAGE C VOLATILE; + reset search_path; diff --git a/diskquota.c b/diskquota.c index 802a753cb78..e8f5dcd72c5 100644 --- a/diskquota.c +++ b/diskquota.c @@ -3,31 +3,38 @@ * diskquota.c * * Diskquota is used to limit the amount of disk space that a schema or a role - * can use. Diskquota is based on background worker framework. It contains a + * can use. Diskquota is based on background worker framework. It contains a * launcher process which is responsible for starting/refreshing the diskquota - * worker processes which monitor given databases. + * worker processes which monitor given databases. * - * Copyright (C) 2013, PostgreSQL Global Development Group + * Copyright (c) 2018-Present Pivotal Software, Inc. 
* + * IDENTIFICATION + * gpcontrib/gp_diskquota/diskquota.c * * ------------------------------------------------------------------------- */ #include "postgres.h" +#include + #include "catalog/namespace.h" #include "catalog/pg_collation.h" +#include "cdb/cdbvars.h" #include "executor/spi.h" +#include "libpq/libpq-be.h" #include "miscadmin.h" #include "pgstat.h" #include "postmaster/bgworker.h" #include "storage/ipc.h" +#include "storage/proc.h" #include "tcop/utility.h" +#include "utils/acl.h" #include "utils/builtins.h" #include "utils/formatting.h" #include "utils/numeric.h" -#include "utils/varlena.h" -#include "activetable.h" +#include "gp_activetable.h" #include "diskquota.h" PG_MODULE_MAGIC; @@ -35,21 +42,24 @@ PG_MODULE_MAGIC; PG_FUNCTION_INFO_V1(set_schema_quota); PG_FUNCTION_INFO_V1(set_role_quota); +/* max number of monitored database with diskquota enabled */ +#define MAX_NUM_MONITORED_DB 10 + /* flags set by signal handlers */ static volatile sig_atomic_t got_sighup = false; static volatile sig_atomic_t got_sigterm = false; /* GUC variables */ -int diskquota_naptime = 0; -char *diskquota_monitored_database_list = NULL; -int diskquota_max_active_tables = 0; +int diskquota_naptime = 0; +char *diskquota_monitored_database_list = NULL; +int diskquota_max_active_tables = 0; typedef struct DiskQuotaWorkerEntry DiskQuotaWorkerEntry; /* disk quota worker info used by launcher to manage the worker processes. */ struct DiskQuotaWorkerEntry { - char dbname[NAMEDATALEN]; + char dbname[NAMEDATALEN]; BackgroundWorkerHandle *handle; }; @@ -57,18 +67,18 @@ struct DiskQuotaWorkerEntry static HTAB *disk_quota_worker_map = NULL; /* functions of disk quota*/ -void _PG_init(void); -void _PG_fini(void); -void disk_quota_worker_main(Datum); -void disk_quota_launcher_main(Datum); +void _PG_init(void); +void _PG_fini(void); +void disk_quota_worker_main(Datum); +void disk_quota_launcher_main(Datum); static void disk_quota_sigterm(SIGNAL_ARGS); static void disk_quota_sighup(SIGNAL_ARGS); -static List *get_database_list(void); +static List *get_database_list(bool *is_refresh); static int64 get_size_in_mb(char *str); static void refresh_worker_list(void); static void set_quota_internal(Oid targetoid, int64 quota_limit_mb, QuotaType type); -static int start_worker(char* dbname); +static int start_worker(char *dbname); /* * Entrypoint of diskquota module. @@ -82,6 +92,10 @@ _PG_init(void) { BackgroundWorker worker; + /* diskquota.so must be in shared_preload_libraries to init SHM. 
*/ + if (!process_shared_preload_libraries_in_progress) + elog(ERROR, "diskquota.so not in shared_preload_libraries."); + init_disk_quota_shmem(); init_disk_quota_enforcement(); init_active_table_hook(); @@ -91,7 +105,7 @@ _PG_init(void) "Duration between each check (in seconds).", NULL, &diskquota_naptime, - 10, + 5, 1, INT_MAX, PGC_SIGHUP, @@ -104,14 +118,14 @@ _PG_init(void) return; DefineCustomStringVariable("diskquota.monitor_databases", - gettext_noop("database list with disk quota monitored."), - NULL, - &diskquota_monitored_database_list, - "", - PGC_SIGHUP, GUC_LIST_INPUT, - NULL, - NULL, - NULL); + gettext_noop("database list with disk quota monitored."), + NULL, + &diskquota_monitored_database_list, + "", + PGC_SIGHUP, GUC_LIST_INPUT, + NULL, + NULL, + NULL); DefineCustomIntVariable("diskquota.max_active_tables", "max number of active tables monitored by disk-quota", @@ -126,13 +140,19 @@ _PG_init(void) NULL, NULL); + /* start disk quota launcher only on master */ + if (Gp_role != GP_ROLE_DISPATCH) + { + return; + } + /* set up common data for diskquota launcher worker */ worker.bgw_flags = BGWORKER_SHMEM_ACCESS | BGWORKER_BACKEND_DATABASE_CONNECTION; worker.bgw_start_time = BgWorkerStart_RecoveryFinished; worker.bgw_restart_time = BGW_NEVER_RESTART; - sprintf(worker.bgw_library_name, "diskquota"); - sprintf(worker.bgw_function_name, "disk_quota_launcher_main"); + snprintf(worker.bgw_library_name, BGW_MAXLEN, "diskquota"); + snprintf(worker.bgw_function_name, BGW_MAXLEN, "disk_quota_launcher_main"); worker.bgw_notify_pid = 0; snprintf(worker.bgw_name, BGW_MAXLEN, "disk quota launcher"); @@ -189,8 +209,11 @@ disk_quota_sighup(SIGNAL_ARGS) void disk_quota_worker_main(Datum main_arg) { - char *dbname=MyBgworkerEntry->bgw_name; - elog(LOG,"start disk quota worker process to monitor database:%s", dbname); + char *dbname = MyBgworkerEntry->bgw_name; + + ereport(LOG, + (errmsg("start disk quota worker process to monitor database:%s", + dbname))); /* Establish signal handlers before unblocking signals. */ pqsignal(SIGHUP, disk_quota_sighup); @@ -200,9 +223,12 @@ disk_quota_worker_main(Datum main_arg) BackgroundWorkerUnblockSignals(); /* Connect to our database */ - BackgroundWorkerInitializeConnection(dbname, NULL, 0); + BackgroundWorkerInitializeConnection(dbname, NULL); - /* Initialize diskquota related local hash map and refresh model immediately*/ + /* + * Initialize diskquota related local hash map and refresh model + * immediately + */ init_disk_quota_model(); refresh_disk_quota_model(true); @@ -221,7 +247,7 @@ disk_quota_worker_main(Datum main_arg) */ rc = WaitLatch(&MyProc->procLatch, WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, - diskquota_naptime * 1000L, PG_WAIT_EXTENSION); + diskquota_naptime * 1000L); ResetLatch(&MyProc->procLatch); /* Do the work */ @@ -252,10 +278,10 @@ disk_quota_worker_main(Datum main_arg) void disk_quota_launcher_main(Datum main_arg) { - List *dblist; - ListCell *cell; + List *dblist; + ListCell *cell; HASHCTL hash_ctl; - int db_count = 0; + bool is_refresh = false; /* Establish signal handlers before unblocking signals. 
*/ pqsignal(SIGHUP, disk_quota_sighup); @@ -269,27 +295,33 @@ disk_quota_launcher_main(Datum main_arg) hash_ctl.entrysize = sizeof(DiskQuotaWorkerEntry); disk_quota_worker_map = hash_create("disk quota worker map", - 1024, - &hash_ctl, - HASH_ELEM); + 1024, + &hash_ctl, + HASH_ELEM); - dblist = get_database_list(); - elog(LOG,"diskquota launcher started"); - foreach(cell, dblist) - { - char *db_name; + ereport(LOG, + (errmsg("diskquota launcher started"))); - if (db_count >= 10) - break; - db_name = (char *)lfirst(cell); - if (db_name == NULL || *db_name == '\0') + dblist = get_database_list(&is_refresh); + if (is_refresh) + { + foreach(cell, dblist) { - elog(LOG, "invalid db name='%s' in diskquota.monitor_databases", db_name); - continue; + char *db_name; + + db_name = (char *) lfirst(cell); + if (db_name == NULL || *db_name == '\0') + { + ereport(LOG, + (errmsg("invalid db name='%s' in diskquota.monitor_databases", db_name))); + continue; + } + start_worker(db_name); } - start_worker(db_name); - db_count++; } + /* free dblist */ + list_free(dblist); + /* * Main loop: do this until the SIGTERM handler tells us to terminate */ @@ -304,8 +336,8 @@ disk_quota_launcher_main(Datum main_arg) * background process goes away immediately in an emergency. */ rc = WaitLatch(&MyProc->procLatch, - WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, - diskquota_naptime * 1000L, PG_WAIT_EXTENSION); + WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, + diskquota_naptime * 1000L); ResetLatch(&MyProc->procLatch); /* emergency bailout if postmaster has died */ @@ -319,7 +351,11 @@ disk_quota_launcher_main(Datum main_arg) { got_sighup = false; ProcessConfigFile(PGC_SIGHUP); - /* terminate not monitored worker process and start new worker process */ + + /* + * terminate not monitored worker process and start new worker + * process + */ refresh_worker_list(); } @@ -329,22 +365,48 @@ disk_quota_launcher_main(Datum main_arg) } /* - * database list found in GUC diskquota.monitored_database_list + * Extract database list in GUC diskquota.monitored_database_list + * Parameter is_refresh is used to indicate whether to refresh the + * monitored database list when GUC monitored_database_list changed. + * If GUC contains more than 10 databases, is_refresh is set to false. */ static List * -get_database_list(void) +get_database_list(bool *is_refresh) { - List *dblist = NULL; - char *dbstr; + List *monitor_db_list = NIL; + char *dbstr; + *is_refresh = true; dbstr = pstrdup(diskquota_monitored_database_list); - if (!SplitIdentifierString(dbstr, ',', &dblist)) + if (!SplitIdentifierString(dbstr, ',', &monitor_db_list)) { - elog(WARNING, "cann't get database list from guc:'%s'", diskquota_monitored_database_list); + ereport(WARNING, + (errmsg("GUC monitor_databases:'%s' is invalid, GUC should be" + "separated by comma", + diskquota_monitored_database_list))); + pfree(dbstr); return NULL; } - return dblist; + + /* + * We only allow to minitor at most 10 databases truncate the list if + * there are more than 10 databases in list. + */ + if (list_length(monitor_db_list) > MAX_NUM_MONITORED_DB) + { + *is_refresh = false; + ereport(WARNING, + (errmsg("Currently diskquota could monitor at most 10 databases." 
+ "GUC monitor_databases:'%s' contains more than" + " 10 databases, additional databases will be ignored.", + diskquota_monitored_database_list))); + monitor_db_list = list_truncate(monitor_db_list, MAX_NUM_MONITORED_DB); + } + + pfree(dbstr); + /* dblist should be list_free by the caller */ + return monitor_db_list; } /* @@ -355,41 +417,47 @@ get_database_list(void) static void refresh_worker_list(void) { - List *monitor_dblist; - List *removed_workerlist; - ListCell *cell; - ListCell *removed_workercell; - bool flag = false; - bool found; + List *monitor_dblist; + ListCell *cell; + bool flag = false; + bool is_refresh = false; + bool found; DiskQuotaWorkerEntry *hash_entry; HASH_SEQ_STATUS status; - int db_count = 0; - removed_workerlist = NIL; - monitor_dblist = get_database_list(); + monitor_dblist = get_database_list(&is_refresh); + if (!is_refresh) + { + ereport(WARNING, + (errmsg("Failed to refresh monitored database. GUC " + "monitor_databases:'%s' should contain less than " + "10 databases.", + diskquota_monitored_database_list))); + return; + } + /* - * refresh the worker process based on the configuration file change. - * step 1 is to terminate worker processes whose connected database - * not in monitor database list. + * refresh the worker process based on the configuration file change. step + * 1 is to terminate worker processes whose connected database not in + * monitor database list. */ - elog(LOG,"Refresh monitored database list."); + ereport(LOG, + (errmsg("Refresh monitored database list."))); hash_seq_init(&status, disk_quota_worker_map); - while ((hash_entry = (DiskQuotaWorkerEntry*) hash_seq_search(&status)) != NULL) + while ((hash_entry = (DiskQuotaWorkerEntry *) hash_seq_search(&status)) != NULL) { flag = false; foreach(cell, monitor_dblist) { - char *db_name; + char *db_name; - if (db_count >= 10) - break; - db_name = (char *)lfirst(cell); + db_name = (char *) lfirst(cell); if (db_name == NULL || *db_name == '\0') { continue; } - if (strcmp(db_name, hash_entry->dbname) == 0 ) + if (strcmp(db_name, hash_entry->dbname) == 0) { flag = true; break; @@ -397,46 +465,28 @@ refresh_worker_list(void) } if (!flag) { - removed_workerlist = lappend(removed_workerlist, hash_entry->dbname); - } - } - - foreach(removed_workercell, removed_workerlist) - { - DiskQuotaWorkerEntry* workerentry; - char *db_name; - BackgroundWorkerHandle *handle; - - db_name = (char *)lfirst(removed_workercell); - - workerentry = (DiskQuotaWorkerEntry *)hash_search(disk_quota_worker_map, - (void *)db_name, - HASH_REMOVE, &found); - if(found) - { - handle = workerentry->handle; - TerminateBackgroundWorker(handle); + TerminateBackgroundWorker(hash_entry->handle); + (DiskQuotaWorkerEntry *) hash_search(disk_quota_worker_map, + (void *) hash_entry->dbname, + HASH_REMOVE, NULL); } } /* step 2: start new worker which first appears in monitor database list. */ - db_count = 0; foreach(cell, monitor_dblist) { - DiskQuotaWorkerEntry* workerentry; - char *db_name; - pid_t pid; + DiskQuotaWorkerEntry *workerentry; + char *db_name; + pid_t pid; - if (db_count >= 10) - break; - db_name = (char *)lfirst(cell); + db_name = (char *) lfirst(cell); if (db_name == NULL || *db_name == '\0') { continue; } - workerentry = (DiskQuotaWorkerEntry *)hash_search(disk_quota_worker_map, - (void *)db_name, - HASH_FIND, &found); + workerentry = (DiskQuotaWorkerEntry *) hash_search(disk_quota_worker_map, + (void *) db_name, + HASH_FIND, &found); if (found) { /* in case worker is not in BGWH_STARTED mode, restart it. 
*/ @@ -448,28 +498,31 @@ refresh_worker_list(void) start_worker(db_name); } } + + /* free monitor_dblist */ + list_free(monitor_dblist); } /* * Dynamically launch an disk quota worker process. */ static int -start_worker(char* dbname) +start_worker(char *dbname) { BackgroundWorker worker; BackgroundWorkerHandle *handle; BgwHandleStatus status; pid_t pid; - bool found; - DiskQuotaWorkerEntry* workerentry; + bool found; + DiskQuotaWorkerEntry *workerentry; memset(&worker, 0, sizeof(BackgroundWorker)); worker.bgw_flags = BGWORKER_SHMEM_ACCESS | BGWORKER_BACKEND_DATABASE_CONNECTION; worker.bgw_start_time = BgWorkerStart_RecoveryFinished; worker.bgw_restart_time = BGW_NEVER_RESTART; - sprintf(worker.bgw_library_name, "diskquota"); - sprintf(worker.bgw_function_name, "disk_quota_worker_main"); + snprintf(worker.bgw_library_name, BGW_MAXLEN, "diskquota"); + snprintf(worker.bgw_function_name, BGW_MAXLEN, "disk_quota_worker_main"); snprintf(worker.bgw_name, BGW_MAXLEN, "%s", dbname); /* set bgw_notify_pid so that we can use WaitForBackgroundWorkerStartup */ worker.bgw_notify_pid = MyProcPid; @@ -484,18 +537,18 @@ start_worker(char* dbname) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_RESOURCES), errmsg("could not start background process"), - errhint("More details may be available in the server log."))); + errhint("More details may be available in the server log."))); if (status == BGWH_POSTMASTER_DIED) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_RESOURCES), - errmsg("cannot start background processes without postmaster"), + errmsg("cannot start background processes without postmaster"), errhint("Kill all remaining database processes and restart the database."))); Assert(status == BGWH_STARTED); /* put the worker handle into the worker map */ - workerentry = (DiskQuotaWorkerEntry *)hash_search(disk_quota_worker_map, - (void *)dbname, - HASH_ENTER, &found); + workerentry = (DiskQuotaWorkerEntry *) hash_search(disk_quota_worker_map, + (void *) dbname, + HASH_ENTER, &found); if (!found) { workerentry->handle = handle; @@ -511,10 +564,10 @@ start_worker(char* dbname) Datum set_role_quota(PG_FUNCTION_ARGS) { - Oid roleoid; - char *rolname; - char *sizestr; - int64 quota_limit_mb; + Oid roleoid; + char *rolname; + char *sizestr; + int64 quota_limit_mb; if (!superuser()) { @@ -541,10 +594,11 @@ set_role_quota(PG_FUNCTION_ARGS) Datum set_schema_quota(PG_FUNCTION_ARGS) { - Oid namespaceoid; - char *nspname; - char *sizestr; - int64 quota_limit_mb; + Oid namespaceoid; + char *nspname; + char *sizestr; + int64 quota_limit_mb; + if (!superuser()) { ereport(ERROR, @@ -557,7 +611,7 @@ set_schema_quota(PG_FUNCTION_ARGS) namespaceoid = get_namespace_oid(nspname, false); sizestr = text_to_cstring(PG_GETARG_TEXT_PP(1)); - sizestr = str_tolower(sizestr, strlen(sizestr), DEFAULT_COLLATION_OID); + sizestr = str_tolower(sizestr, strlen(sizestr), DEFAULT_COLLATION_OID); quota_limit_mb = get_size_in_mb(sizestr); set_quota_internal(namespaceoid, quota_limit_mb, NAMESPACE_QUOTA); @@ -571,14 +625,14 @@ set_schema_quota(PG_FUNCTION_ARGS) static void set_quota_internal(Oid targetoid, int64 quota_limit_mb, QuotaType type) { - int ret; + int ret; StringInfoData buf; - + initStringInfo(&buf); appendStringInfo(&buf, - "select * from diskquota.quota_config where targetoid = %u" - " and quotatype =%d", - targetoid, type); + "select true from diskquota.quota_config where targetoid = %u" + " and quotatype =%d", + targetoid, type); SPI_connect(); @@ -586,14 +640,14 @@ set_quota_internal(Oid targetoid, int64 quota_limit_mb, QuotaType 
type) if (ret != SPI_OK_SELECT) elog(ERROR, "cannot select quota setting table: error code %d", ret); - /* if the schema or role's quota has been set before*/ + /* if the schema or role's quota has been set before */ if (SPI_processed == 0 && quota_limit_mb > 0) { resetStringInfo(&buf); initStringInfo(&buf); appendStringInfo(&buf, - "insert into diskquota.quota_config values(%u,%d,%ld);", - targetoid, type, quota_limit_mb); + "insert into diskquota.quota_config values(%u,%d,%ld);", + targetoid, type, quota_limit_mb); ret = SPI_execute(buf.data, false, 0); if (ret != SPI_OK_INSERT) elog(ERROR, "cannot insert into quota setting table, error code %d", ret); @@ -603,25 +657,26 @@ set_quota_internal(Oid targetoid, int64 quota_limit_mb, QuotaType type) resetStringInfo(&buf); initStringInfo(&buf); appendStringInfo(&buf, - "delete from diskquota.quota_config where targetoid=%u" - " and quotatype=%d;", - targetoid, type); + "delete from diskquota.quota_config where targetoid=%u" + " and quotatype=%d;", + targetoid, type); ret = SPI_execute(buf.data, false, 0); if (ret != SPI_OK_DELETE) elog(ERROR, "cannot delete item from quota setting table, error code %d", ret); } - else if(SPI_processed > 0 && quota_limit_mb > 0) + else if (SPI_processed > 0 && quota_limit_mb > 0) { resetStringInfo(&buf); initStringInfo(&buf); appendStringInfo(&buf, - "update diskquota.quota_config set quotalimitMB = %ld where targetoid=%u" - " and quotatype=%d;", - quota_limit_mb, targetoid, type); + "update diskquota.quota_config set quotalimitMB = %ld where targetoid=%u" + " and quotatype=%d;", + quota_limit_mb, targetoid, type); ret = SPI_execute(buf.data, false, 0); if (ret != SPI_OK_UPDATE) elog(ERROR, "cannot update quota setting table, error code %d", ret); } + /* * And finish our transaction. 
*/ @@ -635,10 +690,11 @@ set_quota_internal(Oid targetoid, int64 quota_limit_mb, QuotaType type) static int64 get_size_in_mb(char *str) { - char *strptr, *endptr; + char *strptr, + *endptr; char saved_char; - Numeric num; - int64 result; + Numeric num; + int64 result; bool have_digits = false; /* Skip leading whitespace */ @@ -685,7 +741,7 @@ get_size_in_mb(char *str) if (*endptr == 'e' || *endptr == 'E') { long exponent; - char *cp; + char *cp; /* * Note we might one day support EB units, so if what follows 'E' @@ -738,9 +794,9 @@ get_size_in_mb(char *str) multiplier = ((int64) 1024); else if (pg_strcasecmp(strptr, "tb") == 0) - multiplier = ((int64) 1024) * 1024 ; + multiplier = ((int64) 1024) * 1024; else if (pg_strcasecmp(strptr, "pb") == 0) - multiplier = ((int64) 1024) * 1024 * 1024 ; + multiplier = ((int64) 1024) * 1024 * 1024; else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), diff --git a/diskquota.h b/diskquota.h index 87b2fabf87e..c3b4d75864d 100644 --- a/diskquota.h +++ b/diskquota.h @@ -7,12 +7,19 @@ typedef enum { NAMESPACE_QUOTA, ROLE_QUOTA -} QuotaType; +} QuotaType; + +typedef enum +{ + FETCH_ALL_SIZE, /* fetch size for all the tables */ + FETCH_ACTIVE_OID, /* fetch active table list */ + FETCH_ACTIVE_SIZE /* fetch size for active tables */ +} FetchTableStatType; typedef struct { - LWLock *lock; /* protects shared memory of blackMap */ -} disk_quota_shared_state; + LWLock *lock; /* protects shared memory of blackMap */ +} disk_quota_shared_state; /* enforcement interface*/ extern void init_disk_quota_enforcement(void); @@ -26,8 +33,10 @@ extern bool quota_check_common(Oid reloid); /* quotaspi interface */ extern void init_disk_quota_hook(void); -extern int diskquota_naptime; +extern int diskquota_naptime; extern char *diskquota_monitored_database_list; -extern int diskquota_max_active_tables; +extern int diskquota_max_active_tables; + +extern Datum diskquota_fetch_table_stat(PG_FUNCTION_ARGS); #endif diff --git a/diskquota_schedule b/diskquota_schedule index 283ddc66bac..64b4c7524ff 100644 --- a/diskquota_schedule +++ b/diskquota_schedule @@ -1,6 +1,8 @@ +test: init test: prepare -test: test_role test_schema test_drop_table test_column test_copy test_update test_toast test_truncate test_reschema test_temp_role test_rename +test: test_role test_schema test_drop_table test_column test_copy test_update test_toast test_truncate test_reschema test_temp_role test_rename test_delete_quota test_mistake test: test_partition test: test_vacuum test: clean +test: fini diff --git a/enforcement.c b/enforcement.c index 02cadf355b0..0b71ede07f4 100644 --- a/enforcement.c +++ b/enforcement.c @@ -2,29 +2,37 @@ * * enforcment.c * - * This code registers enforcement hooks to cancle the query which exceeds + * This code registers enforcement hooks to cancel the query which exceeds * the quota limit. * - * Copyright (C) 2013, PostgreSQL Global Development Group + * Copyright (c) 2018-Present Pivotal Software, Inc. 
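The get_size_in_mb() hunk above normalizes a quota string such as '1 MB' to a number of megabytes. A standalone model of the visible unit branches, compilable with any C compiler, to sanity-check the multipliers; the numeric parsing and any branches outside the hunk are omitted, and "gb" is inferred to be the plain 1024 multiplier since the result is expressed in MB:

    #include <stdio.h>
    #include <stdint.h>
    #include <strings.h>

    /* Model of the unit multipliers in get_size_in_mb(); result is in MB. */
    static int64_t
    size_in_mb(int64_t value, const char *unit)
    {
        if (strcasecmp(unit, "mb") == 0)
            return value;
        if (strcasecmp(unit, "gb") == 0)
            return value * 1024;
        if (strcasecmp(unit, "tb") == 0)
            return value * 1024 * 1024;
        if (strcasecmp(unit, "pb") == 0)
            return value * ((int64_t) 1024) * 1024 * 1024;
        return -1;              /* unit not modeled in this sketch */
    }

    int
    main(void)
    {
        printf("%lld\n", (long long) size_in_mb(2, "GB"));  /* 2048 */
        printf("%lld\n", (long long) size_in_mb(1, "tb"));  /* 1048576 */
        return 0;
    }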
* + * IDENTIFICATION + * gpcontrib/gp_diskquota/enforcement.c * * ------------------------------------------------------------------------- */ #include "postgres.h" +#include "cdb/cdbdisp.h" +#include "cdb/cdbdisp_async.h" #include "executor/executor.h" #include "storage/bufmgr.h" -#include "utils/rel.h" - +#include "utils/resowner.h" #include "diskquota.h" +#define CHECKED_OID_LIST_NUM 64 + static bool quota_check_ExecCheckRTPerms(List *rangeTable, bool ereport_on_violation); -static bool quota_check_ReadBufferExtendCheckPerms(Relation reln, ForkNumber forkNum, - BlockNumber blockNum, ReadBufferMode mode, - BufferAccessStrategy strategy); +static bool quota_check_DispatcherCheckPerms(void); static ExecutorCheckPerms_hook_type prev_ExecutorCheckPerms_hook; -static ReadBufferExtended_hook_type prev_ReadBufferExtended_hook; +static DispatcherCheckPerms_hook_type prev_DispatcherCheckPerms_hook; +static void diskquota_free_callback(ResourceReleasePhase phase, bool isCommit, bool isTopLevel, void *arg); + +/* result relations that need to be checked in the dispatcher */ +static Oid checked_reloid_list[CHECKED_OID_LIST_NUM]; +static int checked_reloid_list_count = 0; /* * Initialize enforcement hooks. @@ -36,13 +44,33 @@ init_disk_quota_enforcement(void) prev_ExecutorCheckPerms_hook = ExecutorCheckPerms_hook; ExecutorCheckPerms_hook = quota_check_ExecCheckRTPerms; - /* enforcement hook during query is loading data*/ - prev_ReadBufferExtended_hook = ReadBufferExtended_hook; - ReadBufferExtended_hook = quota_check_ReadBufferExtendCheckPerms; + /* enforcement hook during query is loading data */ + prev_DispatcherCheckPerms_hook = DispatcherCheckPerms_hook; + DispatcherCheckPerms_hook = quota_check_DispatcherCheckPerms; + + /* set up and reset the result relation checked list */ + memset(checked_reloid_list, 0, sizeof(Oid) * CHECKED_OID_LIST_NUM); + RegisterResourceReleaseCallback(diskquota_free_callback, NULL); +} + +/* + * Reset checked reloid list + * This may be called multiple times at different resource release + * phases, but it's safe to reset the checked_reloid_list. + */ +static void +diskquota_free_callback(ResourceReleasePhase phase, + bool isCommit, + bool isTopLevel, + void *arg) +{ + + checked_reloid_list_count = 0; + return; } /* - * Enformcent hook function before query is loading data. Throws an error if + * Enforcement hook function before query is loading data. Throws an error if * you try to INSERT, UPDATE or COPY into a table, and the quota has been exceeded. */ static bool @@ -59,33 +87,44 @@ quota_check_ExecCheckRTPerms(List *rangeTable, bool ereport_on_violation) continue; /* - * Only check quota on inserts. UPDATEs may well increase - * space usage too, but we ignore that for now. + * Only check quota on inserts. UPDATEs may well increase space usage + * too, but we ignore that for now. */ if ((rte->requiredPerms & ACL_INSERT) == 0 && (rte->requiredPerms & ACL_UPDATE) == 0) continue; - /* Perform the check as the relation's owner and namespace */ + /* + * Given table oid, check whether the quota limit of table's schema or + * table's owner are reached. This function will ereport(ERROR) when + * quota limit exceeded. + */ quota_check_common(rte->relid); + checked_reloid_list[checked_reloid_list_count++] = rte->relid; } - return true; } /* - * Enformcent hook function when query is loading data. Throws an error if - * you try to extend a buffer page, and the quota has been exceeded. + * Enforcement hook function when query is loading data.
Throws an error if + * the quota has been exceeded. */ static bool -quota_check_ReadBufferExtendCheckPerms(Relation reln, pg_attribute_unused() ForkNumber forkNum, - pg_attribute_unused() BlockNumber blockNum, - pg_attribute_unused() ReadBufferMode mode, - pg_attribute_unused() BufferAccessStrategy strategy) +quota_check_DispatcherCheckPerms(void) { + int i; /* Perform the check as the relation's owner and namespace */ - quota_check_common(reln->rd_id); + for (i = 0; i < checked_reloid_list_count; i++) + { + Oid relid = checked_reloid_list[i]; + + /* + * Given table oid, check whether the quota limit of table's schema or + * table's owner are reached. This function will ereport(ERROR) when + * quota limit exceeded. + */ + quota_check_common(relid); + } return true; } - diff --git a/expected/clean.out b/expected/clean.out index b8578682904..4712add5c30 100644 --- a/expected/clean.out +++ b/expected/clean.out @@ -1,4 +1,4 @@ -drop table badquota.t1; -drop role testbody; -drop schema badquota; -drop extension diskquota; +DROP TABLE badquota.t1; +DROP ROLE testbody; +DROP SCHEMA badquota; +DROP EXTENSION diskquota; diff --git a/expected/empty.out b/expected/empty.out deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/expected/fini.out b/expected/fini.out new file mode 100644 index 00000000000..cff593cf3ad --- /dev/null +++ b/expected/fini.out @@ -0,0 +1,9 @@ +\! gpconfig -c diskquota.monitor_databases -v postgres > /dev/null +\! echo $? +0 +-- start_ignore +\! gpstop -u > /dev/null +\! echo $? +0 +-- end_ignore +\! sleep 2 diff --git a/expected/init.out b/expected/init.out new file mode 100644 index 00000000000..feba3dcdd93 --- /dev/null +++ b/expected/init.out @@ -0,0 +1,26 @@ +-- start_ignore +\! gpconfig -c shared_preload_libraries -v diskquota > /dev/null +-- end_ignore +\! echo $? +0 +-- start_ignore +\! gpstop -raf > /dev/null +-- end_ignore +\! echo $? +0 +-- start_ignore +\! gpconfig -c diskquota.monitor_databases -v contrib_regression > /dev/null +-- end_ignore +\! echo $? +0 +-- start_ignore +\! gpconfig -c diskquota.naptime -v 2 > /dev/null +-- end_ignore +\! echo $? +0 +-- start_ignore +\! gpstop -u > /dev/null +-- end_ignore +\! echo $? +0 +\! sleep 10 diff --git a/expected/prepare.out b/expected/prepare.out index 647fb8dafd8..365debe9b8d 100644 --- a/expected/prepare.out +++ b/expected/prepare.out @@ -1,36 +1,49 @@ -create extension diskquota; -select pg_sleep(1); +CREATE EXTENSION diskquota; +-- start_ignore +\! gpstop -u +20181119:10:38:22:019976 gpstop:instance-1:huanzhang-[INFO]:-Starting gpstop with args: -u +20181119:10:38:22:019976 gpstop:instance-1:huanzhang-[INFO]:-Gathering information and validating the environment... +20181119:10:38:22:019976 gpstop:instance-1:huanzhang-[INFO]:-Obtaining Greenplum Master catalog information +20181119:10:38:22:019976 gpstop:instance-1:huanzhang-[INFO]:-Obtaining Segment details from master... +20181119:10:38:23:019976 gpstop:instance-1:huanzhang-[INFO]:-Greenplum Version: 'postgres (Greenplum Database) 6.0.0-alpha.0+dev.13149.g1ff3481 build dev-oss' +20181119:10:38:23:019976 gpstop:instance-1:huanzhang-[INFO]:-Signalling all postmaster processes to reload +. +-- end_ignore +SELECT pg_sleep(1); pg_sleep ---------- (1 row) -\! pg_ctl -D /tmp/pg_diskquota_test/data reload -server signaled \! 
cp data/csmall.txt /tmp/csmall.txt -select pg_sleep(5); +SELECT pg_sleep(15); pg_sleep ---------- (1 row) -- prepare a schema that has reached quota limit -create schema badquota; -select diskquota.set_schema_quota('badquota', '1 MB'); +CREATE SCHEMA badquota; +SELECT diskquota.set_schema_quota('badquota', '1 MB'); set_schema_quota ------------------ (1 row) -create role testbody; -create table badquota.t1(i int); -alter table badquota.t1 owner to testbody; -insert into badquota.t1 select generate_series(0, 50000); -select pg_sleep(5); +CREATE ROLE testbody; +NOTICE: resource queue required -- using default resource queue "pg_default" +CREATE TABLE badquota.t1(i INT); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +ALTER TABLE badquota.t1 OWNER TO testbody; +INSERT INTO badquota.t1 SELECT generate_series(0, 100000000); +ERROR: schema's disk space quota exceeded with name:badquota +SELECT pg_sleep(5); pg_sleep ---------- (1 row) -insert into badquota.t1 select generate_series(0, 10); +-- expect fail +INSERT INTO badquota.t1 SELECT generate_series(0, 10); ERROR: schema's disk space quota exceeded with name:badquota diff --git a/expected/test_column.out b/expected/test_column.out index 5a6762b737e..48ffe1ae965 100644 --- a/expected/test_column.out +++ b/expected/test_column.out @@ -1,32 +1,39 @@ -- Test alter table add column -create schema scolumn; -select diskquota.set_schema_quota('scolumn', '1 MB'); +CREATE SCHEMA scolumn; +SELECT diskquota.set_schema_quota('scolumn', '1 MB'); set_schema_quota ------------------ (1 row) -set search_path to scolumn; -select pg_sleep(5); +SET search_path TO scolumn; +SELECT pg_sleep(5); pg_sleep ---------- (1 row) -create table a2(i int); -insert into a2 select generate_series(1,20000); -insert into a2 select generate_series(1,10); -ALTER TABLE a2 ADD COLUMN j varchar(50); -update a2 set j = 'add value for column j'; -select pg_sleep(5); +CREATE TABLE a2(i INT); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
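The prepare.out output above exercises both enforcement paths from the design: the oversized INSERT is cancelled while it is still loading data, and the later small INSERT is rejected up front because the schema already sits in the blacklist. The up-front probe is quota_check_common(), implemented in quotamodel.c and not shown in this excerpt; the sketch below is only one plausible shape of that probe, with hypothetical type and variable names:

    #include "postgres.h"
    #include "miscadmin.h"
    #include "storage/lwlock.h"
    #include "utils/hsearch.h"

    /* Hypothetical layout; quotamodel.c owns the real shared hash and lock. */
    typedef struct BlackMapEntry
    {
        Oid         targetoid;      /* namespace OID or owner OID */
        Oid         databaseoid;
    } BlackMapEntry;

    static void
    check_blacklist(HTAB *black_map, LWLock *lock, Oid targetoid)
    {
        BlackMapEntry key;
        bool        found;

        MemSet(&key, 0, sizeof(key));
        key.targetoid = targetoid;
        key.databaseoid = MyDatabaseId;

        LWLockAcquire(lock, LW_SHARED);
        (void) hash_search(black_map, &key, HASH_FIND, &found);
        LWLockRelease(lock);

        if (found)
            ereport(ERROR,
                    (errcode(ERRCODE_DISK_FULL),
                     errmsg("disk space quota exceeded")));
    }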
+-- expect fail +INSERT INTO a2 SELECT generate_series(1,100000000); +ERROR: schema's disk space quota exceeded with name:scolumn +-- expect fail +INSERT INTO a2 SELECT generate_series(1,10); +ERROR: schema's disk space quota exceeded with name:scolumn +ALTER TABLE a2 ADD COLUMN j VARCHAR(50); +UPDATE a2 SET j = 'add value for column j'; +ERROR: schema's disk space quota exceeded with name:scolumn +SELECT pg_sleep(5); pg_sleep ---------- (1 row) -- expect insert failed after add column -insert into a2 select generate_series(1,10); +INSERT INTO a2 SELECT generate_series(1,10); ERROR: schema's disk space quota exceeded with name:scolumn -drop table a2; -reset search_path; -drop schema scolumn; +DROP TABLE a2; +RESET search_path; +DROP SCHEMA scolumn; diff --git a/expected/test_copy.out b/expected/test_copy.out index 5c00c476016..ccd8f941166 100644 --- a/expected/test_copy.out +++ b/expected/test_copy.out @@ -1,27 +1,28 @@ -- Test copy -create schema s3; -select diskquota.set_schema_quota('s3', '1 MB'); +CREATE SCHEMA s3; +SELECT diskquota.set_schema_quota('s3', '1 MB'); set_schema_quota ------------------ (1 row) -set search_path to s3; -create table c (i int); -copy c from '/tmp/csmall.txt'; +SET search_path TO s3; +CREATE TABLE c (i int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +COPY c FROM '/tmp/csmall.txt'; -- expect failed -insert into c select generate_series(1,100000000); +INSERT INTO c SELECT generate_series(1,100000000); ERROR: schema's disk space quota exceeded with name:s3 -select pg_sleep(5); +SELECT pg_sleep(5); pg_sleep ---------- (1 row) --- select pg_total_table_size('c'); -- expect copy fail -copy c from '/tmp/csmall.txt'; +COPY c FROM '/tmp/csmall.txt'; ERROR: schema's disk space quota exceeded with name:s3 -drop table c; -reset search_path; -drop schema s3; +DROP TABLE c; +RESET search_path; +DROP SCHEMA s3; diff --git a/expected/test_delete_quota.out b/expected/test_delete_quota.out new file mode 100644 index 00000000000..d521e8dac4c --- /dev/null +++ b/expected/test_delete_quota.out @@ -0,0 +1,40 @@ +-- Test delete disk quota +CREATE SCHEMA deleteschema; +SELECT diskquota.set_schema_quota('deleteschema', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +SET search_path TO deleteschema; +CREATE TABLE c (i INT); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
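test_delete_quota below clears a limit by passing '-1 MB' to set_schema_quota(). In set_quota_internal() earlier in this patch that maps onto a three-way decision; the delete branch's exact condition sits outside the visible hunk, but the expected output pins the behavior down. A standalone model:

    #include <stdio.h>

    /* Model of set_quota_internal()'s decision (delete condition inferred):
     * no existing row + positive limit -> INSERT
     * existing row + negative limit    -> DELETE (quota removed)
     * existing row + positive limit    -> UPDATE */
    typedef enum { DO_NOTHING, DO_INSERT, DO_DELETE, DO_UPDATE } QuotaAction;

    static QuotaAction
    quota_action(long rows_found, long limit_mb)
    {
        if (rows_found == 0 && limit_mb > 0)
            return DO_INSERT;
        if (rows_found > 0 && limit_mb < 0)
            return DO_DELETE;
        if (rows_found > 0 && limit_mb > 0)
            return DO_UPDATE;
        return DO_NOTHING;
    }

    int
    main(void)
    {
        /* '-1 MB' on an existing quota: expect DO_DELETE (2) */
        printf("%d\n", quota_action(1, -1));
        return 0;
    }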
+-- expect failed +INSERT INTO c SELECT generate_series(1,100000000); +ERROR: schema's disk space quota exceeded with name:deleteschema +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- expect fail +INSERT INTO c SELECT generate_series(1,100); +ERROR: schema's disk space quota exceeded with name:deleteschema +SELECT diskquota.set_schema_quota('deleteschema', '-1 MB'); + set_schema_quota +------------------ + +(1 row) + +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +INSERT INTO c SELECT generate_series(1,100); +DROP TABLE c; +RESET search_path; +DROP SCHEMA deleteschema; diff --git a/expected/test_drop_table.out b/expected/test_drop_table.out index 354be6bee55..7723bb2da6b 100644 --- a/expected/test_drop_table.out +++ b/expected/test_drop_table.out @@ -1,29 +1,33 @@ -- Test Drop table -create schema sdrtbl; -select diskquota.set_schema_quota('sdrtbl', '1 MB'); +CREATE SCHEMA sdrtbl; +SELECT diskquota.set_schema_quota('sdrtbl', '1 MB'); set_schema_quota ------------------ (1 row) -set search_path to sdrtbl; -create table a(i int); -create table a2(i int); -insert into a select generate_series(1,100); +SET search_path TO sdrtbl; +CREATE TABLE a(i INT); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +CREATE TABLE a2(i INT); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
+INSERT INTO a SELECT generate_series(1,100); -- expect insert fail -insert into a select generate_series(1,100000000); +INSERT INTO a SELECT generate_series(1,100000000); ERROR: schema's disk space quota exceeded with name:sdrtbl -- expect insert fail -insert into a2 select generate_series(1,100); +INSERT INTO a2 SELECT generate_series(1,100); ERROR: schema's disk space quota exceeded with name:sdrtbl -drop table a; -select pg_sleep(5); +DROP TABLE a; +SELECT pg_sleep(5); pg_sleep ---------- (1 row) -insert into a2 select generate_series(1,100); -drop table a2; -reset search_path; -drop schema sdrtbl; +INSERT INTO a2 SELECT generate_series(1,100); +DROP TABLE a2; +RESET search_path; +DROP SCHEMA sdrtbl; diff --git a/expected/test_mistake.out b/expected/test_mistake.out new file mode 100644 index 00000000000..e8d3dd7cacf --- /dev/null +++ b/expected/test_mistake.out @@ -0,0 +1,8 @@ +-- to make sure that the schema 'notfoundns' is really not found +select nspname from pg_namespace where nspname = 'notfoundns'; + nspname +--------- +(0 rows) + +select diskquota.set_schema_quota('notfoundns', '1 MB'); +ERROR: schema "notfoundns" does not exist diff --git a/expected/test_partition.out b/expected/test_partition.out index cea8f4cf21e..87ce554c44c 100644 --- a/expected/test_partition.out +++ b/expected/test_partition.out @@ -1,49 +1,54 @@ -- Test partition table -create schema s8; -select diskquota.set_schema_quota('s8', '1 MB'); +CREATE SCHEMA s8; +SELECT diskquota.SET_schema_quota('s8', '1 MB'); set_schema_quota ------------------ (1 row) -set search_path to s8; +SET search_path TO s8; CREATE TABLE measurement ( city_id int not null, logdate date not null, peaktemp int, unitsales int -)PARTITION BY RANGE (logdate); -CREATE TABLE measurement_y2006m02 PARTITION OF measurement - FOR VALUES FROM ('2006-02-01') TO ('2006-03-01'); -CREATE TABLE measurement_y2006m03 PARTITION OF measurement - FOR VALUES FROM ('2006-03-01') TO ('2006-04-01'); -insert into measurement select generate_series(1,15000), '2006-02-01' ,1,1; -select pg_sleep(5); +)PARTITION BY RANGE (logdate) +( + PARTITION Feb06 START (date '2006-02-01') INCLUSIVE, + PARTITION Mar06 START (date '2006-03-01') INCLUSIVE + END (date '2016-04-01') EXCLUSIVE +); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'city_id' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
+NOTICE: CREATE TABLE will create partition "measurement_1_prt_feb06" for table "measurement" +NOTICE: CREATE TABLE will create partition "measurement_1_prt_mar06" for table "measurement" +INSERT INTO measurement SELECT generate_series(1,100), '2006-02-02' ,1,1; +SELECT pg_sleep(5); pg_sleep ---------- (1 row) -insert into measurement select 1, '2006-02-01' ,1,1; +INSERT INTO measurement SELECT 1, '2006-02-02' ,1,1; -- expect insert fail -insert into measurement select generate_series(1,100000000), '2006-03-02' ,1,1; +INSERT INTO measurement SELECT generate_series(1,100000000), '2006-03-02' ,1,1; ERROR: schema's disk space quota exceeded with name:s8 -- expect insert fail -insert into measurement select 1, '2006-02-01' ,1,1; +INSERT INTO measurement SELECT 1, '2006-02-02' ,1,1; ERROR: schema's disk space quota exceeded with name:s8 -- expect insert fail -insert into measurement select 1, '2006-03-03' ,1,1; +INSERT INTO measurement SELECT 1, '2006-03-03' ,1,1; ERROR: schema's disk space quota exceeded with name:s8 -delete from measurement where logdate='2006-03-02'; -vacuum full measurement; -select pg_sleep(5); +DELETE FROM measurement WHERE logdate='2006-03-02'; +VACUUM FULL measurement; +SELECT pg_sleep(5); pg_sleep ---------- (1 row) -insert into measurement select 1, '2006-02-01' ,1,1; -insert into measurement select 1, '2006-03-03' ,1,1; -drop table measurement; -reset search_path; -drop schema s8; +INSERT INTO measurement SELECT 1, '2006-02-02' ,1,1; +INSERT INTO measurement SELECT 1, '2006-03-03' ,1,1; +DROP TABLE measurement; +RESET search_path; +DROP SCHEMA s8; diff --git a/expected/test_rename.out b/expected/test_rename.out index d089dfdb77d..dfaebffd907 100644 --- a/expected/test_rename.out +++ b/expected/test_rename.out @@ -1,60 +1,65 @@ -- test rename schema -create schema srs1; -select diskquota.set_schema_quota('srs1', '1 MB'); +CREATE SCHEMA srs1; +SELECT diskquota.set_schema_quota('srs1', '1 MB'); set_schema_quota ------------------ (1 row) set search_path to srs1; -create table a(i int); +CREATE TABLE a(i int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
-- expect insert fail -insert into a select generate_series(1,100000000); +INSERT INTO a SELECT generate_series(1,100000000); ERROR: schema's disk space quota exceeded with name:srs1 -- expect insert fail -insert into a select generate_series(1,10); +INSERT INTO a SELECT generate_series(1,10); ERROR: schema's disk space quota exceeded with name:srs1 -alter schema srs1 rename to srs2; -set search_path to srs2; +ALTER SCHEMA srs1 RENAME TO srs2; +SET search_path TO srs2; -- expect insert fail -insert into a select generate_series(1,10); +INSERT INTO a SELECT generate_series(1,10); ERROR: schema's disk space quota exceeded with name:srs2 -- test rename table -alter table a rename to a2; +ALTER TABLE a RENAME TO a2; -- expect insert fail -insert into a2 select generate_series(1,10); +INSERT INTO a2 SELECT generate_series(1,10); ERROR: schema's disk space quota exceeded with name:srs2 -drop table a2; -reset search_path; -drop schema srs2; +DROP TABLE a2; +RESET search_path; +DROP SCHEMA srs2; -- test rename role -create schema srr1; -create role srerole nologin; -select diskquota.set_role_quota('srerole', '1MB'); +CREATE SCHEMA srr1; +CREATE ROLE srerole NOLOGIN; +NOTICE: resource queue required -- using default resource queue "pg_default" +SELECT diskquota.set_role_quota('srerole', '1MB'); set_role_quota ---------------- (1 row) -set search_path to srr1; -create table a(i int); -alter table a owner to srerole; +SET search_path TO srr1; +CREATE TABLE a(i int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +ALTER TABLE a OWNER TO srerole; -- expect insert fail -insert into a select generate_series(1,100000000); +INSERT INTO a SELECT generate_series(1,100000000); ERROR: role's disk space quota exceeded with name:srerole -- expect insert fail -insert into a select generate_series(1,10); +INSERT INTO a SELECT generate_series(1,10); ERROR: role's disk space quota exceeded with name:srerole -alter role srerole rename to srerole2; +ALTER ROLE srerole RENAME TO srerole2; -- expect insert fail -insert into a select generate_series(1,10); +INSERT INTO a SELECT generate_series(1,10); ERROR: role's disk space quota exceeded with name:srerole2 -- test rename table -alter table a rename to a2; +ALTER TABLE a RENAME TO a2; -- expect insert fail -insert into a2 select generate_series(1,10); +INSERT INTO a2 SELECT generate_series(1,10); ERROR: role's disk space quota exceeded with name:srerole2 -drop table a2; -drop role srerole2; -reset search_path; -drop schema srr1; +DROP TABLE a2; +DROP ROLE srerole2; +RESET search_path; +DROP SCHEMA srr1; diff --git a/expected/test_reschema.out b/expected/test_reschema.out index 2c2c0faf792..41e7c2cb2d4 100644 --- a/expected/test_reschema.out +++ b/expected/test_reschema.out @@ -1,34 +1,36 @@ -- Test re-set_schema_quota -create schema srE; -select diskquota.set_schema_quota('srE', '1 MB'); +CREATE SCHEMA srE; +SELECT diskquota.set_schema_quota('srE', '1 MB'); set_schema_quota ------------------ (1 row) -set search_path to srE; -create table a(i int); +SET search_path TO srE; +CREATE TABLE a(i int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. 
Make sure column(s) chosen are the optimal data distribution key to minimize skew. -- expect insert fail -insert into a select generate_series(1,1000000000); +INSERT INTO a SELECT generate_series(1,1000000000); ERROR: schema's disk space quota exceeded with name:sre -- expect insert fail when exceed quota limit -insert into a select generate_series(1,1000); +INSERT INTO a SELECT generate_series(1,1000); ERROR: schema's disk space quota exceeded with name:sre -- set schema quota larger -select diskquota.set_schema_quota('srE', '1 GB'); +SELECT diskquota.set_schema_quota('srE', '1 GB'); set_schema_quota ------------------ (1 row) -select pg_sleep(5); +SELECT pg_sleep(5); pg_sleep ---------- (1 row) -- expect insert succeed -insert into a select generate_series(1,1000); -drop table a; -reset search_path; -drop schema srE; +INSERT INTO a SELECT generate_series(1,1000); +DROP TABLE a; +RESET search_path; +DROP SCHEMA srE; diff --git a/expected/test_role.out b/expected/test_role.out index 4e998a6fc84..fc8f70364bf 100644 --- a/expected/test_role.out +++ b/expected/test_role.out @@ -1,40 +1,46 @@ -- Test role quota -create schema srole; -set search_path to srole; -CREATE role u1 NOLOGIN; -CREATE role u2 NOLOGIN; -CREATE TABLE b (t text); +CREATE SCHEMA srole; +SET search_path TO srole; +CREATE ROLE u1 NOLOGIN; +NOTICE: resource queue required -- using default resource queue "pg_default" +CREATE ROLE u2 NOLOGIN; +NOTICE: resource queue required -- using default resource queue "pg_default" +CREATE TABLE b (t TEXT); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 't' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. ALTER TABLE b OWNER TO u1; -CREATE TABLE b2 (t text); +CREATE TABLE b2 (t TEXT); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 't' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
ALTER TABLE b2 OWNER TO u1; -select diskquota.set_role_quota('u1', '1 MB'); +SELECT diskquota.set_role_quota('u1', '1 MB'); set_role_quota ---------------- (1 row) -insert into b select generate_series(1,100); +INSERT INTO b SELECT generate_series(1,100); -- expect insert fail -insert into b select generate_series(1,100000000); +INSERT INTO b SELECT generate_series(1,100000000); ERROR: role's disk space quota exceeded with name:u1 -- expect insert fail -insert into b select generate_series(1,100); +INSERT INTO b SELECT generate_series(1,100); ERROR: role's disk space quota exceeded with name:u1 -- expect insert fail -insert into b2 select generate_series(1,100); +INSERT INTO b2 SELECT generate_series(1,100); ERROR: role's disk space quota exceeded with name:u1 -alter table b owner to u2; -select pg_sleep(5); +ALTER TABLE b OWNER TO u2; +SELECT pg_sleep(5); pg_sleep ---------- (1 row) -- expect insert succeed -insert into b select generate_series(1,100); +INSERT INTO b SELECT generate_series(1,100); -- expect insert succeed -insert into b2 select generate_series(1,100); -drop table b, b2; -drop role u1, u2; -reset search_path; -drop schema srole; +INSERT INTO b2 SELECT generate_series(1,100); +DROP TABLE b, b2; +DROP ROLE u1, u2; +RESET search_path; +DROP SCHEMA srole; diff --git a/expected/test_schema.out b/expected/test_schema.out index a512464fb47..547ac8ded4e 100644 --- a/expected/test_schema.out +++ b/expected/test_schema.out @@ -8,6 +8,8 @@ select diskquota.set_schema_quota('s1', '1 MB'); set search_path to s1; create table a(i int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. insert into a select generate_series(1,100); -- expect insert fail insert into a select generate_series(1,100000000); @@ -16,6 +18,8 @@ ERROR: schema's disk space quota exceeded with name:s1 insert into a select generate_series(1,100); ERROR: schema's disk space quota exceeded with name:s1 create table a2(i int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
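test_role above depends on usage being charged to a table's current owner: after ALTER TABLE b OWNER TO u2, role u1 falls back under its 1 MB limit at the next refresh and its inserts succeed again. A toy model of that per-owner accounting (illustration only, not extension code):

    #include <stdio.h>

    /* Usage is summed per owner OID, so reassigning a table's owner moves
     * its whole size to the new owner at the next refresh cycle. */
    typedef struct { unsigned owner; long size_mb; } TableStat;

    static long
    owner_usage_mb(const TableStat *tabs, int n, unsigned owner)
    {
        long        total = 0;

        for (int i = 0; i < n; i++)
            if (tabs[i].owner == owner)
                total += tabs[i].size_mb;
        return total;
    }

    int
    main(void)
    {
        TableStat   tabs[] = {{10, 2}, {10, 1}, {20, 5}};

        tabs[0].owner = 20;     /* ALTER TABLE ... OWNER TO: 2 MB moves over */
        printf("u10=%ld MB, u20=%ld MB\n",
               owner_usage_mb(tabs, 3, 10), owner_usage_mb(tabs, 3, 20));
        return 0;
    }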
-- expect insert fail insert into a2 select generate_series(1,100); ERROR: schema's disk space quota exceeded with name:s1 @@ -29,13 +33,19 @@ select pg_sleep(5); (1 row) -- expect insert succeed -insert into a2 select generate_series(1,20000); +insert into a2 select generate_series(1,200); -- expect insert succeed -insert into s2.a select generate_series(1,20000); +insert into s2.a select generate_series(1,200); alter table s2.a set schema badquota; -- expect failed insert into badquota.a select generate_series(0, 100); ERROR: schema's disk space quota exceeded with name:badquota +select schema_name, quota_in_mb from diskquota.show_schema_quota_view where schema_name = 's1'; + schema_name | quota_in_mb +-------------+------------- + s1 | 1 +(1 row) + reset search_path; drop table s1.a2, badquota.a; drop schema s1, s2; diff --git a/expected/test_temp_role.out b/expected/test_temp_role.out index 2b800d2b055..798df7d5283 100644 --- a/expected/test_temp_role.out +++ b/expected/test_temp_role.out @@ -1,31 +1,37 @@ -- Test temp table restrained by role id -create schema strole; -create role u3temp nologin; -set search_path to strole; -select diskquota.set_role_quota('u3temp', '1MB'); +CREATE SCHEMA strole; +CREATE ROLE u3temp NOLOGIN; +NOTICE: resource queue required -- using default resource queue "pg_default" +SET search_path TO strole; +SELECT diskquota.set_role_quota('u3temp', '1MB'); set_role_quota ---------------- (1 row) -create table a(i int); -alter table a owner to u3temp; -create temp table ta(i int); -alter table ta owner to u3temp; +CREATE TABLE a(i int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +ALTER TABLE a OWNER TO u3temp; +CREATE TEMP TABLE ta(i int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
+ALTER TABLE ta OWNER TO u3temp; -- expected failed: fill temp table -insert into ta select generate_series(1,100000000); +INSERT INTO ta SELECT generate_series(1,100000000); ERROR: role's disk space quota exceeded with name:u3temp -- expected failed: -insert into a select generate_series(1,100); +INSERT INTO a SELECT generate_series(1,100); ERROR: role's disk space quota exceeded with name:u3temp -drop table ta; -select pg_sleep(5); +DROP TABLE ta; +SELECT pg_sleep(5); pg_sleep ---------- (1 row) -insert into a select generate_series(1,100); -drop table a; -reset search_path; -drop schema strole; +INSERT INTO a SELECT generate_series(1,100); +DROP TABLE a; +DROP ROLE u3temp; +RESET search_path; +DROP SCHEMA strole; diff --git a/expected/test_toast.out b/expected/test_toast.out index 454cf3f50aa..d530a084ef7 100644 --- a/expected/test_toast.out +++ b/expected/test_toast.out @@ -1,19 +1,21 @@ -- Test toast -create schema s5; -select diskquota.set_schema_quota('s5', '1 MB'); +CREATE SCHEMA s5; +SELECT diskquota.set_schema_quota('s5', '1 MB'); set_schema_quota ------------------ (1 row) -set search_path to s5; +SET search_path TO s5; CREATE TABLE a5 (message text); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'message' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT INTO a5 SELECT (SELECT string_agg(chr(floor(random() * 26)::int + 65), '') FROM generate_series(1,10000)) FROM generate_series(1,10); -select pg_sleep(5); +SELECT pg_sleep(5); pg_sleep ---------- @@ -26,6 +28,6 @@ SELECT (SELECT FROM generate_series(1,100000)) FROM generate_series(1,1000000); ERROR: schema's disk space quota exceeded with name:s5 -drop table a5; -reset search_path; -drop schema s5; +DROP TABLE a5; +RESET search_path; +DROP SCHEMA s5; diff --git a/expected/test_truncate.out b/expected/test_truncate.out index 4c1ad13606f..e8d40d0da3e 100644 --- a/expected/test_truncate.out +++ b/expected/test_truncate.out @@ -1,36 +1,41 @@ -- Test truncate -create schema s7; -select diskquota.set_schema_quota('s7', '1 MB'); +CREATE SCHEMA s7; +SELECT diskquota.set_schema_quota('s7', '1 MB'); set_schema_quota ------------------ (1 row) -set search_path to s7; -create table a (i int); -create table b (i int); -insert into a select generate_series(1,50000); -select pg_sleep(5); +SET search_path TO s7; +CREATE TABLE a (i int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +CREATE TABLE b (i int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
+INSERT INTO a SELECT generate_series(1,100000000); +ERROR: schema's disk space quota exceeded with name:s7 +SELECT pg_sleep(5); pg_sleep ---------- (1 row) -- expect insert fail -insert into a select generate_series(1,30); +INSERT INTO a SELECT generate_series(1,30); ERROR: schema's disk space quota exceeded with name:s7 -insert into b select generate_series(1,30); +INSERT INTO b SELECT generate_series(1,30); ERROR: schema's disk space quota exceeded with name:s7 -truncate table a; -select pg_sleep(5); +TRUNCATE TABLE a; +SELECT pg_sleep(5); pg_sleep ---------- (1 row) -- expect insert succeed -insert into a select generate_series(1,30); -insert into b select generate_series(1,30); -drop table a, b; -reset search_path; -drop schema s7; +INSERT INTO a SELECT generate_series(1,30); +INSERT INTO b SELECT generate_series(1,30); +DROP TABLE a, b; +RESET search_path; +DROP SCHEMA s7; diff --git a/expected/test_update.out b/expected/test_update.out index cffde58595d..b95095fa857 100644 --- a/expected/test_update.out +++ b/expected/test_update.out @@ -1,23 +1,26 @@ -- Test Update -create schema s4; -select diskquota.set_schema_quota('s4', '1 MB'); +CREATE SCHEMA s4; +SELECT diskquota.set_schema_quota('s4', '1 MB'); set_schema_quota ------------------ (1 row) -set search_path to s4; -create table a(i int); -insert into a select generate_series(1,50000); -select pg_sleep(5); +SET search_path TO s4; +CREATE TABLE a(i int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +INSERT INTO a SELECT generate_series(1,100000000); +ERROR: schema's disk space quota exceeded with name:s4 +SELECT pg_sleep(5); pg_sleep ---------- (1 row) -- expect update fail. -update a set i = 100; +UPDATE a SET i = 100; ERROR: schema's disk space quota exceeded with name:s4 -drop table a; -reset search_path; -drop schema s4; +DROP TABLE a; +RESET search_path; +DROP SCHEMA s4; diff --git a/expected/test_vacuum.out b/expected/test_vacuum.out index d5256b0a640..197c60d1aa5 100644 --- a/expected/test_vacuum.out +++ b/expected/test_vacuum.out @@ -1,38 +1,43 @@ -- Test vacuum full -create schema s6; -select diskquota.set_schema_quota('s6', '1 MB'); +CREATE SCHEMA s6; +SELECT diskquota.set_schema_quota('s6', '1 MB'); set_schema_quota ------------------ (1 row) -set search_path to s6; -create table a (i int); -create table b (i int); -insert into a select generate_series(1,50000); -select pg_sleep(5); +SET search_path TO s6; +CREATE TABLE a (i int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +CREATE TABLE b (i int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
+INSERT INTO a SELECT generate_series(1,100000000); +ERROR: schema's disk space quota exceeded with name:s6 +SELECT pg_sleep(5); pg_sleep ---------- (1 row) -- expect insert fail -insert into a select generate_series(1,10); +INSERT INTO a SELECT generate_series(1,10); ERROR: schema's disk space quota exceeded with name:s6 -- expect insert fail -insert into b select generate_series(1,10); +INSERT INTO b SELECT generate_series(1,10); ERROR: schema's disk space quota exceeded with name:s6 -delete from a where i > 10; -vacuum full a; -select pg_sleep(5); +DELETE FROM a WHERE i > 10; +VACUUM FULL a; +SELECT pg_sleep(5); pg_sleep ---------- (1 row) -- expect insert succeed -insert into a select generate_series(1,10); -insert into b select generate_series(1,10); -drop table a, b; -reset search_path; -drop schema s6; +INSERT INTO a SELECT generate_series(1,10); +INSERT INTO b SELECT generate_series(1,10); +DROP TABLE a, b; +RESET search_path; +DROP SCHEMA s6; diff --git a/gp_activetable.c b/gp_activetable.c new file mode 100644 index 00000000000..cbe0c1a80b9 --- /dev/null +++ b/gp_activetable.c @@ -0,0 +1,797 @@ +/* ------------------------------------------------------------------------- + * + * activetable.c + * + * This code is responsible for detecting active table for databases + * + * Copyright (c) 2018-Present Pivotal Software, Inc. + * + * IDENTIFICATION + * gpcontrib/gp_diskquota/gp_activetable.c + * + * ------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "access/htup_details.h" +#include "catalog/indexing.h" +#include "catalog/pg_class.h" +#include "catalog/pg_type.h" +#include "cdb/cdbbufferedappend.h" +#include "cdb/cdbdisp_query.h" +#include "cdb/cdbdispatchresult.h" +#include "cdb/cdbvars.h" +#include "executor/spi.h" +#include "fmgr.h" +#include "funcapi.h" +#include "libpq-fe.h" +#include "miscadmin.h" +#include "storage/shmem.h" +#include "storage/smgr.h" +#include "utils/array.h" +#include "utils/builtins.h" +#include "utils/fmgroids.h" +#include "utils/lsyscache.h" +#include "utils/relfilenodemap.h" + +#include "gp_activetable.h" +#include "diskquota.h" + + +/* The results set cache for SRF call*/ +typedef struct DiskQuotaSetOFCache +{ + HTAB *result; + HASH_SEQ_STATUS pos; +} DiskQuotaSetOFCache; + +HTAB *active_tables_map = NULL; + +/* active table hooks*/ +static BufferedAppendWrite_hook_type prev_BufferedAppendWrite_hook = NULL; +static smgrcreate_hook_type prev_smgrcreate_hook = NULL; +static smgrextend_hook_type prev_smgrextend_hook = NULL; +static smgrtruncate_hook_type prev_smgrtruncate_hook = NULL; +static void active_table_hook_smgrcreate(SMgrRelation reln, + ForkNumber forknum, + bool isRedo); +static void active_table_hook_smgrextend(SMgrRelation reln, + ForkNumber forknum, + BlockNumber blocknum, + char *buffer, + bool skipFsync); +static void active_table_hook_smgrtruncate(SMgrRelation reln, + ForkNumber forknum, + BlockNumber blocknum); + +PG_FUNCTION_INFO_V1(diskquota_fetch_table_stat); + +static HTAB *get_active_tables_stats(ArrayType *array); +static HTAB *get_all_tables_size(void); +static HTAB *get_active_tables(void); +static StringInfoData convert_map_to_string(HTAB *active_list); +static HTAB *pull_active_list_from_seg(void); +static void report_active_table_SmgrStat(SMgrRelation reln); +static void report_active_table_AO(BufferedAppend * bufferedAppend); + +void init_active_table_hook(void); +void init_shm_worker_active_tables(void); +void init_lock_active_tables(void); +HTAB 
*gp_fetch_active_tables(bool force); + +/* + * Register smgr hook to detect active table. + */ +void +init_active_table_hook(void) +{ + prev_smgrcreate_hook = smgrcreate_hook; + smgrcreate_hook = active_table_hook_smgrcreate; + + prev_smgrextend_hook = smgrextend_hook; + smgrextend_hook = active_table_hook_smgrextend; + + prev_smgrtruncate_hook = smgrtruncate_hook; + smgrtruncate_hook = active_table_hook_smgrtruncate; + + prev_BufferedAppendWrite_hook = BufferedAppendWrite_hook; + BufferedAppendWrite_hook = report_active_table_AO; +} + +static void +active_table_hook_smgrcreate(SMgrRelation reln, + ForkNumber forknum, + bool isRedo) +{ + if (prev_smgrcreate_hook) + (*prev_smgrcreate_hook) (reln, forknum, isRedo); + + report_active_table_SmgrStat(reln); +} + +static void +active_table_hook_smgrextend(SMgrRelation reln, + ForkNumber forknum, + BlockNumber blocknum, + char *buffer, + bool skipFsync) +{ + if (prev_smgrextend_hook) + (*prev_smgrextend_hook) (reln, forknum, blocknum, buffer, skipFsync); + + report_active_table_SmgrStat(reln); +} + +static void +active_table_hook_smgrtruncate(SMgrRelation reln, + ForkNumber forknum, + BlockNumber blocknum) +{ + if (prev_smgrtruncate_hook) + (*prev_smgrtruncate_hook) (reln, forknum, blocknum); + + report_active_table_SmgrStat(reln); +} + +/* + * Init active_tables_map shared memory + */ +void +init_shm_worker_active_tables(void) +{ + HASHCTL ctl; + + memset(&ctl, 0, sizeof(ctl)); + + ctl.keysize = sizeof(DiskQuotaActiveTableFileEntry); + ctl.entrysize = sizeof(DiskQuotaActiveTableFileEntry); + ctl.hash = tag_hash; + + active_tables_map = ShmemInitHash("active_tables", + diskquota_max_active_tables, + diskquota_max_active_tables, + &ctl, + HASH_ELEM | HASH_FUNCTION); +} + +/* + * Init lock of active table map + */ +void +init_lock_active_tables(void) +{ + bool found = false; + + active_table_shm_lock = ShmemInitStruct("disk_quota_active_table_shm_lock", + sizeof(disk_quota_shared_state), + &found); + + if (!found) + { + active_table_shm_lock->lock = LWLockAssign(); + } +} + +/* + * Common function for reporting active tables, used by smgr and ao + */ + +static void +report_active_table_helper(const RelFileNodeBackend *relFileNode) +{ + DiskQuotaActiveTableFileEntry *entry; + DiskQuotaActiveTableFileEntry item; + bool found = false; + + MemSet(&item, 0, sizeof(DiskQuotaActiveTableFileEntry)); + item.dbid = relFileNode->node.dbNode; + item.relfilenode = relFileNode->node.relNode; + item.tablespaceoid = relFileNode->node.spcNode; + + LWLockAcquire(active_table_shm_lock->lock, LW_EXCLUSIVE); + entry = hash_search(active_tables_map, &item, HASH_ENTER_NULL, &found); + if (entry && !found) + *entry = item; + LWLockRelease(active_table_shm_lock->lock); + + if (!found && entry == NULL) + { + /* + * We may miss the file size change of this relation at current + * refresh interval. + */ + ereport(WARNING, (errmsg("Shared memory is not enough for active tables."))); + } +} + +/* + * Hook function in smgr to report the active table + * information and store it in active table shared memory. + * The diskquota worker will consume these active tables and + * recalculate their file sizes to update the diskquota model.
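+ * + * Illustrative flow (no specific oids implied): an INSERT that extends a + * relation fires the smgrextend hook, which records the tuple + * (dbid, tablespaceoid, relfilenode) in active_tables_map; on the next + * refresh cycle the worker maps that relfilenode back to a table oid and + * recalculates only that table's size instead of rescanning the whole + * database.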
+ */ +static void +report_active_table_SmgrStat(SMgrRelation reln) +{ + report_active_table_helper(&reln->smgr_rnode); +} + +/* + * Hook function in BufferedAppendWrite to report the active table, used by + * diskquota + */ +static void +report_active_table_AO(BufferedAppend * bufferedAppend) +{ + if (prev_BufferedAppendWrite_hook) + (*prev_BufferedAppendWrite_hook) (bufferedAppend); + report_active_table_helper(&bufferedAppend->relFileNode); +} + +/* + * Function to get the table sizes from each segment. + * There are three modes: FETCH_ALL_SIZE calculates disk usage for all + * the tables, which is called when initializing the disk quota model; + * FETCH_ACTIVE_OID returns the oids of the active tables; + * FETCH_ACTIVE_SIZE calculates the sizes of the given active tables, + * which is called when refreshing the disk quota model. + */ +Datum +diskquota_fetch_table_stat(PG_FUNCTION_ARGS) +{ + FuncCallContext *funcctx; + int32 model = PG_GETARG_INT32(0); + AttInMetadata *attinmeta; + bool isFirstCall = true; + + HTAB *localCacheTable = NULL; + DiskQuotaSetOFCache *cache = NULL; + DiskQuotaActiveTableEntry *results_entry = NULL; + + /* Init the container list in the first call and get the results back */ + if (SRF_IS_FIRSTCALL()) + { + MemoryContext oldcontext; + TupleDesc tupdesc; + + /* create a function context for cross-call persistence */ + funcctx = SRF_FIRSTCALL_INIT(); + + /* switch to memory context appropriate for multiple function calls */ + oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx); + + if (Gp_role == GP_ROLE_DISPATCH || Gp_role == GP_ROLE_UTILITY) + { + ereport(ERROR, (errmsg("This function must not be called on master or by user"))); + } + + switch (model) + { + case FETCH_ALL_SIZE: + localCacheTable = get_all_tables_size(); + break; + case FETCH_ACTIVE_OID: + localCacheTable = get_active_tables(); + break; + case FETCH_ACTIVE_SIZE: + localCacheTable = get_active_tables_stats(PG_GETARG_ARRAYTYPE_P(1)); + break; + default: + ereport(ERROR, (errmsg("Unknown mode number, transaction will be aborted"))); + break; + + } + + /* + * total number of active tables to be returned, each tuple contains + * one active table stat + */ + funcctx->max_calls = (uint32) hash_get_num_entries(localCacheTable); + + /* + * prepare attribute metadata for next calls that generate the tuple + */ + + tupdesc = CreateTemplateTupleDesc(2, false); + TupleDescInitEntry(tupdesc, (AttrNumber) 1, "TABLE_OID", + OIDOID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber) 2, "TABLE_SIZE", + INT8OID, -1, 0); + + attinmeta = TupleDescGetAttInMetadata(tupdesc); + funcctx->attinmeta = attinmeta; + + /* Prepare SetOf results HTAB */ + cache = (DiskQuotaSetOFCache *) palloc(sizeof(DiskQuotaSetOFCache)); + cache->result = localCacheTable; + hash_seq_init(&(cache->pos), localCacheTable); + + MemoryContextSwitchTo(oldcontext); + } + else + { + isFirstCall = false; + } + + funcctx = SRF_PERCALL_SETUP(); + + if (isFirstCall) + { + funcctx->user_fctx = (void *) cache; + } + else + { + cache = (DiskQuotaSetOFCache *) funcctx->user_fctx; + } + + /* return the results back to SPI caller */ + while ((results_entry = (DiskQuotaActiveTableEntry *) hash_seq_search(&(cache->pos))) != NULL) + { + Datum result; + Datum values[2]; + bool nulls[2]; + HeapTuple tuple; + + memset(values, 0, sizeof(values)); + memset(nulls, false, sizeof(nulls)); + + values[0] = ObjectIdGetDatum(results_entry->tableoid); + values[1] = Int64GetDatum(results_entry->tablesize); + + tuple = heap_form_tuple(funcctx->attinmeta->tupdesc, values, nulls); + + result = HeapTupleGetDatum(tuple); + + SRF_RETURN_NEXT(funcctx, result); + } + + /* finished, do the cleanup */ + hash_destroy(cache->result); + pfree(cache); + SRF_RETURN_DONE(funcctx); +} + +/* + * Call pg_total_relation_size to calculate the + * active table size on each segment. + */ +static HTAB * +get_active_tables_stats(ArrayType *array) +{ + int ndim = ARR_NDIM(array); + int *dims = ARR_DIMS(array); + int nitems; + int16 typlen; + bool typbyval; + char typalign; + char *ptr; + bits8 *bitmap; + int bitmask; + int i; + Oid relOid; + HTAB *local_table = NULL; + HASHCTL ctl; + DiskQuotaActiveTableEntry *entry; + + Assert(ARR_ELEMTYPE(array) == OIDOID); + + nitems = ArrayGetNItems(ndim, dims); + + get_typlenbyvalalign(ARR_ELEMTYPE(array), + &typlen, &typbyval, &typalign); + + + ptr = ARR_DATA_PTR(array); + bitmap = ARR_NULLBITMAP(array); + bitmask = 1; + + memset(&ctl, 0, sizeof(ctl)); + ctl.keysize = sizeof(Oid); + ctl.entrysize = sizeof(DiskQuotaActiveTableEntry); + ctl.hcxt = CurrentMemoryContext; + ctl.hash = oid_hash; + + local_table = hash_create("local table map", + 1024, + &ctl, + HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); + + for (i = 0; i < nitems; i++) + { + if (bitmap && (*bitmap & bitmask) == 0) + { + continue; + } + else + { + relOid = DatumGetObjectId(fetch_att(ptr, typbyval, typlen)); + + entry = (DiskQuotaActiveTableEntry *) hash_search(local_table, &relOid, HASH_ENTER, NULL); + entry->tableoid = relOid; + + /* + * avoid generating an ERROR if relOid does not exist (i.e. the + * table has been dropped) + */ + PG_TRY(); + { + entry->tablesize = (Size) DatumGetInt64(DirectFunctionCall1(pg_total_relation_size, + ObjectIdGetDatum(relOid))); + } + PG_CATCH(); + { + FlushErrorState(); + entry->tablesize = 0; + } + PG_END_TRY(); + + ptr = att_addlength_pointer(ptr, typlen, ptr); + ptr = (char *) att_align_nominal(ptr, typalign); + + } + + /* advance bitmap pointer if any */ + if (bitmap) + { + bitmask <<= 1; + if (bitmask == 0x100) + { + bitmap++; + bitmask = 1; + } + } + } + + return local_table; +} + + +HTAB * +get_all_tables_size(void) +{ + HTAB *local_table_stats_map = NULL; + HASHCTL ctl; + HeapTuple tuple; + Relation classRel; + HeapScanDesc relScan; + + + memset(&ctl, 0, sizeof(ctl)); + ctl.keysize = sizeof(Oid); + ctl.entrysize = sizeof(DiskQuotaActiveTableEntry); + ctl.hcxt = CurrentMemoryContext; + ctl.hash = oid_hash; + + local_table_stats_map = hash_create("local active table map with relfilenode info", + 1024, + &ctl, + HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); + + + classRel = heap_open(RelationRelationId, AccessShareLock); + relScan = heap_beginscan_catalog(classRel, 0, NULL); + + + while ((tuple = heap_getnext(relScan, ForwardScanDirection)) != NULL) + { + Oid relOid; + DiskQuotaActiveTableEntry *entry; + + Form_pg_class classForm = (Form_pg_class) GETSTRUCT(tuple); + + if (classForm->relkind != RELKIND_RELATION && + classForm->relkind != RELKIND_MATVIEW) + continue; + relOid = HeapTupleGetOid(tuple); + + /* ignore system table */ + if (relOid < FirstNormalObjectId) + continue; + + entry = (DiskQuotaActiveTableEntry *) hash_search(local_table_stats_map, &relOid, HASH_ENTER, NULL); + + entry->tableoid = relOid; + entry->tablesize = (Size) DatumGetInt64(DirectFunctionCall1(pg_total_relation_size, + ObjectIdGetDatum(relOid))); + + } + + heap_endscan(relScan); + heap_close(classRel, AccessShareLock); + + return local_table_stats_map; +} + + +/* + * Get local active table with table oid and table size info. + * This function first copies active table map from shared memory + * to local active table map with relfilenode info.
Then traverses + * the local map and finds the corresponding table oid and table file + * size. Finally stores them into the local active table map and + * returns it. + */ +HTAB * +get_active_tables(void) +{ + HASHCTL ctl; + HTAB *local_active_table_file_map = NULL; + HTAB *local_active_table_stats_map = NULL; + HASH_SEQ_STATUS iter; + DiskQuotaActiveTableFileEntry *active_table_file_entry; + DiskQuotaActiveTableEntry *active_table_entry; + + Oid relOid; + + memset(&ctl, 0, sizeof(ctl)); + ctl.keysize = sizeof(DiskQuotaActiveTableFileEntry); + ctl.entrysize = sizeof(DiskQuotaActiveTableFileEntry); + ctl.hcxt = CurrentMemoryContext; + ctl.hash = tag_hash; + + local_active_table_file_map = hash_create("local active table map with relfilenode info", + 1024, + &ctl, + HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); + + /* Move active table from shared memory to local active table map */ + LWLockAcquire(active_table_shm_lock->lock, LW_EXCLUSIVE); + + hash_seq_init(&iter, active_tables_map); + + while ((active_table_file_entry = (DiskQuotaActiveTableFileEntry *) hash_seq_search(&iter)) != NULL) + { + bool found; + DiskQuotaActiveTableFileEntry *entry; + + if (active_table_file_entry->dbid != MyDatabaseId) + { + continue; + } + + /* Add the active table entry into local hash table */ + entry = hash_search(local_active_table_file_map, active_table_file_entry, HASH_ENTER, &found); + if (entry) + *entry = *active_table_file_entry; + hash_search(active_tables_map, active_table_file_entry, HASH_REMOVE, NULL); + } + LWLockRelease(active_table_shm_lock->lock); + + memset(&ctl, 0, sizeof(ctl)); + ctl.keysize = sizeof(Oid); + ctl.entrysize = sizeof(DiskQuotaActiveTableEntry); + ctl.hcxt = CurrentMemoryContext; + ctl.hash = oid_hash; + + local_active_table_stats_map = hash_create("local active table map with relfilenode info", + 1024, + &ctl, + HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); + + /* traverse local active table map and calculate their file size. */ + hash_seq_init(&iter, local_active_table_file_map); + + /* + * scan whole local map, get the oid of each table and calculate the size + * of them + */ + while ((active_table_file_entry = (DiskQuotaActiveTableFileEntry *) hash_seq_search(&iter)) != NULL) + { + bool found; + + relOid = RelidByRelfilenode(active_table_file_entry->tablespaceoid, active_table_file_entry->relfilenode); + + active_table_entry = hash_search(local_active_table_stats_map, &relOid, HASH_ENTER, &found); + if (active_table_entry) + { + active_table_entry->tableoid = relOid; + active_table_entry->tablesize = 0; + } + } + elog(DEBUG1, "active table number is:%ld", hash_get_num_entries(local_active_table_file_map)); + hash_destroy(local_active_table_file_map); + return local_active_table_stats_map; +} + + +/* + * Worker process at master needs to collect + * active table disk usage from all the segments, + * and aggregate the per-segment table sizes + * to obtain the real table size at cluster level.
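+ * + * Worked example with made-up sizes: if the FETCH_ACTIVE_SIZE query + * returns 40 MB for a table on seg0 and 24 MB for the same oid on seg1, + * the entry kept on master records 64 MB as the cluster-level size of + * that table.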
+ */ +HTAB * +gp_fetch_active_tables(bool force) +{ + CdbPgResults cdb_pgresults = {NULL, 0}; + int i, + j; + char *sql; + HTAB *local_table_stats_map = NULL; + HASHCTL ctl; + HTAB *local_active_table_maps; + StringInfoData buffer; + StringInfoData map_string; + + Assert(Gp_role == GP_ROLE_DISPATCH); + + memset(&ctl, 0, sizeof(ctl)); + ctl.keysize = sizeof(Oid); + ctl.entrysize = sizeof(DiskQuotaActiveTableEntry); + ctl.hcxt = CurrentMemoryContext; + ctl.hash = oid_hash; + + local_table_stats_map = hash_create("local active table map with relfilenode info", + 1024, + &ctl, + HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); + + if (force) + { + sql = "select * from diskquota.diskquota_fetch_table_stat(0, '{}'::oid[])"; + } + else + { + local_active_table_maps = pull_active_list_from_seg(); + map_string = convert_map_to_string(local_active_table_maps); + initStringInfo(&buffer); + appendStringInfo(&buffer, "select * from diskquota.diskquota_fetch_table_stat(2, '%s'::oid[])", + map_string.data); + sql = buffer.data; + } + + elog(DEBUG1, "CHECK SPI QUERY is %s", sql); + + CdbDispatchCommand(sql, DF_NONE, &cdb_pgresults); + + /* collect data from each segment */ + for (i = 0; i < cdb_pgresults.numResults; i++) + { + + Size tableSize; + bool found; + Oid tableOid; + DiskQuotaActiveTableEntry *entry; + + struct pg_result *pgresult = cdb_pgresults.pg_results[i]; + + if (PQresultStatus(pgresult) != PGRES_TUPLES_OK) + { + cdbdisp_clearCdbPgResults(&cdb_pgresults); + ereport(ERROR, + (errmsg("unexpected result from segment: %d", + PQresultStatus(pgresult)))); + } + + for (j = 0; j < PQntuples(pgresult); j++) + { + tableOid = atooid(PQgetvalue(pgresult, j, 0)); + tableSize = (Size) atoll(PQgetvalue(pgresult, j, 1)); + + entry = (DiskQuotaActiveTableEntry *) hash_search(local_table_stats_map, &tableOid, HASH_ENTER, &found); + + if (!found) + { + entry->tableoid = tableOid; + entry->tablesize = tableSize; + } + else + { + entry->tablesize = entry->tablesize + tableSize; + } + + } + + } + cdbdisp_clearCdbPgResults(&cdb_pgresults); + return local_table_stats_map; +} + + +/* + * Convert a hash map with oids into a string array + * This function is used to prepare the second array parameter + * of function diskquota_fetch_table_stat. + */ +static StringInfoData +convert_map_to_string(HTAB *active_list) +{ + HASH_SEQ_STATUS iter; + StringInfoData buffer; + DiskQuotaActiveTableEntry *entry; + uint32 count = 0; + uint32 nitems = hash_get_num_entries(active_list); + + initStringInfo(&buffer); + appendStringInfo(&buffer, "{"); + elog(DEBUG1, "Number of active tables to convert is %ld", hash_get_num_entries(active_list)); + + hash_seq_init(&iter, active_list); + + while ((entry = (DiskQuotaActiveTableEntry *) hash_seq_search(&iter)) != NULL) + { + count++; + if (count != nitems) + { + appendStringInfo(&buffer, "%u,", entry->tableoid); + } + else + { + appendStringInfo(&buffer, "%u", entry->tableoid); + } + } + appendStringInfo(&buffer, "}"); + + return buffer; +} + + +/* + * Get the active table list from all the segments. + * When loading data, there are cases where only a subset of the + * segments do the real loading. As a result, the same table + * may be active on some segments while not active on others. We + * don't store the table size for each segment on master (to save + * memory), so when re-calculating a table size, we need to sum the + * table size over all of the segments.
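+ * + * Hypothetical case: a COPY whose rows all hash to seg0 marks the table + * active only in seg0's shared map; since the FETCH_ACTIVE_OID query is + * dispatched to every segment, the union built here still contains the + * table exactly once, and the later size query sums it across all + * segments.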
+ */ +static HTAB * +pull_active_list_from_seg(void) +{ + CdbPgResults cdb_pgresults = {NULL, 0}; + int i, + j; + char *sql; + HTAB *local_table_stats_map = NULL; + HASHCTL ctl; + DiskQuotaActiveTableEntry *entry; + + memset(&ctl, 0, sizeof(ctl)); + ctl.keysize = sizeof(Oid); + ctl.entrysize = sizeof(DiskQuotaActiveTableEntry); + ctl.hcxt = CurrentMemoryContext; + ctl.hash = oid_hash; + + local_table_stats_map = hash_create("local active table map with relfilenode info", + 1024, + &ctl, + HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); + + + sql = "select * from diskquota.diskquota_fetch_table_stat(1, '{}'::oid[])"; + + CdbDispatchCommand(sql, DF_NONE, &cdb_pgresults); + + for (i = 0; i < cdb_pgresults.numResults; i++) + { + + Oid tableOid; + bool found; + + struct pg_result *pgresult = cdb_pgresults.pg_results[i]; + + if (PQresultStatus(pgresult) != PGRES_TUPLES_OK) + { + cdbdisp_clearCdbPgResults(&cdb_pgresults); + ereport(ERROR, + (errmsg("unexpected result from segment: %d", + PQresultStatus(pgresult)))); + } + + for (j = 0; j < PQntuples(pgresult); j++) + { + tableOid = atooid(PQgetvalue(pgresult, j, 0)); + + entry = (DiskQuotaActiveTableEntry *) hash_search(local_table_stats_map, &tableOid, HASH_ENTER, &found); + + if (!found) + { + entry->tableoid = tableOid; + entry->tablesize = 0; + } + + } + } + cdbdisp_clearCdbPgResults(&cdb_pgresults); + + elog(DEBUG1, "The number of active table is %ld", hash_get_num_entries(local_table_stats_map)); + return local_table_stats_map; +} diff --git a/activetable.h b/gp_activetable.h similarity index 52% rename from activetable.h rename to gp_activetable.h index 76577b04de8..01a040493f6 100644 --- a/activetable.h +++ b/gp_activetable.h @@ -1,28 +1,31 @@ #ifndef ACTIVE_TABLE_H #define ACTIVE_TABLE_H +#include "storage/lwlock.h" #include "diskquota.h" /* Cache to detect the active table list */ typedef struct DiskQuotaActiveTableFileEntry { - Oid dbid; - Oid relfilenode; - Oid tablespaceoid; -} DiskQuotaActiveTableFileEntry; + Oid dbid; + Oid relfilenode; + Oid tablespaceoid; +} DiskQuotaActiveTableFileEntry; typedef struct DiskQuotaActiveTableEntry { - Oid tableoid; - Size tablesize; -} DiskQuotaActiveTableEntry; + Oid tableoid; + Size tablesize; +} DiskQuotaActiveTableEntry; - -extern HTAB* pg_fetch_active_tables(bool); +extern HTAB *gp_fetch_active_tables(bool force); extern void init_active_table_hook(void); extern void init_shm_worker_active_tables(void); extern void init_lock_active_tables(void); extern HTAB *active_tables_map; -extern disk_quota_shared_state *active_table_shm_lock; +extern disk_quota_shared_state * active_table_shm_lock; + +#define atooid(x) ((Oid) strtoul((x), NULL, 10)) + #endif diff --git a/patch/pg_hooks.patch b/patch/pg_hooks.patch deleted file mode 100644 index 61dacc784ed..00000000000 --- a/patch/pg_hooks.patch +++ /dev/null @@ -1,191 +0,0 @@ -From 3d275c78b304b308d288bd227f6dcab45dc5f595 Mon Sep 17 00:00:00 2001 -From: Hubert Zhang -Date: Tue, 6 Nov 2018 06:51:22 +0000 -Subject: [PATCH] Add hooks for diskquota extension. - -Add ReadBufferExtend_hook() and smgr*_hook() -hook points to extend logic of storage -management. 
- -Co-authored-by: Haozhou Wang -Co-authored-by: Hubert Zhang -Co-authored-by: Hao Wu ---- - src/backend/storage/buffer/bufmgr.c | 14 ++++++++++++++ - src/backend/storage/smgr/smgr.c | 33 +++++++++++++++++++++++++++++++++ - src/include/storage/bufmgr.h | 10 ++++++++++ - src/include/storage/smgr.h | 18 ++++++++++++++++++ - 4 files changed, 75 insertions(+) - -diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c -index 01eabe5706..5499495506 100644 ---- a/src/backend/storage/buffer/bufmgr.c -+++ b/src/backend/storage/buffer/bufmgr.c -@@ -104,6 +104,13 @@ typedef struct CkptTsStatus - int index; - } CkptTsStatus; - -+/* -+ * Hook for plugins to check permissions when doing a buffer extend. -+ * One example is to check whether there is additional disk quota for -+ * the table to be inserted. -+ */ -+ReadBufferExtended_hook_type ReadBufferExtended_hook = NULL; -+ - /* GUC variables */ - bool zero_damaged_pages = false; - int bgwriter_lru_maxpages = 100; -@@ -661,6 +668,13 @@ ReadBufferExtended(Relation reln, ForkNumber forkNum, BlockNumber blockNum, - * miss. - */ - pgstat_count_buffer_read(reln); -+ -+ /* hook function for doing a buffer extend */ -+ if (blockNum == P_NEW && ReadBufferExtended_hook) -+ { -+ (*ReadBufferExtended_hook)(reln, forkNum, blockNum, mode, strategy); -+ } -+ - buf = ReadBuffer_common(reln->rd_smgr, reln->rd_rel->relpersistence, - forkNum, blockNum, mode, strategy, &hit); - if (hit) -diff --git a/src/backend/storage/smgr/smgr.c b/src/backend/storage/smgr/smgr.c -index 189342ef86..fa36a18e15 100644 ---- a/src/backend/storage/smgr/smgr.c -+++ b/src/backend/storage/smgr/smgr.c -@@ -90,6 +90,16 @@ static const f_smgr smgrsw[] = { - - static const int NSmgr = lengthof(smgrsw); - -+/* -+ * Hook for plugins to extend smgr functions. -+ * for example, collect statistics from smgr functions -+ * via recording the active relfilenode information. -+ */ -+smgrcreate_hook_type smgrcreate_hook = NULL; -+smgrextend_hook_type smgrextend_hook = NULL; -+smgrtruncate_hook_type smgrtruncate_hook = NULL; -+smgrdounlinkall_hook_type smgrdounlinkall_hook = NULL; -+ - - /* - * Each backend has a hashtable that stores all extant SMgrRelation objects. -@@ -397,6 +407,11 @@ smgrcreate(SMgrRelation reln, ForkNumber forknum, bool isRedo) - if (isRedo && reln->md_num_open_segs[forknum] > 0) - return; - -+ if (smgrcreate_hook) -+ { -+ (*smgrcreate_hook)(reln, forknum, isRedo); -+ } -+ - /* - * We may be using the target table space for the first time in this - * database, so create a per-database subdirectory if needed. 
-@@ -411,6 +426,7 @@ smgrcreate(SMgrRelation reln, ForkNumber forknum, bool isRedo) - isRedo); - - smgrsw[reln->smgr_which].smgr_create(reln, forknum, isRedo); -+ - } - - /* -@@ -492,6 +508,11 @@ smgrdounlinkall(SMgrRelation *rels, int nrels, bool isRedo) - if (nrels == 0) - return; - -+ if (smgrdounlinkall_hook) -+ { -+ (*smgrdounlinkall_hook)(rels, nrels, isRedo); -+ } -+ - /* - * create an array which contains all relations to be dropped, and close - * each relation's forks at the smgr level while at it -@@ -615,8 +636,14 @@ void - smgrextend(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, - char *buffer, bool skipFsync) - { -+ if (smgrextend_hook) -+ { -+ (*smgrextend_hook)(reln, forknum, blocknum, buffer, skipFsync); -+ } -+ - smgrsw[reln->smgr_which].smgr_extend(reln, forknum, blocknum, - buffer, skipFsync); -+ - } - - /* -@@ -698,6 +725,11 @@ smgrnblocks(SMgrRelation reln, ForkNumber forknum) - void - smgrtruncate(SMgrRelation reln, ForkNumber forknum, BlockNumber nblocks) - { -+ if (smgrtruncate_hook) -+ { -+ (*smgrtruncate_hook)(reln, forknum, nblocks); -+ } -+ - /* - * Get rid of any buffers for the about-to-be-deleted blocks. bufmgr will - * just drop them without bothering to write the contents. -@@ -720,6 +752,7 @@ smgrtruncate(SMgrRelation reln, ForkNumber forknum, BlockNumber nblocks) - * Do the truncation. - */ - smgrsw[reln->smgr_which].smgr_truncate(reln, forknum, nblocks); -+ - } - - /* -diff --git a/src/include/storage/bufmgr.h b/src/include/storage/bufmgr.h -index 3cce3906a0..f1dcc77bf7 100644 ---- a/src/include/storage/bufmgr.h -+++ b/src/include/storage/bufmgr.h -@@ -159,6 +159,16 @@ extern PGDLLIMPORT int32 *LocalRefCount; - */ - #define BufferGetPage(buffer) ((Page)BufferGetBlock(buffer)) - -+/* -+ * Hook for plugins to add external logic when doing a buffer extend. -+ * One example is to check whether there is additional disk quota for -+ * the table to be inserted. -+ */ -+typedef bool (*ReadBufferExtended_hook_type) (Relation reln, -+ ForkNumber forkNum, BlockNumber blockNum, -+ ReadBufferMode mode, BufferAccessStrategy strategy); -+extern PGDLLIMPORT ReadBufferExtended_hook_type ReadBufferExtended_hook; -+ - /* - * prototypes for functions in bufmgr.c - */ -diff --git a/src/include/storage/smgr.h b/src/include/storage/smgr.h -index c843bbc969..d070b3d573 100644 ---- a/src/include/storage/smgr.h -+++ b/src/include/storage/smgr.h -@@ -144,5 +144,23 @@ extern void RememberFsyncRequest(RelFileNode rnode, ForkNumber forknum, - extern void ForgetRelationFsyncRequests(RelFileNode rnode, ForkNumber forknum); - extern void ForgetDatabaseFsyncRequests(Oid dbid); - extern void DropRelationFiles(RelFileNode *delrels, int ndelrels, bool isRedo); -+/* -+ * Hook for plugins to extend smgr functions. -+ * for example, collect statistics from smgr functions -+ * via recording the active relfilenode information. 
-+ */ -+typedef void (*smgrcreate_hook_type)(SMgrRelation reln, ForkNumber forknum, -+ bool isRedo); -+extern PGDLLIMPORT smgrcreate_hook_type smgrcreate_hook; -+typedef void (*smgrextend_hook_type)(SMgrRelation reln, ForkNumber forknum, -+ BlockNumber blocknum, -+ char *buffer, bool skipFsync); -+extern PGDLLIMPORT smgrextend_hook_type smgrextend_hook; -+typedef void (*smgrtruncate_hook_type)(SMgrRelation reln, ForkNumber forknum, -+ BlockNumber nblocks); -+extern PGDLLIMPORT smgrtruncate_hook_type smgrtruncate_hook; -+typedef void (*smgrdounlinkall_hook_type)(SMgrRelation *rels, int nrels, -+ bool isRedo); -+extern PGDLLIMPORT smgrdounlinkall_hook_type smgrdounlinkall_hook; - - #endif /* SMGR_H */ --- -2.16.2 - diff --git a/quotamodel.c b/quotamodel.c index 40fb2603612..60f40bc96d4 100644 --- a/quotamodel.c +++ b/quotamodel.c @@ -2,11 +2,13 @@ * * quotamodel.c * - * This code is responsible for init disk quota model and refresh disk quota + * This code is responsible for init disk quota model and refresh disk quota * model. * - * Copyright (C) 2013, PostgreSQL Global Development Group + * Copyright (c) 2018-Present Pivotal Software, Inc. * + * IDENTIFICATION + * gpcontrib/gp_diskquota/quotamodel.c * * ------------------------------------------------------------------------- */ @@ -16,6 +18,7 @@ #include "access/htup_details.h" #include "access/reloptions.h" #include "access/transam.h" +#include "access/tupdesc.h" #include "access/xact.h" #include "catalog/namespace.h" #include "catalog/pg_class.h" @@ -37,7 +40,9 @@ #include "utils/snapmgr.h" #include "utils/syscache.h" -#include "activetable.h" +#include + +#include "gp_activetable.h" #include "diskquota.h" /* cluster level max size of black list */ @@ -54,14 +59,17 @@ typedef struct QuotaLimitEntry QuotaLimitEntry; typedef struct BlackMapEntry BlackMapEntry; typedef struct LocalBlackMapEntry LocalBlackMapEntry; -/* local cache of table disk size and corresponding schema and owner */ +/* + * local cache of table disk size and corresponding schema and owner + */ struct TableSizeEntry { Oid reloid; Oid namespaceoid; Oid owneroid; int64 totalsize; - bool is_exist; /* flag used to check whether table is already dropped */ + bool is_exist; /* flag used to check whether table is already + * dropped */ }; /* local cache of namespace disk size */ @@ -96,8 +104,8 @@ struct BlackMapEntry /* local blacklist for which exceed their quota limit */ struct LocalBlackMapEntry { - BlackMapEntry keyitem; - bool isexceeded; + BlackMapEntry keyitem; + bool isexceeded; }; /* using hash table to support incremental update the table size entry.*/ @@ -111,7 +119,7 @@ static HTAB *role_quota_limit_map = NULL; static HTAB *disk_quota_black_map = NULL; static HTAB *local_disk_quota_black_map = NULL; -static disk_quota_shared_state *black_map_shm_lock; +static disk_quota_shared_state * black_map_shm_lock; disk_quota_shared_state *active_table_shm_lock = NULL; static shmem_startup_hook_type prev_shmem_startup_hook = NULL; @@ -142,7 +150,7 @@ DiskQuotaShmemSize(void) Size size; size = MAXALIGN(sizeof(disk_quota_shared_state)); - size = add_size(size, size); // two locks + size = add_size(size, size); /* two locks */ size = add_size(size, hash_estimate_size(MAX_DISK_QUOTA_BLACK_ENTRIES, sizeof(BlackMapEntry))); size = add_size(size, hash_estimate_size(diskquota_max_active_tables, sizeof(DiskQuotaActiveTableEntry))); return size; @@ -159,7 +167,7 @@ disk_quota_shmem_startup(void) HASHCTL hash_ctl; if (prev_shmem_startup_hook) - (*prev_shmem_startup_hook)(); + 
(*prev_shmem_startup_hook) (); black_map_shm_lock = NULL; disk_quota_black_map = NULL; @@ -167,12 +175,12 @@ disk_quota_shmem_startup(void) LWLockAcquire(AddinShmemInitLock, LW_EXCLUSIVE); black_map_shm_lock = ShmemInitStruct("disk_quota_black_map_shm_lock", - sizeof(disk_quota_shared_state), - &found); + sizeof(disk_quota_shared_state), + &found); if (!found) { - black_map_shm_lock->lock = &(GetNamedLWLockTranche("disk_quota_black_map_shm_lock"))->lock; + black_map_shm_lock->lock = LWLockAssign(); } init_lock_active_tables(); @@ -183,10 +191,10 @@ disk_quota_shmem_startup(void) hash_ctl.hash = tag_hash; disk_quota_black_map = ShmemInitHash("blackmap whose quota limitation is reached", - INIT_DISK_QUOTA_BLACK_ENTRIES, - MAX_DISK_QUOTA_BLACK_ENTRIES, - &hash_ctl, - HASH_ELEM | HASH_FUNCTION); + INIT_DISK_QUOTA_BLACK_ENTRIES, + MAX_DISK_QUOTA_BLACK_ENTRIES, + &hash_ctl, + HASH_ELEM | HASH_FUNCTION); init_shm_worker_active_tables(); @@ -202,8 +210,6 @@ init_disk_quota_shmem(void) * resources in pgss_shmem_startup(). */ RequestAddinShmemSpace(DiskQuotaShmemSize()); - RequestNamedLWLockTranche("disk_quota_black_map_shm_lock", 1); - RequestNamedLWLockTranche("disk_quota_active_table_shm_lock", 1); /* * Install startup hook to initialize our shared memory. @@ -220,7 +226,7 @@ init_disk_quota_model(void) { HASHCTL hash_ctl; - /* init hash table for table/schema/role etc.*/ + /* init hash table for table/schema/role etc. */ memset(&hash_ctl, 0, sizeof(hash_ctl)); hash_ctl.keysize = sizeof(Oid); hash_ctl.entrysize = sizeof(TableSizeEntry); @@ -228,9 +234,9 @@ init_disk_quota_model(void) hash_ctl.hash = oid_hash; table_size_map = hash_create("TableSizeEntry map", - 1024 * 8, - &hash_ctl, - HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); + 1024 * 8, + &hash_ctl, + HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); memset(&hash_ctl, 0, sizeof(hash_ctl)); hash_ctl.keysize = sizeof(Oid); @@ -239,9 +245,9 @@ init_disk_quota_model(void) hash_ctl.hash = oid_hash; namespace_size_map = hash_create("NamespaceSizeEntry map", - 1024, - &hash_ctl, - HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); + 1024, + &hash_ctl, + HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); memset(&hash_ctl, 0, sizeof(hash_ctl)); hash_ctl.keysize = sizeof(Oid); @@ -261,14 +267,14 @@ init_disk_quota_model(void) hash_ctl.hash = oid_hash; namespace_quota_limit_map = hash_create("Namespace QuotaLimitEntry map", - 1024, - &hash_ctl, - HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); + 1024, + &hash_ctl, + HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); role_quota_limit_map = hash_create("Role QuotaLimitEntry map", - 1024, - &hash_ctl, - HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); + 1024, + &hash_ctl, + HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); memset(&hash_ctl, 0, sizeof(hash_ctl)); hash_ctl.keysize = sizeof(BlackMapEntry); @@ -277,20 +283,20 @@ init_disk_quota_model(void) hash_ctl.hash = tag_hash; local_disk_quota_black_map = hash_create("local blackmap whose quota limitation is reached", - MAX_LOCAL_DISK_QUOTA_BLACK_ENTRIES, - &hash_ctl, - HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); + MAX_LOCAL_DISK_QUOTA_BLACK_ENTRIES, + &hash_ctl, + HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); } /* * diskquota worker will refresh disk quota model - * periodically. It will reload quota setting and + * periodically. It will reload quota setting and * recalculate the changed disk usage. 
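+ * + * One refresh cycle, roughly: load_quotas() reloads + * diskquota.quota_config, calculate_table_disk_usage() re-sizes new and + * active tables, calculate_schema_disk_usage() and + * calculate_role_disk_usage() compare the totals against the limits, and + * flush_local_black_map() publishes any exceeded schemas or roles to the + * shared blacklist.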
*/ void refresh_disk_quota_model(bool force) { - elog(DEBUG1,"check disk quota begin"); + elog(DEBUG1, "check disk quota begin"); StartTransactionCommand(); SPI_connect(); PushActiveSnapshot(GetTransactionSnapshot()); @@ -302,11 +308,11 @@ refresh_disk_quota_model(bool force) SPI_finish(); PopActiveSnapshot(); CommitTransactionCommand(); - elog(DEBUG1,"check disk quota end"); + elog(DEBUG1, "check disk quota end"); } /* - * Update the disk usage of nameapsce and role. + * Update the disk usage of namespace and role. * Put the exceeded namespace and role into shared black map. */ static void @@ -323,29 +329,30 @@ refresh_disk_quota_usage(bool force) /* * Generate the new shared blacklist from the local_black_list which * exceed the quota limit. - * local_balck_list is used to reduce the lock race. + * local_black_list is used to reduce the lock race. */ static void flush_local_black_map(void) { HASH_SEQ_STATUS iter; - LocalBlackMapEntry* localblackentry; - BlackMapEntry* blackentry; - bool found; + LocalBlackMapEntry *localblackentry; + BlackMapEntry *blackentry; + bool found; LWLockAcquire(black_map_shm_lock->lock, LW_EXCLUSIVE); - hash_seq_init(&iter, local_disk_quota_black_map); while ((localblackentry = hash_seq_search(&iter)) != NULL) { if (localblackentry->isexceeded) { - blackentry = (BlackMapEntry*) hash_search(disk_quota_black_map, - (void *) &localblackentry->keyitem, - HASH_ENTER_NULL, &found); + blackentry = (BlackMapEntry *) hash_search(disk_quota_black_map, + (void *) &localblackentry->keyitem, + HASH_ENTER_NULL, &found); if (blackentry == NULL) { - elog(WARNING, "shared disk quota black map size limit reached."); + elog(WARNING, "Shared disk quota black map size limit reached. " + "Some out-of-limit schemas or roles will be lost " + "in blacklist."); } else { @@ -377,30 +384,32 @@ flush_local_black_map(void) * Compare the disk quota limit and current usage of a database object. * Put them into local blacklist if quota limit is exceeded.
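+ * + * Worked example (made-up numbers): with a '1 MB' quota, quota_limit_mb + * is 1; a current_usage of 1572864 bytes becomes current_usage_mb = 1 + * after the integer division by 1024 * 1024, so 1 >= 1 holds and the + * object is blacklisted, while a usage of 1048575 bytes rounds down to + * 0 MB and passes.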
*/ -static void check_disk_quota_by_oid(Oid targetOid, int64 current_usage, QuotaType type) +static void +check_disk_quota_by_oid(Oid targetOid, int64 current_usage, QuotaType type) { - bool found; - int32 quota_limit_mb; - int32 current_usage_mb; - LocalBlackMapEntry* localblackentry; - BlackMapEntry keyitem; + bool found; + int32 quota_limit_mb; + int32 current_usage_mb; + LocalBlackMapEntry *localblackentry; + BlackMapEntry keyitem; + + QuotaLimitEntry *quota_entry; - QuotaLimitEntry* quota_entry; if (type == NAMESPACE_QUOTA) { - quota_entry = (QuotaLimitEntry *)hash_search(namespace_quota_limit_map, - &targetOid, - HASH_FIND, &found); + quota_entry = (QuotaLimitEntry *) hash_search(namespace_quota_limit_map, + &targetOid, + HASH_FIND, &found); } else if (type == ROLE_QUOTA) { - quota_entry = (QuotaLimitEntry *)hash_search(role_quota_limit_map, - &targetOid, - HASH_FIND, &found); + quota_entry = (QuotaLimitEntry *) hash_search(role_quota_limit_map, + &targetOid, + HASH_FIND, &found); } else { - /* skip check if not namespace or role quota*/ + /* skip check if not namespace or role quota */ return; } @@ -411,18 +420,18 @@ static void check_disk_quota_by_oid(Oid targetOid, int64 current_usage, QuotaTyp } quota_limit_mb = quota_entry->limitsize; - current_usage_mb = current_usage / (1024 *1024); - if(current_usage_mb >= quota_limit_mb) + current_usage_mb = current_usage / (1024 * 1024); + if (current_usage_mb >= quota_limit_mb) { memset(&keyitem, 0, sizeof(BlackMapEntry)); keyitem.targetoid = targetOid; keyitem.databaseoid = MyDatabaseId; - keyitem.targettype = (uint32)type; - elog(DEBUG1,"Put object %u to blacklist with quota limit:%d, current usage:%d", - targetOid, quota_limit_mb, current_usage_mb); - localblackentry = (LocalBlackMapEntry*) hash_search(local_disk_quota_black_map, - &keyitem, - HASH_ENTER, &found); + keyitem.targettype = (uint32) type; + elog(DEBUG1, "Put object %u to blacklist with quota limit:%d, current usage:%d", + targetOid, quota_limit_mb, current_usage_mb); + localblackentry = (LocalBlackMapEntry *) hash_search(local_disk_quota_black_map, + &keyitem, + HASH_ENTER, &found); localblackentry->isexceeded = true; } @@ -435,8 +444,8 @@ static void remove_namespace_map(Oid namespaceoid) { hash_search(namespace_size_map, - &namespaceoid, - HASH_REMOVE, NULL); + &namespaceoid, + HASH_REMOVE, NULL); } /* @@ -445,17 +454,19 @@ remove_namespace_map(Oid namespaceoid) static void update_namespace_map(Oid namespaceoid, int64 updatesize) { - bool found; - NamespaceSizeEntry* nsentry; - nsentry = (NamespaceSizeEntry *)hash_search(namespace_size_map, - &namespaceoid, - HASH_ENTER, &found); + bool found; + NamespaceSizeEntry *nsentry; + + nsentry = (NamespaceSizeEntry *) hash_search(namespace_size_map, + &namespaceoid, + HASH_ENTER, &found); if (!found) { nsentry->namespaceoid = namespaceoid; nsentry->totalsize = updatesize; } - else { + else + { nsentry->totalsize += updatesize; } @@ -468,8 +479,8 @@ static void remove_role_map(Oid owneroid) { hash_search(role_size_map, - &owneroid, - HASH_REMOVE, NULL); + &owneroid, + HASH_REMOVE, NULL); } /* @@ -478,17 +489,19 @@ remove_role_map(Oid owneroid) static void update_role_map(Oid owneroid, int64 updatesize) { - bool found; - RoleSizeEntry* rolentry; - rolentry = (RoleSizeEntry *)hash_search(role_size_map, - &owneroid, - HASH_ENTER, &found); + bool found; + RoleSizeEntry *rolentry; + + rolentry = (RoleSizeEntry *) hash_search(role_size_map, + &owneroid, + HASH_ENTER, &found); if (!found) { rolentry->owneroid = owneroid; 
rolentry->totalsize = updatesize; } - else { + else + { rolentry->totalsize += updatesize; } @@ -497,33 +510,33 @@ update_role_map(Oid owneroid, int64 updatesize) /* * Incremental way to update the disk quota of every database objects * Recalculate the table's disk usage when it's a new table or active table. - * Detect the removed table if it's nolonger in pg_class. + * Detect the removed table if it's no longer in pg_class. * If change happens, no matter size change or owner change, * update namespace_size_map and role_size_map correspondingly. - * Parameter 'force' set to true at initialization stage to recalculate + * Parameter 'force' set to true at initialization stage to recalculate * the file size of all the tables. * */ static void calculate_table_disk_usage(bool force) { - bool found; - bool active_tbl_found = false; + bool found; + bool active_tbl_found = false; Relation classRel; HeapTuple tuple; HeapScanDesc relScan; TableSizeEntry *tsentry = NULL; Oid relOid; HASH_SEQ_STATUS iter; - HTAB *local_active_table_stat_map; + HTAB *local_active_table_stat_map; DiskQuotaActiveTableEntry *active_table_entry; classRel = heap_open(RelationRelationId, AccessShareLock); relScan = heap_beginscan_catalog(classRel, 0, NULL); - local_active_table_stat_map = pg_fetch_active_tables(force); + local_active_table_stat_map = gp_fetch_active_tables(force); - /* unset is_exist flag for tsentry in table_size_map*/ + /* unset is_exist flag for tsentry in table_size_map */ hash_seq_init(&iter, table_size_map); while ((tsentry = hash_seq_search(&iter)) != NULL) { @@ -538,33 +551,46 @@ calculate_table_disk_usage(bool force) while ((tuple = heap_getnext(relScan, ForwardScanDirection)) != NULL) { Form_pg_class classForm = (Form_pg_class) GETSTRUCT(tuple); + found = false; if (classForm->relkind != RELKIND_RELATION && classForm->relkind != RELKIND_MATVIEW) continue; - relOid = classForm->oid; + relOid = HeapTupleGetOid(tuple); - /* ignore system table*/ - if(relOid < FirstNormalObjectId) + /* ignore system table */ + if (relOid < FirstNormalObjectId) continue; - tsentry = (TableSizeEntry *)hash_search(table_size_map, - &relOid, - HASH_ENTER, &found); + tsentry = (TableSizeEntry *) hash_search(table_size_map, + &relOid, + HASH_ENTER, &found); + + if (!found) + { + tsentry->totalsize = 0; + tsentry->owneroid = 0; + tsentry->namespaceoid = 0; + tsentry->reloid = 0; + } + /* mark tsentry is_exist */ if (tsentry) tsentry->is_exist = true; active_table_entry = (DiskQuotaActiveTableEntry *) hash_search(local_active_table_stat_map, &relOid, HASH_FIND, &active_tbl_found); - /* skip to recalculate the tables which are not in active list and not at initializatio stage*/ - if(active_tbl_found) + /* + * skip to recalculate the tables which are not in active list and not + * at initializatio stage + */ + if (active_tbl_found) { - /* namespace and owner may be changed since last check*/ + /* namespace and owner may be changed since last check */ if (!found) { - /* if it's a new table*/ + /* if it's a new table */ tsentry->reloid = relOid; tsentry->namespaceoid = classForm->relnamespace; tsentry->owneroid = classForm->relowner; @@ -574,8 +600,12 @@ calculate_table_disk_usage(bool force) } else { - /* if not new table in table_size_map, it must be in active table list */ - int64 oldtotalsize = tsentry->totalsize; + /* + * if not new table in table_size_map, it must be in active + * table list + */ + int64 oldtotalsize = tsentry->totalsize; + tsentry->totalsize = (int64) active_table_entry->tablesize; 
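+ + /* + * Only the size delta is applied below, so the schema and role + * totals stay correct incrementally without rescanning every table. + */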
update_namespace_map(tsentry->namespaceoid, tsentry->totalsize - oldtotalsize); update_role_map(tsentry->owneroid, tsentry->totalsize - oldtotalsize); @@ -590,7 +620,7 @@ calculate_table_disk_usage(bool force) update_namespace_map(tsentry->namespaceoid, tsentry->totalsize); } /* if owner change, transfer the file size */ - if(tsentry->owneroid != classForm->relowner) + if (tsentry->owneroid != classForm->relowner) { update_role_map(tsentry->owneroid, -1 * tsentry->totalsize); tsentry->owneroid = classForm->relowner; @@ -612,8 +642,8 @@ calculate_table_disk_usage(bool force) update_namespace_map(tsentry->namespaceoid, -1 * tsentry->totalsize); hash_search(table_size_map, - &tsentry->reloid, - HASH_REMOVE, NULL); + &tsentry->reloid, + HASH_REMOVE, NULL); continue; } } @@ -623,11 +653,13 @@ calculate_table_disk_usage(bool force) * Check the namespace quota limit and current usage * Remove dropped namespace from namespace_size_map */ -static void calculate_schema_disk_usage(void) +static void +calculate_schema_disk_usage(void) { HeapTuple tuple; HASH_SEQ_STATUS iter; - NamespaceSizeEntry* nsentry; + NamespaceSizeEntry *nsentry; + hash_seq_init(&iter, namespace_size_map); while ((nsentry = hash_seq_search(&iter)) != NULL) @@ -648,11 +680,13 @@ static void calculate_schema_disk_usage(void) * Check the role quota limit and current usage * Remove dropped role from roel_size_map */ -static void calculate_role_disk_usage(void) +static void +calculate_role_disk_usage(void) { HeapTuple tuple; HASH_SEQ_STATUS iter; - RoleSizeEntry* rolentry; + RoleSizeEntry *rolentry; + hash_seq_init(&iter, role_size_map); while ((rolentry = hash_seq_search(&iter)) != NULL) @@ -679,7 +713,7 @@ load_quotas(void) TupleDesc tupdesc; int i; bool found; - QuotaLimitEntry* quota_entry; + QuotaLimitEntry *quota_entry; HASH_SEQ_STATUS iter; RangeVar *rv; @@ -690,28 +724,33 @@ load_quotas(void) if (!rel) { /* configuration table is missing. */ - elog(LOG, "configuration table \"quota_config\" is missing in database \"%s\"," - " please recreate diskquota extension", + elog(LOG, "configuration table \"quota_config\" is missing in database \"%s\"," + " please recreate diskquota extension", get_database_name(MyDatabaseId)); return false; } heap_close(rel, NoLock); - /* clear entries in quota limit map*/ + /* + * TODO: we should skip to reload quota config when there is no change in + * quota.config. A flag in shared memory could be used to detect the quota + * config change. 
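+ * + * A sketch of that idea (not implemented here): set_schema_quota() and + * set_role_quota() would bump a generation counter in shared memory, and + * load_quotas() would rebuild the limit maps only when the counter + * differs from the last value it saw.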
+ */ + /* clear entries in quota limit map */ hash_seq_init(&iter, namespace_quota_limit_map); while ((quota_entry = hash_seq_search(&iter)) != NULL) { (void) hash_search(namespace_quota_limit_map, - (void *) "a_entry->targetoid, - HASH_REMOVE, NULL); + (void *) "a_entry->targetoid, + HASH_REMOVE, NULL); } hash_seq_init(&iter, role_quota_limit_map); while ((quota_entry = hash_seq_search(&iter)) != NULL) { (void) hash_search(role_quota_limit_map, - (void *) "a_entry->targetoid, - HASH_REMOVE, NULL); + (void *) "a_entry->targetoid, + HASH_REMOVE, NULL); } ret = SPI_execute("select targetoid, quotatype, quotalimitMB from diskquota.quota_config", true, 0); @@ -720,12 +759,12 @@ load_quotas(void) tupdesc = SPI_tuptable->tupdesc; if (tupdesc->natts != 3 || - TupleDescAttr(tupdesc, 0)->atttypid != OIDOID || - TupleDescAttr(tupdesc, 1)->atttypid != INT4OID || - TupleDescAttr(tupdesc, 2)->atttypid != INT8OID) + ((tupdesc)->attrs[0])->atttypid != OIDOID || + ((tupdesc)->attrs[1])->atttypid != INT4OID || + ((tupdesc)->attrs[2])->atttypid != INT8OID) { - elog(LOG, "configuration table \"quota_config\" is corruptted in database \"%s\"," - " please recreate diskquota extension", + elog(LOG, "configuration table \"quota_config\" is corrupted in database \"%s\"," + " please recreate diskquota extension", get_database_name(MyDatabaseId)); return false; } @@ -743,11 +782,11 @@ load_quotas(void) if (isnull) continue; targetOid = DatumGetObjectId(dat); - + dat = SPI_getbinval(tup, tupdesc, 2, &isnull); if (isnull) continue; - quotatype = (QuotaType)DatumGetInt32(dat); + quotatype = (QuotaType) DatumGetInt32(dat); dat = SPI_getbinval(tup, tupdesc, 3, &isnull); if (isnull) @@ -756,16 +795,16 @@ load_quotas(void) if (quotatype == NAMESPACE_QUOTA) { - quota_entry = (QuotaLimitEntry *)hash_search(namespace_quota_limit_map, - &targetOid, - HASH_ENTER, &found); + quota_entry = (QuotaLimitEntry *) hash_search(namespace_quota_limit_map, + &targetOid, + HASH_ENTER, &found); quota_entry->limitsize = quota_limit_mb; } else if (quotatype == ROLE_QUOTA) { - quota_entry = (QuotaLimitEntry *)hash_search(role_quota_limit_map, - &targetOid, - HASH_ENTER, &found); + quota_entry = (QuotaLimitEntry *) hash_search(role_quota_limit_map, + &targetOid, + HASH_ENTER, &found); quota_entry->limitsize = quota_limit_mb; } } @@ -784,6 +823,7 @@ get_rel_owner_schema(Oid relid, Oid *ownerOid, Oid *nsOid) if (HeapTupleIsValid(tp)) { Form_pg_class reltup = (Form_pg_class) GETSTRUCT(tp); + *ownerOid = reltup->relowner; *nsOid = reltup->relnamespace; ReleaseSysCache(tp); @@ -799,22 +839,23 @@ get_rel_owner_schema(Oid relid, Oid *ownerOid, Oid *nsOid) bool quota_check_common(Oid reloid) { - Oid ownerOid = InvalidOid; - Oid nsOid = InvalidOid; - bool found; + Oid ownerOid = InvalidOid; + Oid nsOid = InvalidOid; + bool found; BlackMapEntry keyitem; + memset(&keyitem, 0, sizeof(BlackMapEntry)); get_rel_owner_schema(reloid, &ownerOid, &nsOid); LWLockAcquire(black_map_shm_lock->lock, LW_SHARED); - if ( nsOid != InvalidOid) + if (nsOid != InvalidOid) { keyitem.targetoid = nsOid; keyitem.databaseoid = MyDatabaseId; keyitem.targettype = NAMESPACE_QUOTA; hash_search(disk_quota_black_map, - &keyitem, - HASH_FIND, &found); + &keyitem, + HASH_FIND, &found); if (found) { ereport(ERROR, @@ -825,19 +866,19 @@ quota_check_common(Oid reloid) } - if ( ownerOid != InvalidOid) + if (ownerOid != InvalidOid) { keyitem.targetoid = ownerOid; keyitem.databaseoid = MyDatabaseId; keyitem.targettype = ROLE_QUOTA; hash_search(disk_quota_black_map, - &keyitem, - HASH_FIND, 
&found); + &keyitem, + HASH_FIND, &found); if (found) { ereport(ERROR, (errcode(ERRCODE_DISK_FULL), - errmsg("role's disk space quota exceeded with name:%s", GetUserNameFromId(ownerOid, false)))); + errmsg("role's disk space quota exceeded with name:%s", GetUserNameFromId(ownerOid)))); return false; } } diff --git a/sql/clean.sql b/sql/clean.sql index b999009fb50..bf71fcb0d19 100644 --- a/sql/clean.sql +++ b/sql/clean.sql @@ -1,5 +1,5 @@ -drop table badquota.t1; -drop role testbody; -drop schema badquota; +DROP TABLE badquota.t1; +DROP ROLE testbody; +DROP SCHEMA badquota; -drop extension diskquota; +DROP EXTENSION diskquota; diff --git a/sql/empty.sql b/sql/empty.sql deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/sql/fini.sql b/sql/fini.sql new file mode 100644 index 00000000000..fa380b454c5 --- /dev/null +++ b/sql/fini.sql @@ -0,0 +1,8 @@ +\! gpconfig -c diskquota.monitor_databases -v postgres > /dev/null +\! echo $? +-- start_ignore +\! gpstop -u > /dev/null +\! echo $? +-- end_ignore + +\! sleep 2 diff --git a/sql/init.sql b/sql/init.sql new file mode 100644 index 00000000000..9e18ee24e18 --- /dev/null +++ b/sql/init.sql @@ -0,0 +1,26 @@ +-- start_ignore +\! gpconfig -c shared_preload_libraries -v diskquota > /dev/null +-- end_ignore +\! echo $? + +-- start_ignore +\! gpstop -raf > /dev/null +-- end_ignore +\! echo $? + +-- start_ignore +\! gpconfig -c diskquota.monitor_databases -v contrib_regression > /dev/null +-- end_ignore +\! echo $? + +-- start_ignore +\! gpconfig -c diskquota.naptime -v 2 > /dev/null +-- end_ignore +\! echo $? + +-- start_ignore +\! gpstop -u > /dev/null +-- end_ignore +\! echo $? + +\! sleep 10 diff --git a/sql/prepare.sql b/sql/prepare.sql index 0392c339b75..1c802fc8a90 100644 --- a/sql/prepare.sql +++ b/sql/prepare.sql @@ -1,15 +1,18 @@ -create extension diskquota; -select pg_sleep(1); -\! pg_ctl -D /tmp/pg_diskquota_test/data reload +CREATE EXTENSION diskquota; +-- start_ignore +\! gpstop -u +-- end_ignore +SELECT pg_sleep(1); \! 
cp data/csmall.txt /tmp/csmall.txt -select pg_sleep(5); +SELECT pg_sleep(15); -- prepare a schema that has reached quota limit -create schema badquota; -select diskquota.set_schema_quota('badquota', '1 MB'); -create role testbody; -create table badquota.t1(i int); -alter table badquota.t1 owner to testbody; -insert into badquota.t1 select generate_series(0, 50000); -select pg_sleep(5); -insert into badquota.t1 select generate_series(0, 10); +CREATE SCHEMA badquota; +SELECT diskquota.set_schema_quota('badquota', '1 MB'); +CREATE ROLE testbody; +CREATE TABLE badquota.t1(i INT); +ALTER TABLE badquota.t1 OWNER TO testbody; +INSERT INTO badquota.t1 SELECT generate_series(0, 100000000); +SELECT pg_sleep(5); +-- expect fail +INSERT INTO badquota.t1 SELECT generate_series(0, 10); diff --git a/sql/test_column.sql b/sql/test_column.sql index 62f0f0770d8..bc3775c7ef0 100644 --- a/sql/test_column.sql +++ b/sql/test_column.sql @@ -1,19 +1,21 @@ -- Test alter table add column -create schema scolumn; -select diskquota.set_schema_quota('scolumn', '1 MB'); -set search_path to scolumn; -select pg_sleep(5); +CREATE SCHEMA scolumn; +SELECT diskquota.set_schema_quota('scolumn', '1 MB'); +SET search_path TO scolumn; +SELECT pg_sleep(5); -create table a2(i int); -insert into a2 select generate_series(1,20000); -insert into a2 select generate_series(1,10); -ALTER TABLE a2 ADD COLUMN j varchar(50); -update a2 set j = 'add value for column j'; -select pg_sleep(5); +CREATE TABLE a2(i INT); +-- expect fail +INSERT INTO a2 SELECT generate_series(1,100000000); +-- expect fail +INSERT INTO a2 SELECT generate_series(1,10); +ALTER TABLE a2 ADD COLUMN j VARCHAR(50); +UPDATE a2 SET j = 'add value for column j'; +SELECT pg_sleep(5); -- expect insert failed after add column -insert into a2 select generate_series(1,10); +INSERT INTO a2 SELECT generate_series(1,10); -drop table a2; -reset search_path; -drop schema scolumn; +DROP TABLE a2; +RESET search_path; +DROP SCHEMA scolumn; diff --git a/sql/test_copy.sql b/sql/test_copy.sql index 07a525601d6..6d2c854e574 100644 --- a/sql/test_copy.sql +++ b/sql/test_copy.sql @@ -1,17 +1,16 @@ -- Test copy -create schema s3; -select diskquota.set_schema_quota('s3', '1 MB'); -set search_path to s3; +CREATE SCHEMA s3; +SELECT diskquota.set_schema_quota('s3', '1 MB'); +SET search_path TO s3; -create table c (i int); -copy c from '/tmp/csmall.txt'; +CREATE TABLE c (i int); +COPY c FROM '/tmp/csmall.txt'; -- expect failed -insert into c select generate_series(1,100000000); -select pg_sleep(5); --- select pg_total_table_size('c'); +INSERT INTO c SELECT generate_series(1,100000000); +SELECT pg_sleep(5); -- expect copy fail -copy c from '/tmp/csmall.txt'; +COPY c FROM '/tmp/csmall.txt'; -drop table c; -reset search_path; -drop schema s3; +DROP TABLE c; +RESET search_path; +DROP SCHEMA s3; diff --git a/sql/test_delete_quota.sql b/sql/test_delete_quota.sql new file mode 100644 index 00000000000..a46ae3b2646 --- /dev/null +++ b/sql/test_delete_quota.sql @@ -0,0 +1,19 @@ +-- Test delete disk quota +CREATE SCHEMA deleteschema; +SELECT diskquota.set_schema_quota('deleteschema', '1 MB'); +SET search_path TO deleteschema; + +CREATE TABLE c (i INT); +-- expect failed +INSERT INTO c SELECT generate_series(1,100000000); +SELECT pg_sleep(5); +-- expect fail +INSERT INTO c SELECT generate_series(1,100); +SELECT diskquota.set_schema_quota('deleteschema', '-1 MB'); +SELECT pg_sleep(5); + +INSERT INTO c SELECT generate_series(1,100); + +DROP TABLE c; +RESET search_path; +DROP SCHEMA deleteschema; diff --git 
a/sql/test_drop_table.sql b/sql/test_drop_table.sql index 21147c38675..d1f9b434f89 100644 --- a/sql/test_drop_table.sql +++ b/sql/test_drop_table.sql @@ -1,20 +1,20 @@ -- Test Drop table -create schema sdrtbl; -select diskquota.set_schema_quota('sdrtbl', '1 MB'); -set search_path to sdrtbl; -create table a(i int); -create table a2(i int); -insert into a select generate_series(1,100); +CREATE SCHEMA sdrtbl; +SELECT diskquota.set_schema_quota('sdrtbl', '1 MB'); +SET search_path TO sdrtbl; +CREATE TABLE a(i INT); +CREATE TABLE a2(i INT); +INSERT INTO a SELECT generate_series(1,100); -- expect insert fail -insert into a select generate_series(1,100000000); +INSERT INTO a SELECT generate_series(1,100000000); -- expect insert fail -insert into a2 select generate_series(1,100); -drop table a; -select pg_sleep(5); -insert into a2 select generate_series(1,100); +INSERT INTO a2 SELECT generate_series(1,100); +DROP TABLE a; +SELECT pg_sleep(5); +INSERT INTO a2 SELECT generate_series(1,100); -drop table a2; -reset search_path; -drop schema sdrtbl; +DROP TABLE a2; +RESET search_path; +DROP SCHEMA sdrtbl; diff --git a/sql/test_mistake.sql b/sql/test_mistake.sql new file mode 100644 index 00000000000..55fbe322d27 --- /dev/null +++ b/sql/test_mistake.sql @@ -0,0 +1,3 @@ +-- to make sure that the schema 'notfoundns' is really not found +select nspname from pg_namespace where nspname = 'notfoundns'; +select diskquota.set_schema_quota('notfoundns', '1 MB'); diff --git a/sql/test_partition.sql b/sql/test_partition.sql index e9eae44be63..8fd3ea20cdf 100644 --- a/sql/test_partition.sql +++ b/sql/test_partition.sql @@ -1,34 +1,35 @@ -- Test partition table -create schema s8; -select diskquota.set_schema_quota('s8', '1 MB'); -set search_path to s8; +CREATE SCHEMA s8; +SELECT diskquota.set_schema_quota('s8', '1 MB'); +SET search_path TO s8; CREATE TABLE measurement ( city_id int not null, logdate date not null, peaktemp int, unitsales int -)PARTITION BY RANGE (logdate); -CREATE TABLE measurement_y2006m02 PARTITION OF measurement - FOR VALUES FROM ('2006-02-01') TO ('2006-03-01'); +)PARTITION BY RANGE (logdate) +( + PARTITION Feb06 START (date '2006-02-01') INCLUSIVE, + PARTITION Mar06 START (date '2006-03-01') INCLUSIVE + END (date '2006-04-01') EXCLUSIVE +); -CREATE TABLE measurement_y2006m03 PARTITION OF measurement - FOR VALUES FROM ('2006-03-01') TO ('2006-04-01'); -insert into measurement select generate_series(1,15000), '2006-02-01' ,1,1; -select pg_sleep(5); -insert into measurement select 1, '2006-02-01' ,1,1; +INSERT INTO measurement SELECT generate_series(1,100), '2006-02-02' ,1,1; +SELECT pg_sleep(5); +INSERT INTO measurement SELECT 1, '2006-02-02' ,1,1; -- expect insert fail -insert into measurement select generate_series(1,100000000), '2006-03-02' ,1,1; +INSERT INTO measurement SELECT generate_series(1,100000000), '2006-03-02' ,1,1; -- expect insert fail -insert into measurement select 1, '2006-02-01' ,1,1; +INSERT INTO measurement SELECT 1, '2006-02-02' ,1,1; -- expect insert fail -insert into measurement select 1, '2006-03-03' ,1,1; -delete from measurement where logdate='2006-03-02'; -vacuum full measurement; -select pg_sleep(5); -insert into measurement select 1, '2006-02-01' ,1,1; -insert into measurement select 1, '2006-03-03' ,1,1; +INSERT INTO measurement SELECT 1, '2006-03-03' ,1,1; +DELETE FROM measurement WHERE logdate='2006-03-02'; +VACUUM FULL measurement; +SELECT pg_sleep(5); +INSERT INTO measurement SELECT 1, '2006-02-02' ,1,1; +INSERT INTO measurement SELECT 1, '2006-03-03' ,1,1; -drop
table measurement; -reset search_path; -drop schema s8; +DROP TABLE measurement; +RESET search_path; +DROP SCHEMA s8; diff --git a/sql/test_rename.sql b/sql/test_rename.sql index 3516bb06f17..aec3d525ccf 100644 --- a/sql/test_rename.sql +++ b/sql/test_rename.sql @@ -1,48 +1,48 @@ -- test rename schema -create schema srs1; -select diskquota.set_schema_quota('srs1', '1 MB'); +CREATE SCHEMA srs1; +SELECT diskquota.set_schema_quota('srs1', '1 MB'); set search_path to srs1; -create table a(i int); +CREATE TABLE a(i int); -- expect insert fail -insert into a select generate_series(1,100000000); +INSERT INTO a SELECT generate_series(1,100000000); -- expect insert fail -insert into a select generate_series(1,10); -alter schema srs1 rename to srs2; -set search_path to srs2; +INSERT INTO a SELECT generate_series(1,10); +ALTER SCHEMA srs1 RENAME TO srs2; +SET search_path TO srs2; -- expect insert fail -insert into a select generate_series(1,10); +INSERT INTO a SELECT generate_series(1,10); -- test rename table -alter table a rename to a2; +ALTER TABLE a RENAME TO a2; -- expect insert fail -insert into a2 select generate_series(1,10); +INSERT INTO a2 SELECT generate_series(1,10); -drop table a2; -reset search_path; -drop schema srs2; +DROP TABLE a2; +RESET search_path; +DROP SCHEMA srs2; -- test rename role -create schema srr1; -create role srerole nologin; -select diskquota.set_role_quota('srerole', '1MB'); -set search_path to srr1; -create table a(i int); -alter table a owner to srerole; +CREATE SCHEMA srr1; +CREATE ROLE srerole NOLOGIN; +SELECT diskquota.set_role_quota('srerole', '1MB'); +SET search_path TO srr1; +CREATE TABLE a(i int); +ALTER TABLE a OWNER TO srerole; -- expect insert fail -insert into a select generate_series(1,100000000); +INSERT INTO a SELECT generate_series(1,100000000); -- expect insert fail -insert into a select generate_series(1,10); -alter role srerole rename to srerole2; +INSERT INTO a SELECT generate_series(1,10); +ALTER ROLE srerole RENAME TO srerole2; -- expect insert fail -insert into a select generate_series(1,10); +INSERT INTO a SELECT generate_series(1,10); -- test rename table -alter table a rename to a2; +ALTER TABLE a RENAME TO a2; -- expect insert fail -insert into a2 select generate_series(1,10); +INSERT INTO a2 SELECT generate_series(1,10); -drop table a2; -drop role srerole2; -reset search_path; -drop schema srr1; +DROP TABLE a2; +DROP ROLE srerole2; +RESET search_path; +DROP SCHEMA srr1; diff --git a/sql/test_reschema.sql b/sql/test_reschema.sql index 66b690341cf..48c3c05de7c 100644 --- a/sql/test_reschema.sql +++ b/sql/test_reschema.sql @@ -1,19 +1,19 @@ -- Test re-set_schema_quota -create schema srE; -select diskquota.set_schema_quota('srE', '1 MB'); -set search_path to srE; -create table a(i int); +CREATE SCHEMA srE; +SELECT diskquota.set_schema_quota('srE', '1 MB'); +SET search_path TO srE; +CREATE TABLE a(i int); -- expect insert fail -insert into a select generate_series(1,1000000000); +INSERT INTO a SELECT generate_series(1,1000000000); -- expect insert fail when exceed quota limit -insert into a select generate_series(1,1000); +INSERT INTO a SELECT generate_series(1,1000); -- set schema quota larger -select diskquota.set_schema_quota('srE', '1 GB'); -select pg_sleep(5); +SELECT diskquota.set_schema_quota('srE', '1 GB'); +SELECT pg_sleep(5); -- expect insert succeed -insert into a select generate_series(1,1000); +INSERT INTO a SELECT generate_series(1,1000); -drop table a; -reset search_path; -drop schema srE; +DROP TABLE a; +RESET search_path; +DROP 
SCHEMA srE; diff --git a/sql/test_role.sql b/sql/test_role.sql index 8a482f7fb6d..06896e7e1e8 100644 --- a/sql/test_role.sql +++ b/sql/test_role.sql @@ -1,32 +1,32 @@ -- Test role quota -create schema srole; -set search_path to srole; +CREATE SCHEMA srole; +SET search_path TO srole; -CREATE role u1 NOLOGIN; -CREATE role u2 NOLOGIN; -CREATE TABLE b (t text); +CREATE ROLE u1 NOLOGIN; +CREATE ROLE u2 NOLOGIN; +CREATE TABLE b (t TEXT); ALTER TABLE b OWNER TO u1; -CREATE TABLE b2 (t text); +CREATE TABLE b2 (t TEXT); ALTER TABLE b2 OWNER TO u1; -select diskquota.set_role_quota('u1', '1 MB'); +SELECT diskquota.set_role_quota('u1', '1 MB'); -insert into b select generate_series(1,100); +INSERT INTO b SELECT generate_series(1,100); -- expect insert fail -insert into b select generate_series(1,100000000); +INSERT INTO b SELECT generate_series(1,100000000); -- expect insert fail -insert into b select generate_series(1,100); +INSERT INTO b SELECT generate_series(1,100); -- expect insert fail -insert into b2 select generate_series(1,100); -alter table b owner to u2; -select pg_sleep(5); +INSERT INTO b2 SELECT generate_series(1,100); +ALTER TABLE b OWNER TO u2; +SELECT pg_sleep(5); -- expect insert succeed -insert into b select generate_series(1,100); +INSERT INTO b SELECT generate_series(1,100); -- expect insert succeed -insert into b2 select generate_series(1,100); +INSERT INTO b2 SELECT generate_series(1,100); -drop table b, b2; -drop role u1, u2; -reset search_path; -drop schema srole; +DROP TABLE b, b2; +DROP ROLE u1, u2; +RESET search_path; +DROP SCHEMA srole; diff --git a/sql/test_schema.sql b/sql/test_schema.sql index 392e898d8d6..27ad2e946c0 100644 --- a/sql/test_schema.sql +++ b/sql/test_schema.sql @@ -18,14 +18,16 @@ create schema s2; alter table s1.a set schema s2; select pg_sleep(5); -- expect insert succeed -insert into a2 select generate_series(1,20000); +insert into a2 select generate_series(1,200); -- expect insert succeed -insert into s2.a select generate_series(1,20000); +insert into s2.a select generate_series(1,200); alter table s2.a set schema badquota; -- expect failed insert into badquota.a select generate_series(0, 100); +select schema_name, quota_in_mb from diskquota.show_schema_quota_view where schema_name = 's1'; + reset search_path; drop table s1.a2, badquota.a; drop schema s1, s2; diff --git a/sql/test_temp_role.sql b/sql/test_temp_role.sql index 8dc082f455f..a2efcf80d50 100644 --- a/sql/test_temp_role.sql +++ b/sql/test_temp_role.sql @@ -1,22 +1,23 @@ -- Test temp table restrained by role id -create schema strole; -create role u3temp nologin; -set search_path to strole; +CREATE SCHEMA strole; +CREATE ROLE u3temp NOLOGIN; +SET search_path TO strole; -select diskquota.set_role_quota('u3temp', '1MB'); -create table a(i int); -alter table a owner to u3temp; -create temp table ta(i int); -alter table ta owner to u3temp; +SELECT diskquota.set_role_quota('u3temp', '1MB'); +CREATE TABLE a(i int); +ALTER TABLE a OWNER TO u3temp; +CREATE TEMP TABLE ta(i int); +ALTER TABLE ta OWNER TO u3temp; -- expected failed: fill temp table -insert into ta select generate_series(1,100000000); +INSERT INTO ta SELECT generate_series(1,100000000); -- expected failed: -insert into a select generate_series(1,100); -drop table ta; -select pg_sleep(5); -insert into a select generate_series(1,100); +INSERT INTO a SELECT generate_series(1,100); +DROP TABLE ta; +SELECT pg_sleep(5); +INSERT INTO a SELECT generate_series(1,100); -drop table a; -reset search_path; -drop schema strole; +DROP TABLE a; +DROP 
ROLE u3temp; +RESET search_path; +DROP SCHEMA strole; diff --git a/sql/test_toast.sql b/sql/test_toast.sql index 7ee6e3666ee..d682fa6ac6e 100644 --- a/sql/test_toast.sql +++ b/sql/test_toast.sql @@ -1,7 +1,7 @@ -- Test toast -create schema s5; -select diskquota.set_schema_quota('s5', '1 MB'); -set search_path to s5; +CREATE SCHEMA s5; +SELECT diskquota.set_schema_quota('s5', '1 MB'); +SET search_path TO s5; CREATE TABLE a5 (message text); INSERT INTO a5 SELECT (SELECT @@ -9,7 +9,7 @@ SELECT (SELECT FROM generate_series(1,10000)) FROM generate_series(1,10); -select pg_sleep(5); +SELECT pg_sleep(5); -- expect insert toast fail INSERT INTO a5 SELECT (SELECT @@ -17,7 +17,7 @@ SELECT (SELECT FROM generate_series(1,100000)) FROM generate_series(1,1000000); -drop table a5; -reset search_path; -drop schema s5; +DROP TABLE a5; +RESET search_path; +DROP SCHEMA s5; diff --git a/sql/test_truncate.sql b/sql/test_truncate.sql index d19c10777d5..d269e2b9278 100644 --- a/sql/test_truncate.sql +++ b/sql/test_truncate.sql @@ -1,21 +1,21 @@ -- Test truncate -create schema s7; -select diskquota.set_schema_quota('s7', '1 MB'); -set search_path to s7; -create table a (i int); -create table b (i int); -insert into a select generate_series(1,50000); -select pg_sleep(5); +CREATE SCHEMA s7; +SELECT diskquota.set_schema_quota('s7', '1 MB'); +SET search_path TO s7; +CREATE TABLE a (i int); +CREATE TABLE b (i int); +INSERT INTO a SELECT generate_series(1,100000000); +SELECT pg_sleep(5); -- expect insert fail -insert into a select generate_series(1,30); -insert into b select generate_series(1,30); -truncate table a; -select pg_sleep(5); +INSERT INTO a SELECT generate_series(1,30); +INSERT INTO b SELECT generate_series(1,30); +TRUNCATE TABLE a; +SELECT pg_sleep(5); -- expect insert succeed -insert into a select generate_series(1,30); -insert into b select generate_series(1,30); +INSERT INTO a SELECT generate_series(1,30); +INSERT INTO b SELECT generate_series(1,30); -drop table a, b; -reset search_path; -drop schema s7; +DROP TABLE a, b; +RESET search_path; +DROP SCHEMA s7; diff --git a/sql/test_update.sql b/sql/test_update.sql index 506cf4022cd..a6cedc35b08 100644 --- a/sql/test_update.sql +++ b/sql/test_update.sql @@ -1,13 +1,13 @@ -- Test Update -create schema s4; -select diskquota.set_schema_quota('s4', '1 MB'); -set search_path to s4; -create table a(i int); -insert into a select generate_series(1,50000); -select pg_sleep(5); +CREATE SCHEMA s4; +SELECT diskquota.set_schema_quota('s4', '1 MB'); +SET search_path TO s4; +CREATE TABLE a(i int); +INSERT INTO a SELECT generate_series(1,100000000); +SELECT pg_sleep(5); -- expect update fail. 
-update a set i = 100; -drop table a; -reset search_path; -drop schema s4; +UPDATE a SET i = 100; +DROP TABLE a; +RESET search_path; +DROP SCHEMA s4; diff --git a/sql/test_vacuum.sql b/sql/test_vacuum.sql index 2f651d22af0..5a91ac15848 100644 --- a/sql/test_vacuum.sql +++ b/sql/test_vacuum.sql @@ -1,23 +1,23 @@ -- Test vacuum full -create schema s6; -select diskquota.set_schema_quota('s6', '1 MB'); -set search_path to s6; -create table a (i int); -create table b (i int); -insert into a select generate_series(1,50000); -select pg_sleep(5); +CREATE SCHEMA s6; +SELECT diskquota.set_schema_quota('s6', '1 MB'); +SET search_path TO s6; +CREATE TABLE a (i int); +CREATE TABLE b (i int); +INSERT INTO a SELECT generate_series(1,100000000); +SELECT pg_sleep(5); -- expect insert fail -insert into a select generate_series(1,10); +INSERT INTO a SELECT generate_series(1,10); -- expect insert fail -insert into b select generate_series(1,10); -delete from a where i > 10; -vacuum full a; -select pg_sleep(5); +INSERT INTO b SELECT generate_series(1,10); +DELETE FROM a WHERE i > 10; +VACUUM FULL a; +SELECT pg_sleep(5); -- expect insert succeed -insert into a select generate_series(1,10); -insert into b select generate_series(1,10); +INSERT INTO a SELECT generate_series(1,10); +INSERT INTO b SELECT generate_series(1,10); -drop table a, b; -reset search_path; -drop schema s6; +DROP TABLE a, b; +RESET search_path; +DROP SCHEMA s6; diff --git a/test_diskquota.conf b/test_diskquota.conf deleted file mode 100644 index 03b04262ee2..00000000000 --- a/test_diskquota.conf +++ /dev/null @@ -1,5 +0,0 @@ -autovacuum = off -fsync = on -shared_preload_libraries = 'diskquota' -diskquota.monitor_databases = 'contrib_regression' -diskquota.naptime = 2 From 77bab4c915cd7eb2e6c749e2d34556e6d0db320d Mon Sep 17 00:00:00 2001 From: Hao Wu <37101401+gfphoenix78@users.noreply.github.com> Date: Fri, 11 Jan 2019 17:34:14 +0800 Subject: [PATCH 010/330] Enable/disable diskquota dynamically (#8) Design: In the past, we used the GUC diskquota_monitor_database to save the list of diskquota-enabled databases. The DBA had to reset the value in postgresql.conf each time and call pg_ctl reload to refresh the database list. To make this management process easier, we now use a heap table (diskquota_namespace.database_list) in database `diskquota` to store the monitored database list instead. The GUC diskquota_monitor_database is removed; a monitored database is added to table database_list by `create extension diskquota` and deleted from the table by `drop extension diskquota` automatically. When `create extension diskquota` is called, a UDF, diskquota_start_worker, is invoked, and the launcher will store the dboid into table diskquota_namespace.database_list, and start a bgworker for the corresponding database to control its disk usage. When `drop extension diskquota` is called, a hook function, object_access_hook, is invoked, and the launcher will delete the dboid from table diskquota_namespace.database_list, and stop the corresponding bgworker. Protocol: The communication between the launcher process of diskquota and the backends uses a simple protocol, via a block of shared memory (MessageBox).
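For orientation, the backend side of this exchange (enumerated step by step below) condenses to roughly the following C sketch. This is illustrative only: the helper name send_request_to_launcher is hypothetical, the real logic in this patch is inlined in diskquota_start_worker() and dq_object_access_hook(), and the declarations (message_box, diskquota_locks, WAIT_TIME_COUNT, err_code_to_err_message) are assumed from diskquota.h/diskquota.c of this patch.

/*
 * Illustrative sketch only, condensed from diskquota_start_worker()
 * later in this patch; error handling is simplified.
 */
static void
send_request_to_launcher(MessageCommand cmd)
{
	int		count = WAIT_TIME_COUNT;	/* 120 ticks of 100 ms each, ~12 s budget */

	LWLockAcquire(diskquota_locks.message_box_lock, LW_EXCLUSIVE);
	/* fill the request into the shared MessageBox */
	message_box->req_pid = MyProcPid;
	message_box->cmd = cmd;
	message_box->result = ERR_PENDING;
	message_box->data[0] = MyDatabaseId;

	/* wake the launcher, then poll until it overwrites ERR_PENDING */
	if (kill(message_box->launcher_pid, SIGUSR1) == 0)
	{
		while (count-- > 0)
		{
			int		rc = WaitLatch(&MyProc->procLatch,
								   WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH,
								   100L);

			if (rc & WL_POSTMASTER_DEATH)
				break;
			ResetLatch(&MyProc->procLatch);
			if (message_box->result != ERR_PENDING)
				break;
		}
	}
	message_box->req_pid = 0;
	LWLockRelease(diskquota_locks.message_box_lock);

	if (message_box->result != ERR_OK)
		elog(ERROR, "%s", err_code_to_err_message((MessageResult) message_box->result));
}

Holding message_box_lock across the whole round trip is what serializes concurrent CREATE/DROP EXTENSION requests against the single shared MessageBox, and the 100 ms WaitLatch timeout bounds how long a backend can block if the launcher never answers.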
When the backend wants to send a message to the launcher, it follows these steps: * acquires a lock * fills message data into the message_box * sends a signal SIGUSR1 to the launcher * waits for a response, in a loop with a timeout * consumes the response * releases the lock Note: for now, the user must create database `diskquota' manually before using diskquota, or the launcher process will fail to start. * Fix regression tests affected by the change of which database diskquota uses * Add a test case: insert after drop extension diskquota 1. insert should not fail due to a diskquota constraint 2. constraints added by diskquota are removed immediately when the user drops the extension 3. update error message for `ERR_PENDING' Fix changes from pg to gpdb: 1. Fix lwlock request and alloc 2. Fix some typos * remove the unneeded code to set `monitor_databases` * Fix a bug caused by debug_query_string when executing utility SPI; reset the ps display name of the worker process before the main loop * Update test cases 1. remove prepare0, fini 2. add test_insert_after_drop 3. update keywords to upper case 4. update result files for test_schema, test_extension, test_insert_after_drop --- diskquota--1.0.sql | 22 +- diskquota.c | 699 +++++++++++++++++++--------- diskquota.h | 63 ++- diskquota_schedule | 4 +- expected/fini.out | 9 - expected/init.out | 11 +- expected/test_extension.out | 293 ++++++++++++ expected/test_insert_after_drop.out | 33 ++ expected/test_schema.out | 40 +- gp_activetable.c | 26 +- gp_activetable.h | 1 - quotamodel.c | 58 ++- sql/fini.sql | 8 - sql/init.sql | 13 +- sql/test_extension.sql | 203 ++++++++ sql/test_insert_after_drop.sql | 20 + sql/test_schema.sql | 40 +- 17 files changed, 1197 insertions(+), 346 deletions(-) delete mode 100644 expected/fini.out create mode 100644 expected/test_extension.out create mode 100644 expected/test_insert_after_drop.out delete mode 100644 sql/fini.sql create mode 100644 sql/test_extension.sql create mode 100644 sql/test_insert_after_drop.sql diff --git a/diskquota--1.0.sql b/diskquota--1.0.sql index 0fad6e46e50..abd2105fede 100644 --- a/diskquota--1.0.sql +++ b/diskquota--1.0.sql @@ -5,39 +5,43 @@ CREATE SCHEMA diskquota; -set search_path='diskquota'; - -- Configuration table create table diskquota.quota_config (targetOid oid, quotatype int, quotalimitMB int8, PRIMARY KEY(targetOid, quotatype)); SELECT pg_catalog.pg_extension_config_dump('diskquota.quota_config', ''); -CREATE FUNCTION set_schema_quota(text, text) +CREATE FUNCTION diskquota.set_schema_quota(text, text) +RETURNS void STRICT +AS 'MODULE_PATHNAME' +LANGUAGE C; + +CREATE FUNCTION diskquota.set_role_quota(text, text) RETURNS void STRICT AS 'MODULE_PATHNAME' LANGUAGE C; -CREATE FUNCTION set_role_quota(text, text) +CREATE FUNCTION diskquota.diskquota_start_worker() RETURNS void STRICT AS 'MODULE_PATHNAME' LANGUAGE C; -CREATE VIEW show_schema_quota_view AS +CREATE VIEW diskquota.show_schema_quota_view AS SELECT pg_namespace.nspname as schema_name, pg_class.relnamespace as schema_oid, quota.quotalimitMB as quota_in_mb, sum(pg_total_relation_size(pg_class.oid)) as nspsize_in_bytes FROM pg_namespace, pg_class, diskquota.quota_config as quota WHERE pg_class.relnamespace = quota.targetoid and pg_class.relnamespace = pg_namespace.oid and quota.quotatype=0 GROUP BY pg_class.relnamespace, pg_namespace.nspname, quota.quotalimitMB; -CREATE VIEW show_role_quota_view AS +CREATE VIEW diskquota.show_role_quota_view AS SELECT pg_roles.rolname as role_name, pg_class.relowner as role_oid, quota.quotalimitMB as quota_in_mb,
sum(pg_total_relation_size(pg_class.oid)) as rolsize_in_bytes FROM pg_roles, pg_class, diskquota.quota_config as quota WHERE pg_class.relowner = quota.targetoid and pg_class.relowner = pg_roles.oid and quota.quotatype=1 GROUP BY pg_class.relowner, pg_roles.rolname, quota.quotalimitMB; -CREATE TYPE diskquota_active_table_type AS ("TABLE_OID" oid, "TABLE_SIZE" int8); +CREATE TYPE diskquota.diskquota_active_table_type AS ("TABLE_OID" oid, "TABLE_SIZE" int8); -CREATE OR REPLACE FUNCTION diskquota_fetch_table_stat(int4, oid[]) RETURNS setof diskquota_active_table_type +CREATE OR REPLACE FUNCTION diskquota.diskquota_fetch_table_stat(int4, oid[]) RETURNS setof diskquota.diskquota_active_table_type AS 'MODULE_PATHNAME', 'diskquota_fetch_table_stat' LANGUAGE C VOLATILE; -reset search_path; +SELECT diskquota.diskquota_start_worker(); +DROP FUNCTION diskquota.diskquota_start_worker(); diff --git a/diskquota.c b/diskquota.c index e8f5dcd72c5..22dc302d056 100644 --- a/diskquota.c +++ b/diskquota.c @@ -18,12 +18,22 @@ #include +#include "access/tupdesc.h" +#include "access/xact.h" +#include "catalog/indexing.h" #include "catalog/namespace.h" +#include "catalog/objectaccess.h" #include "catalog/pg_collation.h" +#include "catalog/pg_database.h" +#include "catalog/pg_extension.h" +#include "catalog/pg_type.h" #include "cdb/cdbvars.h" +#include "commands/dbcommands.h" +#include "commands/extension.h" #include "executor/spi.h" #include "libpq/libpq-be.h" #include "miscadmin.h" +#include "nodes/makefuncs.h" #include "pgstat.h" #include "postmaster/bgworker.h" #include "storage/ipc.h" @@ -31,8 +41,13 @@ #include "tcop/utility.h" #include "utils/acl.h" #include "utils/builtins.h" +#include "utils/fmgroids.h" #include "utils/formatting.h" +#include "utils/memutils.h" #include "utils/numeric.h" +#include "utils/ps_status.h" +#include "utils/snapmgr.h" +#include "utils/syscache.h" #include "gp_activetable.h" #include "diskquota.h" @@ -41,6 +56,10 @@ PG_MODULE_MAGIC; /* disk quota helper function */ PG_FUNCTION_INFO_V1(set_schema_quota); PG_FUNCTION_INFO_V1(set_role_quota); +PG_FUNCTION_INFO_V1(diskquota_start_worker); + +/* timeout count to wait response from launcher process, in 1/10 sec */ +#define WAIT_TIME_COUNT 120 /* max number of monitored database with diskquota enabled */ #define MAX_NUM_MONITORED_DB 10 @@ -48,23 +67,28 @@ PG_FUNCTION_INFO_V1(set_role_quota); /* flags set by signal handlers */ static volatile sig_atomic_t got_sighup = false; static volatile sig_atomic_t got_sigterm = false; +static volatile sig_atomic_t got_sigusr1 = false; /* GUC variables */ -int diskquota_naptime = 0; -char *diskquota_monitored_database_list = NULL; -int diskquota_max_active_tables = 0; +int diskquota_naptime = 0; +int diskquota_max_active_tables = 0; typedef struct DiskQuotaWorkerEntry DiskQuotaWorkerEntry; /* disk quota worker info used by launcher to manage the worker processes. 
*/ struct DiskQuotaWorkerEntry { - char dbname[NAMEDATALEN]; + Oid dbid; + pid_t pid; /* worker pid */ BackgroundWorkerHandle *handle; }; +DiskQuotaLocks diskquota_locks; +volatile MessageBox * message_box = NULL; /* using hash table to support incremental update the table size entry.*/ static HTAB *disk_quota_worker_map = NULL; +static object_access_hook_type next_object_access_hook; +static int num_db = 0; /* functions of disk quota*/ void _PG_init(void); @@ -74,11 +98,20 @@ void disk_quota_launcher_main(Datum); static void disk_quota_sigterm(SIGNAL_ARGS); static void disk_quota_sighup(SIGNAL_ARGS); -static List *get_database_list(bool *is_refresh); static int64 get_size_in_mb(char *str); -static void refresh_worker_list(void); static void set_quota_internal(Oid targetoid, int64 quota_limit_mb, QuotaType type); -static int start_worker(char *dbname); +static int start_worker_by_dboid(Oid dbid); +static void create_monitor_db_table(); +static inline void exec_simple_utility(const char *sql); +static void exec_simple_spi(const char *sql, int expected_code); +static bool add_db_to_config(Oid dbid); +static void del_db_from_config(Oid dbid); +static void process_message_box(void); +static void process_message_box_internal(MessageResult *code); +static void dq_object_access_hook(ObjectAccessType access, Oid classId, + Oid objectId, int subId, void *arg); +static const char *err_code_to_err_message(MessageResult code); +extern void diskquota_invalidate_db(Oid dbid); /* * Entrypoint of diskquota module. @@ -114,19 +147,6 @@ _PG_init(void) NULL, NULL); - if (!process_shared_preload_libraries_in_progress) - return; - - DefineCustomStringVariable("diskquota.monitor_databases", - gettext_noop("database list with disk quota monitored."), - NULL, - &diskquota_monitored_database_list, - "", - PGC_SIGHUP, GUC_LIST_INPUT, - NULL, - NULL, - NULL); - DefineCustomIntVariable("diskquota.max_active_tables", "max number of active tables monitored by disk-quota", NULL, @@ -145,6 +165,9 @@ _PG_init(void) { return; } + /* Add dq_object_access_hook to handle drop extension event.*/ + next_object_access_hook = object_access_hook; + object_access_hook = dq_object_access_hook; /* set up common data for diskquota launcher worker */ worker.bgw_flags = BGWORKER_SHMEM_ACCESS | @@ -155,7 +178,7 @@ _PG_init(void) snprintf(worker.bgw_function_name, BGW_MAXLEN, "disk_quota_launcher_main"); worker.bgw_notify_pid = 0; - snprintf(worker.bgw_name, BGW_MAXLEN, "disk quota launcher"); + snprintf(worker.bgw_name, BGW_MAXLEN, "[diskquota] - launcher"); RegisterBackgroundWorker(&worker); } @@ -199,6 +222,21 @@ disk_quota_sighup(SIGNAL_ARGS) errno = save_errno; } +/* + * Signal handler for SIGUSR1 + * Set a flag to tell the launcher to handle message box + */ +static void +disk_quota_sigusr1(SIGNAL_ARGS) +{ + int save_errno = errno; + got_sigusr1 = true; + + if (MyProc) + SetLatch(&MyProc->procLatch); + + errno = save_errno; +} /* ---- Functions for disk quota worker process ---- */ @@ -218,6 +256,7 @@ disk_quota_worker_main(Datum main_arg) /* Establish signal handlers before unblocking signals. */ pqsignal(SIGHUP, disk_quota_sighup); pqsignal(SIGTERM, disk_quota_sigterm); + pqsignal(SIGUSR1, disk_quota_sigusr1); /* We're now ready to receive signals */ BackgroundWorkerUnblockSignals(); @@ -232,6 +271,13 @@ disk_quota_worker_main(Datum main_arg) init_disk_quota_model(); refresh_disk_quota_model(true); + /* + * Set ps display name of the worker process of diskquota, + * so we can distinguish them quickly. 
+ * Note: never mind the parameter names of the function `init_ps_display`; + * we only want the ps name to look like 'bgworker: [diskquota] ...' + */ + init_ps_display("bgworker:", "[diskquota]", dbname, ""); /* * Main loop: do this until the SIGTERM handler tells us to terminate */ @@ -267,7 +313,222 @@ } } - proc_exit(1); + diskquota_invalidate_db(MyDatabaseId); + proc_exit(0); +} + +/** + * Create a table to record the list of monitored databases. + * We need a place to store the databases with diskquota enabled + * (via CREATE EXTENSION diskquota). Currently, we store them in a + * heap table in the diskquota_namespace schema of the `diskquota` database. + * When the cluster restarts, the diskquota launcher will start worker processes + * for these databases. + */ +static void +create_monitor_db_table() +{ + const char *sql; + sql = "create schema if not exists diskquota_namespace;" + "create table if not exists diskquota_namespace.database_list(dbid oid not null unique);"; + exec_simple_utility(sql); +} + +static inline void +exec_simple_utility(const char *sql) +{ + debug_query_string = sql; + StartTransactionCommand(); + exec_simple_spi(sql, SPI_OK_UTILITY); + CommitTransactionCommand(); + debug_query_string = NULL; +} + +static void +exec_simple_spi(const char *sql, int expected_code) +{ + int ret; + + ret = SPI_connect(); + if (ret != SPI_OK_CONNECT) + elog(ERROR, "connect error, code=%d", ret); + PushActiveSnapshot(GetTransactionSnapshot()); + ret = SPI_execute(sql, false, 0); + if (ret != expected_code) + elog(ERROR, "sql:'%s', code %d", sql, ret); + SPI_finish(); + PopActiveSnapshot(); +} + +static bool +is_valid_dbid(Oid dbid) +{ + HeapTuple tuple; + + if (dbid == InvalidOid) + return false; + tuple = SearchSysCache1(DATABASEOID, ObjectIdGetDatum(dbid)); + if (!HeapTupleIsValid(tuple)) + return false; + ReleaseSysCache(tuple); + return true; +} +/* + * At startup, start the worker processes of all diskquota-enabled databases + * listed in diskquota_namespace.database_list + */ +static void +start_workers_from_dblist() +{ + TupleDesc tupdesc; + Oid fake_dbid[128]; + int fake_count = 0; + int num = 0; + int ret; + int i; + StartTransactionCommand(); + PushActiveSnapshot(GetTransactionSnapshot()); + ret = SPI_connect(); + if (ret != SPI_OK_CONNECT) + elog(ERROR, "connect error, code=%d", ret); + ret = SPI_execute("select dbid from diskquota_namespace.database_list;", false, 0); + if (ret != SPI_OK_SELECT) + elog(ERROR, "select diskquota_namespace.database_list"); + tupdesc = SPI_tuptable->tupdesc; + if (tupdesc->natts != 1 || tupdesc->attrs[0]->atttypid != OIDOID) + elog(ERROR, "[diskquota] table database_list corrupt, launcher will exit"); + + for (i = 0; num < SPI_processed; i++) + { + HeapTuple tup; + Oid dbid; + Datum dat; + bool isnull; + + tup = SPI_tuptable->vals[i]; + dat = SPI_getbinval(tup, tupdesc, 1, &isnull); + if (isnull) + { + elog(ERROR, "dbid can't be null"); + } + dbid = DatumGetObjectId(dat); + if (!is_valid_dbid(dbid)) + { + fake_dbid[fake_count++] = dbid; + continue; + } + if (start_worker_by_dboid(dbid) < 1) + { + elog(WARNING, "[diskquota]: start worker process of database(%d) failed", dbid); + } + num++; + } + num_db = num; + SPI_finish(); + PopActiveSnapshot(); + CommitTransactionCommand(); + + /* TODO: clean invalid database */ + +} + +static bool +add_db_to_config(Oid dbid) +{ + StringInfoData str; + + initStringInfo(&str); + appendStringInfo(&str, "insert into diskquota_namespace.database_list values(%d);", dbid); + exec_simple_spi(str.data,
SPI_OK_INSERT); + return true; +} + +static void +del_db_from_config(Oid dbid) +{ + StringInfoData str; + + initStringInfo(&str); + appendStringInfo(&str, "delete from diskquota_namespace.database_list where dbid=%d;", dbid); + exec_simple_spi(str.data, SPI_OK_DELETE); +} + +/* + * When the extension is dropped in a database, the diskquota launcher will receive a message + * to kill the diskquota worker process that is monitoring the target database. + */ +static void +try_kill_db_worker(Oid dbid) +{ + DiskQuotaWorkerEntry *hash_entry; + bool found; + hash_entry = (DiskQuotaWorkerEntry *)hash_search(disk_quota_worker_map, + (void *)&dbid, + HASH_REMOVE, &found); + if (found) + { + BackgroundWorkerHandle *handle; + handle = hash_entry->handle; + TerminateBackgroundWorker(handle); + pfree(handle); + } +} + +/* + * handle create extension diskquota + * if we know the exact error which caused the failure, + * we set it and error out + */ +static void +on_add_db(Oid dbid, MessageResult *code) +{ + if (num_db >= MAX_NUM_MONITORED_DB) + { + *code = ERR_EXCEED; + elog(ERROR, "[diskquota] too many databases to monitor"); + } + if (!is_valid_dbid(dbid)) + { + *code = ERR_INVALID_DBID; + elog(ERROR, "[diskquota] invalid database oid"); + } + + /* + * add dbid to diskquota_namespace.database_list + * set *code to ERR_ADD_TO_DB if any error occurs + */ + PG_TRY(); + { + add_db_to_config(dbid); + } + PG_CATCH(); + { + *code = ERR_ADD_TO_DB; + PG_RE_THROW(); + } + PG_END_TRY(); + + if (start_worker_by_dboid(dbid) < 1) + { + *code = ERR_START_WORKER; + elog(ERROR, "[diskquota] failed to start worker - dbid=%d", dbid); + } +} + +/* + * handle message: drop extension diskquota + * do our best to: + * 1. kill the associated worker process + * 2. delete dbid from diskquota_namespace.database_list + * 3. invalidate black-map entries from shared memory + */ +static void +on_del_db(Oid dbid) +{ + if (dbid == InvalidOid) + return; + try_kill_db_worker(dbid); + del_db_from_config(dbid); } /* ---- Functions for launcher process ---- */ @@ -278,50 +539,32 @@ disk_quota_worker_main(Datum main_arg) void disk_quota_launcher_main(Datum main_arg) { - List *dblist; - ListCell *cell; HASHCTL hash_ctl; - bool is_refresh = false; /* Establish signal handlers before unblocking signals.
*/ pqsignal(SIGHUP, disk_quota_sighup); pqsignal(SIGTERM, disk_quota_sigterm); + pqsignal(SIGUSR1, disk_quota_sigusr1); /* We're now ready to receive signals */ BackgroundWorkerUnblockSignals(); + message_box->launcher_pid = MyProcPid; + /* Connect to our database */ + BackgroundWorkerInitializeConnection("diskquota", NULL); + create_monitor_db_table(); + memset(&hash_ctl, 0, sizeof(hash_ctl)); - hash_ctl.keysize = NAMEDATALEN; + hash_ctl.keysize = sizeof(Oid); hash_ctl.entrysize = sizeof(DiskQuotaWorkerEntry); + hash_ctl.hash = oid_hash; disk_quota_worker_map = hash_create("disk quota worker map", - 1024, - &hash_ctl, - HASH_ELEM); - - ereport(LOG, - (errmsg("diskquota launcher started"))); - - dblist = get_database_list(&is_refresh); - if (is_refresh) - { - foreach(cell, dblist) - { - char *db_name; - - db_name = (char *) lfirst(cell); - if (db_name == NULL || *db_name == '\0') - { - ereport(LOG, - (errmsg("invalid db name='%s' in diskquota.monitor_databases", db_name))); - continue; - } - start_worker(db_name); - } - } - /* free dblist */ - list_free(dblist); + 1024, + &hash_ctl, + HASH_ELEM | HASH_FUNCTION); + start_workers_from_dblist(); /* * Main loop: do this until the SIGTERM handler tells us to terminate */ @@ -343,7 +586,12 @@ disk_quota_launcher_main(Datum main_arg) /* emergency bailout if postmaster has died */ if (rc & WL_POSTMASTER_DEATH) proc_exit(1); - + /* process message box, now someone is holding message_box_lock */ + if (got_sigusr1) + { + got_sigusr1 = false; + process_message_box(); + } /* * In case of a SIGHUP, just reload the configuration. */ @@ -351,12 +599,6 @@ disk_quota_launcher_main(Datum main_arg) { got_sighup = false; ProcessConfigFile(PGC_SIGHUP); - - /* - * terminate not monitored worker process and start new worker - * process - */ - refresh_worker_list(); } } @@ -364,175 +606,44 @@ disk_quota_launcher_main(Datum main_arg) proc_exit(1); } -/* - * Extract database list in GUC diskquota.monitored_database_list - * Parameter is_refresh is used to indicate whether to refresh the - * monitored database list when GUC monitored_database_list changed. - * If GUC contains more than 10 databases, is_refresh is set to false. - */ -static List * -get_database_list(bool *is_refresh) -{ - List *monitor_db_list = NIL; - char *dbstr; - - *is_refresh = true; - dbstr = pstrdup(diskquota_monitored_database_list); - - if (!SplitIdentifierString(dbstr, ',', &monitor_db_list)) - { - ereport(WARNING, - (errmsg("GUC monitor_databases:'%s' is invalid, GUC should be" - "separated by comma", - diskquota_monitored_database_list))); - pfree(dbstr); - return NULL; - } - - /* - * We only allow to minitor at most 10 databases truncate the list if - * there are more than 10 databases in list. - */ - if (list_length(monitor_db_list) > MAX_NUM_MONITORED_DB) - { - *is_refresh = false; - ereport(WARNING, - (errmsg("Currently diskquota could monitor at most 10 databases." - "GUC monitor_databases:'%s' contains more than" - " 10 databases, additional databases will be ignored.", - diskquota_monitored_database_list))); - monitor_db_list = list_truncate(monitor_db_list, MAX_NUM_MONITORED_DB); - } - - pfree(dbstr); - /* dblist should be list_free by the caller */ - return monitor_db_list; -} - -/* - * When launcher receive SIGHUP, it will call refresh_worker_list() - * to terminate worker processes whose connected database no longer need - * to be monitored, and start new worker processes to watch new database. 
- */ -static void -refresh_worker_list(void) -{ - List *monitor_dblist; - ListCell *cell; - bool flag = false; - bool is_refresh = false; - bool found; - DiskQuotaWorkerEntry *hash_entry; - HASH_SEQ_STATUS status; - - monitor_dblist = get_database_list(&is_refresh); - if (!is_refresh) - { - ereport(WARNING, - (errmsg("Failed to refresh monitored database. GUC " - "monitor_databases:'%s' should contain less than " - "10 databases.", - diskquota_monitored_database_list))); - return; - } - - /* - * refresh the worker process based on the configuration file change. step - * 1 is to terminate worker processes whose connected database not in - * monitor database list. - */ - ereport(LOG, - (errmsg("Refresh monitored database list."))); - hash_seq_init(&status, disk_quota_worker_map); - - while ((hash_entry = (DiskQuotaWorkerEntry *) hash_seq_search(&status)) != NULL) - { - flag = false; - foreach(cell, monitor_dblist) - { - char *db_name; - - db_name = (char *) lfirst(cell); - if (db_name == NULL || *db_name == '\0') - { - continue; - } - if (strcmp(db_name, hash_entry->dbname) == 0) - { - flag = true; - break; - } - } - if (!flag) - { - TerminateBackgroundWorker(hash_entry->handle); - (DiskQuotaWorkerEntry *) hash_search(disk_quota_worker_map, - (void *) hash_entry->dbname, - HASH_REMOVE, NULL); - } - } - - /* step 2: start new worker which first appears in monitor database list. */ - foreach(cell, monitor_dblist) - { - DiskQuotaWorkerEntry *workerentry; - char *db_name; - pid_t pid; - - db_name = (char *) lfirst(cell); - if (db_name == NULL || *db_name == '\0') - { - continue; - } - workerentry = (DiskQuotaWorkerEntry *) hash_search(disk_quota_worker_map, - (void *) db_name, - HASH_FIND, &found); - if (found) - { - /* in case worker is not in BGWH_STARTED mode, restart it. */ - if (GetBackgroundWorkerPid(workerentry->handle, &pid) != BGWH_STARTED) - start_worker(db_name); - } - else - { - start_worker(db_name); - } - } - - /* free monitor_dblist */ - list_free(monitor_dblist); -} - /* * Dynamically launch an disk quota worker process. 
*/ static int -start_worker(char *dbname) +start_worker_by_dboid(Oid dbid) { BackgroundWorker worker; BackgroundWorkerHandle *handle; BgwHandleStatus status; - pid_t pid; - bool found; - DiskQuotaWorkerEntry *workerentry; + MemoryContext old_ctx; + char *dbname; + pid_t pid; + bool found; + bool ok; + DiskQuotaWorkerEntry* workerentry; memset(&worker, 0, sizeof(BackgroundWorker)); worker.bgw_flags = BGWORKER_SHMEM_ACCESS | BGWORKER_BACKEND_DATABASE_CONNECTION; worker.bgw_start_time = BgWorkerStart_RecoveryFinished; worker.bgw_restart_time = BGW_NEVER_RESTART; - snprintf(worker.bgw_library_name, BGW_MAXLEN, "diskquota"); - snprintf(worker.bgw_function_name, BGW_MAXLEN, "disk_quota_worker_main"); - snprintf(worker.bgw_name, BGW_MAXLEN, "%s", dbname); + sprintf(worker.bgw_library_name, "diskquota"); + sprintf(worker.bgw_function_name, "disk_quota_worker_main"); + + dbname = get_database_name(dbid); + Assert(dbname != NULL); + snprintf(worker.bgw_name, sizeof(worker.bgw_name), "%s", dbname); + pfree(dbname); /* set bgw_notify_pid so that we can use WaitForBackgroundWorkerStartup */ worker.bgw_notify_pid = MyProcPid; worker.bgw_main_arg = (Datum) 0; - if (!RegisterDynamicBackgroundWorker(&worker, &handle)) + old_ctx = MemoryContextSwitchTo(TopMemoryContext); + ok = RegisterDynamicBackgroundWorker(&worker, &handle); + MemoryContextSwitchTo(old_ctx); + if (!ok) return -1; - status = WaitForBackgroundWorkerStartup(handle, &pid); - if (status == BGWH_STOPPED) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_RESOURCES), @@ -543,15 +654,17 @@ start_worker(char *dbname) (errcode(ERRCODE_INSUFFICIENT_RESOURCES), errmsg("cannot start background processes without postmaster"), errhint("Kill all remaining database processes and restart the database."))); + Assert(status == BGWH_STARTED); /* put the worker handle into the worker map */ - workerentry = (DiskQuotaWorkerEntry *) hash_search(disk_quota_worker_map, - (void *) dbname, - HASH_ENTER, &found); + workerentry = (DiskQuotaWorkerEntry *)hash_search(disk_quota_worker_map, + (void *)&dbid, + HASH_ENTER, &found); if (!found) { workerentry->handle = handle; + workerentry->pid = pid; } return pid; @@ -822,3 +935,167 @@ get_size_in_mb(char *str) return result; } + +/* + * Trigger starting a diskquota worker when `create extension diskquota` is run. + * This function is called at the backend side, and will send a message to the + * diskquota launcher. The launcher process is responsible for starting the real + * diskquota worker process.
+ */ +Datum +diskquota_start_worker(PG_FUNCTION_ARGS) +{ + int rc; + elog(LOG, "[diskquota]:DB = %d, MyProc=%p launcher pid=%d", MyDatabaseId, MyProc, message_box->launcher_pid); + LWLockAcquire(diskquota_locks.message_box_lock, LW_EXCLUSIVE); + message_box->req_pid = MyProcPid; + message_box->cmd = CMD_CREATE_EXTENSION; + message_box->result = ERR_PENDING; + message_box->data[0] = MyDatabaseId; + /* signal the launcher and wait for its response */ + rc = kill(message_box->launcher_pid, SIGUSR1); + if (rc == 0) + { + int count = WAIT_TIME_COUNT; + while(count-- > 0) + { + rc = WaitLatch(&MyProc->procLatch, + WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, + 100L); + if (rc & WL_POSTMASTER_DEATH) + break; + ResetLatch(&MyProc->procLatch); + if (message_box->result != ERR_PENDING) + break; + } + } + message_box->req_pid = 0; + LWLockRelease(diskquota_locks.message_box_lock); + if (message_box->result != ERR_OK) + elog(ERROR, "%s", err_code_to_err_message((MessageResult)message_box->result)); + PG_RETURN_VOID(); +} + +static void +process_message_box_internal(MessageResult *code) +{ + Assert(message_box->launcher_pid == MyProcPid); + switch (message_box->cmd) + { + case CMD_CREATE_EXTENSION: + on_add_db(message_box->data[0], code); + num_db++; + break; + case CMD_DROP_EXTENSION: + on_del_db(message_box->data[0]); + num_db--; + break; + default: + elog(LOG, "[diskquota]:unsupported message cmd=%d", message_box->cmd); + *code = ERR_UNKNOWN; + break; + } +} + +/* + * This function is called by the launcher process to handle messages from other backend + * processes which call CREATE/DROP EXTENSION diskquota; it must be able to catch errors + * and return an error code back to the backend process. + */ +static void +process_message_box() +{ + MessageResult code = ERR_UNKNOWN; + int old_num_db = num_db; + if (message_box->req_pid == 0) + return; + elog(LOG, "[launcher]: received message"); + PG_TRY(); + { + StartTransactionCommand(); + process_message_box_internal(&code); + CommitTransactionCommand(); + code = ERR_OK; + } + PG_CATCH(); + { + error_context_stack = NULL; + HOLD_INTERRUPTS(); + AbortCurrentTransaction(); + FlushErrorState(); + RESUME_INTERRUPTS(); + num_db = old_num_db; + } + PG_END_TRY(); + + message_box->result = (int)code; +} + +/* + * This hook is used to handle the drop extension diskquota event. + * It will send a CMD_DROP_EXTENSION message to the diskquota launcher. + * The launcher will terminate the corresponding worker process and + * remove the dbOid from the database_list table. + */ +static void +dq_object_access_hook(ObjectAccessType access, Oid classId, + Oid objectId, int subId, void *arg) +{ + Oid oid; + int rc; + if (access != OAT_DROP || classId != ExtensionRelationId) + goto out; + oid = get_extension_oid("diskquota", true); + if (oid != objectId) + goto out; + /* + * invoke drop extension diskquota + * 1. stop bgworker for MyDatabaseId + * 2.
remove dbid from diskquota_namespace.database_list in the `diskquota` database + */ + LWLockAcquire(diskquota_locks.message_box_lock, LW_EXCLUSIVE); + message_box->req_pid = MyProcPid; + message_box->cmd = CMD_DROP_EXTENSION; + message_box->result = ERR_PENDING; + message_box->data[0] = MyDatabaseId; + rc = kill(message_box->launcher_pid, SIGUSR1); + if (rc == 0) + { + int count = WAIT_TIME_COUNT; + while(count-- >0) + { + rc = WaitLatch(&MyProc->procLatch, + WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, + 100L); + if (rc & WL_POSTMASTER_DEATH) + break; + ResetLatch(&MyProc->procLatch); + if (message_box->result != ERR_PENDING) + break; + } + } + message_box->req_pid = 0; + LWLockRelease(diskquota_locks.message_box_lock); + if (message_box->result != ERR_OK) + elog(ERROR, "[diskquota] %s", err_code_to_err_message((MessageResult)message_box->result)); + elog(LOG, "[diskquota] DROP EXTENSION diskquota; OK"); + +out: + if (next_object_access_hook) + (*next_object_access_hook)(access, classId, objectId, + subId, arg); +} + +static const char *err_code_to_err_message(MessageResult code) +{ + switch (code) + { + case ERR_PENDING: return "no response from launcher, or timeout"; + case ERR_OK: return "NO ERROR"; + case ERR_EXCEED: return "too many database to monitor"; + case ERR_ADD_TO_DB: return "add dbid to database_list failed"; + case ERR_START_WORKER: return "start worker failed"; + case ERR_INVALID_DBID: return "invalid dbid"; + default: return "unknown error"; + } +} diff --git a/diskquota.h b/diskquota.h index c3b4d75864d..e2b5a9f3be8 100644 --- a/diskquota.h +++ b/diskquota.h @@ -16,13 +16,64 @@ typedef enum FETCH_ACTIVE_SIZE /* fetch size for active tables */ } FetchTableStatType; -typedef struct +struct DiskQuotaLocks { - LWLock *lock; /* protects shared memory of blackMap */ -} disk_quota_shared_state; + LWLock *active_table_lock; + LWLock *black_map_lock; + LWLock *message_box_lock; +}; +typedef struct DiskQuotaLocks DiskQuotaLocks; + +/* + * MessageBox is used to store a message for communication between + * the diskquota launcher process and backends. + * When a backend creates the extension, it sends a message to the launcher + * to start the diskquota worker process and write the corresponding + * dbOid into the database_list table in the `diskquota` database. + * When a backend drops the extension, it sends a message to the launcher + * to stop the diskquota worker process and remove the dbOid from the + * database_list table as well.
+ */ +struct MessageBox +{ + int launcher_pid; + int req_pid; /* pid of the request process */ + int cmd; /* message command type, see MessageCommand */ + int result; /* message result written by launcher, see MessageResult */ + int data[4]; /* for create/drop extension diskquota, data[0] is dbid */ +}; + +enum MessageCommand +{ + CMD_CREATE_EXTENSION = 1, + CMD_DROP_EXTENSION, +}; + +enum MessageResult +{ + ERR_PENDING = 0, + ERR_OK, + /* the number of databases exceeds the maximum */ + ERR_EXCEED, + /* adding the dbid to diskquota_namespace.database_list failed */ + ERR_ADD_TO_DB, + /* can't start worker process */ + ERR_START_WORKER, + /* invalid dbid */ + ERR_INVALID_DBID, + ERR_UNKNOWN, +}; + +typedef struct MessageBox MessageBox; +typedef enum MessageCommand MessageCommand; +typedef enum MessageResult MessageResult; + +extern DiskQuotaLocks diskquota_locks; +extern volatile MessageBox *message_box; /* enforcement interface*/ extern void init_disk_quota_enforcement(void); +extern void diskquota_invalidate_db(Oid dbid); /* quota model interface*/ extern void init_disk_quota_shmem(void); @@ -33,10 +84,8 @@ extern bool quota_check_common(Oid reloid); /* quotaspi interface */ extern void init_disk_quota_hook(void); -extern int diskquota_naptime; -extern char *diskquota_monitored_database_list; -extern int diskquota_max_active_tables; - extern Datum diskquota_fetch_table_stat(PG_FUNCTION_ARGS); +extern int diskquota_naptime; +extern int diskquota_max_active_tables; #endif diff --git a/diskquota_schedule b/diskquota_schedule index 64b4c7524ff..61fadf26c7c 100644 --- a/diskquota_schedule +++ b/diskquota_schedule @@ -3,6 +3,6 @@ test: prepare test: test_role test_schema test_drop_table test_column test_copy test_update test_toast test_truncate test_reschema test_temp_role test_rename test_delete_quota test_mistake test: test_partition test: test_vacuum +test: test_extension test: clean -test: fini - +test: test_insert_after_drop diff --git a/expected/fini.out b/expected/fini.out deleted file mode 100644 index cff593cf3ad..00000000000 --- a/expected/fini.out +++ /dev/null @@ -1,9 +0,0 @@ -\! gpconfig -c diskquota.monitor_databases -v postgres > /dev/null -\! echo $? -0 --- start_ignore -\! gpstop -u > /dev/null -\! echo $? -0 --- end_ignore -\! sleep 2 diff --git a/expected/init.out b/expected/init.out index feba3dcdd93..0f149d65ca9 100644 --- a/expected/init.out +++ b/expected/init.out @@ -4,22 +4,17 @@ \! echo $? 0 -- start_ignore -\! gpstop -raf > /dev/null --- end_ignore -\! echo $? -0 --- start_ignore -\! gpconfig -c diskquota.monitor_databases -v contrib_regression > /dev/null +\! gpconfig -c diskquota.naptime -v 2 > /dev/null -- end_ignore \! echo $? 0 -- start_ignore -\! gpconfig -c diskquota.naptime -v 2 > /dev/null +\! gpconfig -c max_worker_processes -v 20 > /dev/null -- end_ignore \! echo $? 0 -- start_ignore -\! gpstop -u > /dev/null +\! gpstop -raf > /dev/null -- end_ignore \! echo $? 0 diff --git a/expected/test_extension.out b/expected/test_extension.out new file mode 100644 index 00000000000..2a64469cf71 --- /dev/null +++ b/expected/test_extension.out @@ -0,0 +1,293 @@ +-- NOTE: when testing this script, you must make sure that there is no diskquota launcher +-- process or diskquota worker process, i.e.
`ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l` +-- returns 0 +CREATE DATABASE dbx0 ; +CREATE DATABASE dbx1 ; +CREATE DATABASE dbx2 ; +CREATE DATABASE dbx3 ; +CREATE DATABASE dbx4 ; +CREATE DATABASE dbx5 ; +CREATE DATABASE dbx6 ; +CREATE DATABASE dbx7 ; +CREATE DATABASE dbx8 ; +CREATE DATABASE dbx9 ; +CREATE DATABASE dbx10 ; +show max_worker_processes; + max_worker_processes +---------------------- + 20 +(1 row) + +\! sleep 4 +\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +2 +\c dbx0 +CREATE EXTENSION diskquota; +\! sleep 2 +\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +3 +CREATE SCHEMA SX; +CREATE TABLE SX.a(i int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +SELECT diskquota.set_schema_quota('SX', '1MB'); + set_schema_quota +------------------ + +(1 row) + +INSERT INTO SX.a values(generate_series(0, 100000000)); +ERROR: schema's disk space quota exceeded with name:sx +INSERT INTO SX.a values(generate_series(0, 10)); +ERROR: schema's disk space quota exceeded with name:sx +DROP TABLE SX.a; +\c dbx1 +CREATE EXTENSION diskquota; +\! sleep 2 +\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +4 +CREATE SCHEMA SX; +CREATE TABLE SX.a(i int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +SELECT diskquota.set_schema_quota('SX', '1MB'); + set_schema_quota +------------------ + +(1 row) + +INSERT INTO SX.a values(generate_series(0, 100000000)); +ERROR: schema's disk space quota exceeded with name:sx +INSERT INTO SX.a values(generate_series(0, 10)); +ERROR: schema's disk space quota exceeded with name:sx +DROP TABLE SX.a; +\c dbx2 +CREATE EXTENSION diskquota; +\! sleep 2 +\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +5 +CREATE SCHEMA SX; +CREATE TABLE SX.a(i int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +SELECT diskquota.set_schema_quota('SX', '1MB'); + set_schema_quota +------------------ + +(1 row) + +INSERT INTO SX.a values(generate_series(0, 100000000)); +ERROR: schema's disk space quota exceeded with name:sx +INSERT INTO SX.a values(generate_series(0, 10)); +ERROR: schema's disk space quota exceeded with name:sx +DROP TABLE SX.a; +\c dbx3 +CREATE EXTENSION diskquota; +\! sleep 2 +\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +6 +CREATE SCHEMA SX; +CREATE TABLE SX.a(i int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
+SELECT diskquota.set_schema_quota('SX', '1MB'); + set_schema_quota +------------------ + +(1 row) + +INSERT INTO SX.a values(generate_series(0, 100000000)); +ERROR: schema's disk space quota exceeded with name:sx +INSERT INTO SX.a values(generate_series(0, 10)); +ERROR: schema's disk space quota exceeded with name:sx +DROP TABLE SX.a; +\c dbx4 +CREATE EXTENSION diskquota; +\! sleep 2 +\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +7 +CREATE SCHEMA SX; +CREATE TABLE SX.a(i int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +SELECT diskquota.set_schema_quota('SX', '1MB'); + set_schema_quota +------------------ + +(1 row) + +INSERT INTO SX.a values(generate_series(0, 100000000)); +ERROR: schema's disk space quota exceeded with name:sx +INSERT INTO SX.a values(generate_series(0, 10)); +ERROR: schema's disk space quota exceeded with name:sx +DROP TABLE SX.a; +\c dbx5 +CREATE EXTENSION diskquota; +\! sleep 2 +\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +8 +CREATE SCHEMA SX; +CREATE TABLE SX.a(i int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +SELECT diskquota.set_schema_quota('SX', '1MB'); + set_schema_quota +------------------ + +(1 row) + +INSERT INTO SX.a values(generate_series(0, 100000000)); +ERROR: schema's disk space quota exceeded with name:sx +INSERT INTO SX.a values(generate_series(0, 10)); +ERROR: schema's disk space quota exceeded with name:sx +DROP TABLE SX.a; +\c dbx6 +CREATE EXTENSION diskquota; +\! sleep 2 +\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +9 +CREATE SCHEMA SX; +CREATE TABLE SX.a(i int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +SELECT diskquota.set_schema_quota('SX', '1MB'); + set_schema_quota +------------------ + +(1 row) + +INSERT INTO SX.a values(generate_series(0, 100000000)); +ERROR: schema's disk space quota exceeded with name:sx +INSERT INTO SX.a values(generate_series(0, 10)); +ERROR: schema's disk space quota exceeded with name:sx +DROP TABLE SX.a; +\c dbx7 +CREATE EXTENSION diskquota; +\! sleep 2 +\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +10 +CREATE SCHEMA SX; +CREATE TABLE SX.a(i int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
+SELECT diskquota.set_schema_quota('SX', '1MB'); + set_schema_quota +------------------ + +(1 row) + +INSERT INTO SX.a values(generate_series(0, 100000000)); +ERROR: schema's disk space quota exceeded with name:sx +INSERT INTO SX.a values(generate_series(0, 10)); +ERROR: schema's disk space quota exceeded with name:sx +DROP TABLE SX.a; +\c dbx8 +CREATE EXTENSION diskquota; +\! sleep 2 +\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +11 +CREATE SCHEMA SX; +CREATE TABLE SX.a(i int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +SELECT diskquota.set_schema_quota('SX', '1MB'); + set_schema_quota +------------------ + +(1 row) + +INSERT INTO SX.a values(generate_series(0, 100000000)); +ERROR: schema's disk space quota exceeded with name:sx +INSERT INTO SX.a values(generate_series(0, 10)); +ERROR: schema's disk space quota exceeded with name:sx +DROP TABLE SX.a; +\c dbx9 +CREATE EXTENSION diskquota; +ERROR: too many database to monitor (diskquota.c:975) +\! sleep 2 +\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +11 +CREATE SCHEMA SX; +CREATE TABLE SX.a(i int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +SELECT diskquota.set_schema_quota('SX', '1MB'); +ERROR: schema "diskquota" does not exist +INSERT INTO SX.a values(generate_series(0, 10000000)); +INSERT INTO SX.a values(generate_series(0, 10)); +DROP TABLE SX.a; +\c dbx10 +CREATE EXTENSION diskquota; +ERROR: too many database to monitor (diskquota.c:975) +\! sleep 2 +\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +11 +\c dbx0 +DROP EXTENSION diskquota; +\! sleep 2 +\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +10 +\c dbx1 +DROP EXTENSION diskquota; +\! sleep 2 +\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +9 +\c dbx2 +DROP EXTENSION diskquota; +\! sleep 2 +\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +8 +\c dbx3 +DROP EXTENSION diskquota; +\! sleep 2 +\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +7 +\c dbx4 +DROP EXTENSION diskquota; +\! sleep 2 +\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +6 +\c dbx5 +DROP EXTENSION diskquota; +\! sleep 2 +\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +5 +\c dbx6 +DROP EXTENSION diskquota; +\! sleep 2 +\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +4 +\c dbx7 +DROP EXTENSION diskquota; +\! sleep 2 +\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +3 +\c dbx8 +DROP EXTENSION diskquota; +\! sleep 2 +\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +2 +\c dbx9 +DROP EXTENSION diskquota; +ERROR: extension "diskquota" does not exist +\! sleep 2 +\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +2 +\c dbx10 +DROP EXTENSION diskquota; +ERROR: extension "diskquota" does not exist +\! sleep 2 +\! 
ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +2 +\c postgres +DROP DATABASE dbx0 ; +DROP DATABASE dbx1 ; +DROP DATABASE dbx2 ; +DROP DATABASE dbx3 ; +DROP DATABASE dbx4 ; +DROP DATABASE dbx5 ; +DROP DATABASE dbx6 ; +DROP DATABASE dbx7 ; +DROP DATABASE dbx8 ; +DROP DATABASE dbx9 ; +DROP DATABASE dbx10 ; diff --git a/expected/test_insert_after_drop.out b/expected/test_insert_after_drop.out new file mode 100644 index 00000000000..f50a06c9a47 --- /dev/null +++ b/expected/test_insert_after_drop.out @@ -0,0 +1,33 @@ +CREATE DATABASE db_insert_after_drop; +\c db_insert_after_drop +CREATE EXTENSION diskquota; +-- Test Drop Extension +CREATE SCHEMA sdrtbl; +SELECT diskquota.set_schema_quota('sdrtbl', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +SET search_path TO sdrtbl; +CREATE TABLE a(i int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +INSERT INTO a SELECT generate_series(1,100); +-- expect insert fail +INSERT INTO a SELECT generate_series(1,100000000); +ERROR: schema's disk space quota exceeded with name:sdrtbl +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +INSERT INTO a SELECT generate_series(1,100); +ERROR: schema's disk space quota exceeded with name:sdrtbl +DROP EXTENSION diskquota; +-- no sleep, it will take effect immediately +INSERT INTO a SELECT generate_series(1,100); +DROP TABLE a; +\c postgres +DROP DATABASE db_insert_after_drop; diff --git a/expected/test_schema.out b/expected/test_schema.out index 547ac8ded4e..f94d1b5e44c 100644 --- a/expected/test_schema.out +++ b/expected/test_schema.out @@ -1,51 +1,51 @@ -- Test schema -create schema s1; -select diskquota.set_schema_quota('s1', '1 MB'); +CREATE SCHEMA s1; +SELECT diskquota.set_schema_quota('s1', '1 MB'); set_schema_quota ------------------ (1 row) -set search_path to s1; -create table a(i int); +SET search_path TO s1; +CREATE TABLE a(i int); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. -insert into a select generate_series(1,100); +INSERT INTO a SELECT generate_series(1,100); -- expect insert fail -insert into a select generate_series(1,100000000); +INSERT INTO a SELECT generate_series(1,100000000); ERROR: schema's disk space quota exceeded with name:s1 -- expect insert fail -insert into a select generate_series(1,100); +INSERT INTO a SELECT generate_series(1,100); ERROR: schema's disk space quota exceeded with name:s1 -create table a2(i int); +CREATE TABLE a2(i int); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
-- expect insert fail -insert into a2 select generate_series(1,100); +INSERT INTO a2 SELECT generate_series(1,100); ERROR: schema's disk space quota exceeded with name:s1 -- Test alter table set schema -create schema s2; -alter table s1.a set schema s2; -select pg_sleep(5); +CREATE SCHEMA s2; +ALTER TABLE s1.a SET SCHEMA s2; +SELECT pg_sleep(5); pg_sleep ---------- (1 row) -- expect insert succeed -insert into a2 select generate_series(1,200); +INSERT INTO a2 SELECT generate_series(1,200); -- expect insert succeed -insert into s2.a select generate_series(1,200); -alter table s2.a set schema badquota; +INSERT INTO s2.a SELECT generate_series(1,200); +ALTER TABLE s2.a SET SCHEMA badquota; -- expect failed -insert into badquota.a select generate_series(0, 100); +INSERT INTO badquota.a SELECT generate_series(0, 100); ERROR: schema's disk space quota exceeded with name:badquota -select schema_name, quota_in_mb from diskquota.show_schema_quota_view where schema_name = 's1'; +SELECT schema_name, quota_in_mb FROM diskquota.show_schema_quota_view WHERE schema_name = 's1'; schema_name | quota_in_mb -------------+------------- s1 | 1 (1 row) -reset search_path; -drop table s1.a2, badquota.a; -drop schema s1, s2; +RESET search_path; +DROP TABLE s1.a2, badquota.a; +DROP SCHEMA s1, s2; diff --git a/gp_activetable.c b/gp_activetable.c index cbe0c1a80b9..0a885ea4dc4 100644 --- a/gp_activetable.c +++ b/gp_activetable.c @@ -154,24 +154,6 @@ init_shm_worker_active_tables(void) HASH_ELEM | HASH_FUNCTION); } -/* - * Init lock of active table map - */ -void -init_lock_active_tables(void) -{ - bool found = false; - - active_table_shm_lock = ShmemInitStruct("disk_quota_active_table_shm_lock", - sizeof(disk_quota_shared_state), - &found); - - if (!found) - { - active_table_shm_lock->lock = LWLockAssign(); - } -} - /* * Common function for reporting active tables, used by smgr and ao */ @@ -188,11 +170,11 @@ report_active_table_helper(const RelFileNodeBackend *relFileNode) item.relfilenode = relFileNode->node.relNode; item.tablespaceoid = relFileNode->node.spcNode; - LWLockAcquire(active_table_shm_lock->lock, LW_EXCLUSIVE); + LWLockAcquire(diskquota_locks.active_table_lock, LW_EXCLUSIVE); entry = hash_search(active_tables_map, &item, HASH_ENTER_NULL, &found); if (entry && !found) *entry = item; - LWLockRelease(active_table_shm_lock->lock); + LWLockRelease(diskquota_locks.active_table_lock); if (!found && entry == NULL) { @@ -533,7 +515,7 @@ get_active_tables(void) HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); /* Move active table from shared memory to local active table map */ - LWLockAcquire(active_table_shm_lock->lock, LW_EXCLUSIVE); + LWLockAcquire(diskquota_locks.active_table_lock, LW_EXCLUSIVE); hash_seq_init(&iter, active_tables_map); @@ -553,7 +535,7 @@ get_active_tables(void) *entry = *active_table_file_entry; hash_search(active_tables_map, active_table_file_entry, HASH_REMOVE, NULL); } - LWLockRelease(active_table_shm_lock->lock); + LWLockRelease(diskquota_locks.active_table_lock); memset(&ctl, 0, sizeof(ctl)); ctl.keysize = sizeof(Oid); diff --git a/gp_activetable.h b/gp_activetable.h index 01a040493f6..1b975609665 100644 --- a/gp_activetable.h +++ b/gp_activetable.h @@ -24,7 +24,6 @@ extern void init_shm_worker_active_tables(void); extern void init_lock_active_tables(void); extern HTAB *active_tables_map; -extern disk_quota_shared_state * active_table_shm_lock; #define atooid(x) ((Oid) strtoul((x), NULL, 10)) diff --git a/quotamodel.c b/quotamodel.c index 60f40bc96d4..590a28ed869 100644 --- a/quotamodel.c 
+++ b/quotamodel.c @@ -119,9 +119,6 @@ static HTAB *role_quota_limit_map = NULL; static HTAB *disk_quota_black_map = NULL; static HTAB *local_disk_quota_black_map = NULL; -static disk_quota_shared_state * black_map_shm_lock; -disk_quota_shared_state *active_table_shm_lock = NULL; - static shmem_startup_hook_type prev_shmem_startup_hook = NULL; /* functions to refresh disk quota model*/ @@ -149,13 +146,19 @@ DiskQuotaShmemSize(void) { Size size; - size = MAXALIGN(sizeof(disk_quota_shared_state)); - size = add_size(size, size); /* two locks */ + size = sizeof(MessageBox); size = add_size(size, hash_estimate_size(MAX_DISK_QUOTA_BLACK_ENTRIES, sizeof(BlackMapEntry))); size = add_size(size, hash_estimate_size(diskquota_max_active_tables, sizeof(DiskQuotaActiveTableEntry))); return size; } +static void +init_lwlocks(void) +{ + diskquota_locks.active_table_lock = LWLockAssign(); + diskquota_locks.black_map_lock = LWLockAssign(); + diskquota_locks.message_box_lock = LWLockAssign(); +} /* * DiskQuotaShmemInit * Allocate and initialize diskquota-related shared memory @@ -169,21 +172,16 @@ disk_quota_shmem_startup(void) if (prev_shmem_startup_hook) (*prev_shmem_startup_hook) (); - black_map_shm_lock = NULL; disk_quota_black_map = NULL; LWLockAcquire(AddinShmemInitLock, LW_EXCLUSIVE); - black_map_shm_lock = ShmemInitStruct("disk_quota_black_map_shm_lock", - sizeof(disk_quota_shared_state), - &found); - + init_lwlocks(); + message_box = ShmemInitStruct("disk_quota_message_box", + sizeof(MessageBox), + &found); if (!found) - { - black_map_shm_lock->lock = LWLockAssign(); - } - - init_lock_active_tables(); + memset((void*)message_box, 0, sizeof(MessageBox)); memset(&hash_ctl, 0, sizeof(hash_ctl)); hash_ctl.keysize = sizeof(BlackMapEntry); @@ -210,6 +208,7 @@ init_disk_quota_shmem(void) * resources in pgss_shmem_startup(). */ RequestAddinShmemSpace(DiskQuotaShmemSize()); + RequestAddinLWLocks(3); /* * Install startup hook to initialize our shared memory. 
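The hunks above consolidate the extension's shared state: the two per-structure lock wrappers are replaced by a single DiskQuotaLocks struct, a shared MessageBox is added, and RequestAddinLWLocks(3) reserves the lock slots that init_lwlocks() later claims. This follows the standard addin shared-memory pattern of the PostgreSQL 9.4-era API that Greenplum 6 is based on: request space and LWLock slots at library load time, then carve them out in the shmem startup hook under AddinShmemInitLock. A minimal sketch of that pattern, reusing the names from the diff (the helper names here are hypothetical, and error handling plus the black-map hash setup are omitted):

```c
#include "postgres.h"
#include "storage/lwlock.h"
#include "storage/shmem.h"

/* diskquota_locks, message_box, MessageBox and DiskQuotaShmemSize()
 * are assumed to be declared as in the diffs above. */

/* Called from _PG_init(): reserve resources before postmaster forks. */
static void
reserve_shared_resources(void)		/* hypothetical helper name */
{
	RequestAddinShmemSpace(DiskQuotaShmemSize());
	RequestAddinLWLocks(3);		/* active_table, black_map, message_box */
}

/* Called from the shmem_startup_hook: claim the reserved slots once. */
static void
claim_shared_resources(void)		/* hypothetical helper name */
{
	bool		found;

	LWLockAcquire(AddinShmemInitLock, LW_EXCLUSIVE);

	/* LWLockAssign() hands out one of the slots reserved above */
	diskquota_locks.active_table_lock = LWLockAssign();
	diskquota_locks.black_map_lock = LWLockAssign();
	diskquota_locks.message_box_lock = LWLockAssign();

	message_box = ShmemInitStruct("disk_quota_message_box",
								  sizeof(MessageBox), &found);
	if (!found)					/* first process to attach zeroes it */
		memset((void *) message_box, 0, sizeof(MessageBox));

	LWLockRelease(AddinShmemInitLock);
}
```

The count passed to RequestAddinLWLocks() must match the number of LWLockAssign() calls made in the startup hook; if too few slots are reserved, LWLockAssign() fails once the pool set aside for add-ins is exhausted.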
@@ -339,7 +338,8 @@ flush_local_black_map(void) BlackMapEntry *blackentry; bool found; - LWLockAcquire(black_map_shm_lock->lock, LW_EXCLUSIVE); + LWLockAcquire(diskquota_locks.black_map_lock, LW_EXCLUSIVE); + hash_seq_init(&iter, local_disk_quota_black_map); while ((localblackentry = hash_seq_search(&iter)) != NULL) { @@ -377,7 +377,7 @@ flush_local_black_map(void) HASH_REMOVE, NULL); } } - LWLockRelease(black_map_shm_lock->lock); + LWLockRelease(diskquota_locks.black_map_lock); } /* @@ -846,7 +846,7 @@ quota_check_common(Oid reloid) memset(&keyitem, 0, sizeof(BlackMapEntry)); get_rel_owner_schema(reloid, &ownerOid, &nsOid); - LWLockAcquire(black_map_shm_lock->lock, LW_SHARED); + LWLockAcquire(diskquota_locks.black_map_lock, LW_SHARED); if (nsOid != InvalidOid) { @@ -882,6 +882,26 @@ quota_check_common(Oid reloid) return false; } } - LWLockRelease(black_map_shm_lock->lock); + LWLockRelease(diskquota_locks.black_map_lock); return true; } + +/* + * invalidate all black entry with a specific dbid in SHM + */ +void +diskquota_invalidate_db(Oid dbid) +{ + BlackMapEntry * entry; + HASH_SEQ_STATUS iter; + LWLockAcquire(diskquota_locks.black_map_lock, LW_EXCLUSIVE); + hash_seq_init(&iter, disk_quota_black_map); + while ((entry = hash_seq_search(&iter)) != NULL) + { + if (entry->databaseoid == dbid) + { + hash_search(disk_quota_black_map, entry, HASH_REMOVE, NULL); + } + } + LWLockRelease(diskquota_locks.black_map_lock); +} diff --git a/sql/fini.sql b/sql/fini.sql deleted file mode 100644 index fa380b454c5..00000000000 --- a/sql/fini.sql +++ /dev/null @@ -1,8 +0,0 @@ -\! gpconfig -c diskquota.monitor_databases -v postgres > /dev/null -\! echo $? --- start_ignore -\! gpstop -u > /dev/null -\! echo $? --- end_ignore - -\! sleep 2 diff --git a/sql/init.sql b/sql/init.sql index 9e18ee24e18..e8b1d49854f 100644 --- a/sql/init.sql +++ b/sql/init.sql @@ -2,24 +2,17 @@ \! gpconfig -c shared_preload_libraries -v diskquota > /dev/null -- end_ignore \! echo $? - --- start_ignore -\! gpstop -raf > /dev/null --- end_ignore -\! echo $? - -- start_ignore -\! gpconfig -c diskquota.monitor_databases -v contrib_regression > /dev/null +\! gpconfig -c diskquota.naptime -v 2 > /dev/null -- end_ignore \! echo $? - -- start_ignore -\! gpconfig -c diskquota.naptime -v 2 > /dev/null +\! gpconfig -c max_worker_processes -v 20 > /dev/null -- end_ignore \! echo $? -- start_ignore -\! gpstop -u > /dev/null +\! gpstop -raf > /dev/null -- end_ignore \! echo $? diff --git a/sql/test_extension.sql b/sql/test_extension.sql new file mode 100644 index 00000000000..f92e9d7d9ff --- /dev/null +++ b/sql/test_extension.sql @@ -0,0 +1,203 @@ +-- NOTE: when test this script, you must make sure that there is no diskquota launcher +-- process or diskquota worker process. i.e. `ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l` +-- returns 0 +CREATE DATABASE dbx0 ; +CREATE DATABASE dbx1 ; +CREATE DATABASE dbx2 ; +CREATE DATABASE dbx3 ; +CREATE DATABASE dbx4 ; +CREATE DATABASE dbx5 ; +CREATE DATABASE dbx6 ; +CREATE DATABASE dbx7 ; +CREATE DATABASE dbx8 ; +CREATE DATABASE dbx9 ; +CREATE DATABASE dbx10 ; + +show max_worker_processes; + +\! sleep 4 +\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l + +\c dbx0 +CREATE EXTENSION diskquota; +\! sleep 2 +\! 
ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +CREATE SCHEMA SX; +CREATE TABLE SX.a(i int); +SELECT diskquota.set_schema_quota('SX', '1MB'); +INSERT INTO SX.a values(generate_series(0, 100000000)); +INSERT INTO SX.a values(generate_series(0, 10)); +DROP TABLE SX.a; + +\c dbx1 +CREATE EXTENSION diskquota; +\! sleep 2 +\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +CREATE SCHEMA SX; +CREATE TABLE SX.a(i int); +SELECT diskquota.set_schema_quota('SX', '1MB'); +INSERT INTO SX.a values(generate_series(0, 100000000)); +INSERT INTO SX.a values(generate_series(0, 10)); +DROP TABLE SX.a; + +\c dbx2 +CREATE EXTENSION diskquota; +\! sleep 2 +\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +CREATE SCHEMA SX; +CREATE TABLE SX.a(i int); +SELECT diskquota.set_schema_quota('SX', '1MB'); +INSERT INTO SX.a values(generate_series(0, 100000000)); +INSERT INTO SX.a values(generate_series(0, 10)); +DROP TABLE SX.a; + +\c dbx3 +CREATE EXTENSION diskquota; +\! sleep 2 +\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +CREATE SCHEMA SX; +CREATE TABLE SX.a(i int); +SELECT diskquota.set_schema_quota('SX', '1MB'); +INSERT INTO SX.a values(generate_series(0, 100000000)); +INSERT INTO SX.a values(generate_series(0, 10)); +DROP TABLE SX.a; + +\c dbx4 +CREATE EXTENSION diskquota; +\! sleep 2 +\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +CREATE SCHEMA SX; +CREATE TABLE SX.a(i int); +SELECT diskquota.set_schema_quota('SX', '1MB'); +INSERT INTO SX.a values(generate_series(0, 100000000)); +INSERT INTO SX.a values(generate_series(0, 10)); +DROP TABLE SX.a; + +\c dbx5 +CREATE EXTENSION diskquota; +\! sleep 2 +\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +CREATE SCHEMA SX; +CREATE TABLE SX.a(i int); +SELECT diskquota.set_schema_quota('SX', '1MB'); +INSERT INTO SX.a values(generate_series(0, 100000000)); +INSERT INTO SX.a values(generate_series(0, 10)); +DROP TABLE SX.a; + +\c dbx6 +CREATE EXTENSION diskquota; +\! sleep 2 +\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +CREATE SCHEMA SX; +CREATE TABLE SX.a(i int); +SELECT diskquota.set_schema_quota('SX', '1MB'); +INSERT INTO SX.a values(generate_series(0, 100000000)); +INSERT INTO SX.a values(generate_series(0, 10)); +DROP TABLE SX.a; + +\c dbx7 +CREATE EXTENSION diskquota; +\! sleep 2 +\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +CREATE SCHEMA SX; +CREATE TABLE SX.a(i int); +SELECT diskquota.set_schema_quota('SX', '1MB'); +INSERT INTO SX.a values(generate_series(0, 100000000)); +INSERT INTO SX.a values(generate_series(0, 10)); +DROP TABLE SX.a; + +\c dbx8 +CREATE EXTENSION diskquota; +\! sleep 2 +\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +CREATE SCHEMA SX; +CREATE TABLE SX.a(i int); +SELECT diskquota.set_schema_quota('SX', '1MB'); +INSERT INTO SX.a values(generate_series(0, 100000000)); +INSERT INTO SX.a values(generate_series(0, 10)); +DROP TABLE SX.a; + +\c dbx9 +CREATE EXTENSION diskquota; +\! sleep 2 +\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +CREATE SCHEMA SX; +CREATE TABLE SX.a(i int); +SELECT diskquota.set_schema_quota('SX', '1MB'); +INSERT INTO SX.a values(generate_series(0, 10000000)); +INSERT INTO SX.a values(generate_series(0, 10)); +DROP TABLE SX.a; + +\c dbx10 +CREATE EXTENSION diskquota; +\! sleep 2 +\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l + +\c dbx0 +DROP EXTENSION diskquota; +\! 
sleep 2 +\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l + +\c dbx1 +DROP EXTENSION diskquota; +\! sleep 2 +\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l + +\c dbx2 +DROP EXTENSION diskquota; +\! sleep 2 +\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l + +\c dbx3 +DROP EXTENSION diskquota; +\! sleep 2 +\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l + +\c dbx4 +DROP EXTENSION diskquota; +\! sleep 2 +\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l + +\c dbx5 +DROP EXTENSION diskquota; +\! sleep 2 +\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l + +\c dbx6 +DROP EXTENSION diskquota; +\! sleep 2 +\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l + +\c dbx7 +DROP EXTENSION diskquota; +\! sleep 2 +\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l + +\c dbx8 +DROP EXTENSION diskquota; +\! sleep 2 +\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l + +\c dbx9 +DROP EXTENSION diskquota; +\! sleep 2 +\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l + +\c dbx10 +DROP EXTENSION diskquota; +\! sleep 2 +\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l + +\c postgres + +DROP DATABASE dbx0 ; +DROP DATABASE dbx1 ; +DROP DATABASE dbx2 ; +DROP DATABASE dbx3 ; +DROP DATABASE dbx4 ; +DROP DATABASE dbx5 ; +DROP DATABASE dbx6 ; +DROP DATABASE dbx7 ; +DROP DATABASE dbx8 ; +DROP DATABASE dbx9 ; +DROP DATABASE dbx10 ; diff --git a/sql/test_insert_after_drop.sql b/sql/test_insert_after_drop.sql new file mode 100644 index 00000000000..c0e4974d4d4 --- /dev/null +++ b/sql/test_insert_after_drop.sql @@ -0,0 +1,20 @@ +CREATE DATABASE db_insert_after_drop; +\c db_insert_after_drop +CREATE EXTENSION diskquota; +-- Test Drop Extension +CREATE SCHEMA sdrtbl; +SELECT diskquota.set_schema_quota('sdrtbl', '1 MB'); +SET search_path TO sdrtbl; +CREATE TABLE a(i int); +INSERT INTO a SELECT generate_series(1,100); +-- expect insert fail +INSERT INTO a SELECT generate_series(1,100000000); +SELECT pg_sleep(5); +INSERT INTO a SELECT generate_series(1,100); +DROP EXTENSION diskquota; +-- no sleep, it will take effect immediately +INSERT INTO a SELECT generate_series(1,100); + +DROP TABLE a; +\c postgres +DROP DATABASE db_insert_after_drop; diff --git a/sql/test_schema.sql b/sql/test_schema.sql index 27ad2e946c0..89eb39fcb68 100644 --- a/sql/test_schema.sql +++ b/sql/test_schema.sql @@ -1,34 +1,34 @@ -- Test schema -create schema s1; -select diskquota.set_schema_quota('s1', '1 MB'); -set search_path to s1; +CREATE SCHEMA s1; +SELECT diskquota.set_schema_quota('s1', '1 MB'); +SET search_path TO s1; -create table a(i int); -insert into a select generate_series(1,100); +CREATE TABLE a(i int); +INSERT INTO a SELECT generate_series(1,100); -- expect insert fail -insert into a select generate_series(1,100000000); +INSERT INTO a SELECT generate_series(1,100000000); -- expect insert fail -insert into a select generate_series(1,100); -create table a2(i int); +INSERT INTO a SELECT generate_series(1,100); +CREATE TABLE a2(i int); -- expect insert fail -insert into a2 select generate_series(1,100); +INSERT INTO a2 SELECT generate_series(1,100); -- Test alter table set schema -create schema s2; -alter table s1.a set schema s2; -select pg_sleep(5); +CREATE SCHEMA s2; +ALTER TABLE s1.a SET SCHEMA s2; +SELECT pg_sleep(5); -- expect insert succeed -insert into a2 select generate_series(1,200); +INSERT INTO a2 
SELECT generate_series(1,200); -- expect insert succeed -insert into s2.a select generate_series(1,200); +INSERT INTO s2.a SELECT generate_series(1,200); -alter table s2.a set schema badquota; +ALTER TABLE s2.a SET SCHEMA badquota; -- expect failed -insert into badquota.a select generate_series(0, 100); +INSERT INTO badquota.a SELECT generate_series(0, 100); -select schema_name, quota_in_mb from diskquota.show_schema_quota_view where schema_name = 's1'; +SELECT schema_name, quota_in_mb FROM diskquota.show_schema_quota_view WHERE schema_name = 's1'; -reset search_path; -drop table s1.a2, badquota.a; -drop schema s1, s2; +RESET search_path; +DROP TABLE s1.a2, badquota.a; +DROP SCHEMA s1, s2; From 4139bbecee167c0713b7c0d9f262d811cb137996 Mon Sep 17 00:00:00 2001 From: Hubert Zhang Date: Thu, 3 Jan 2019 14:44:11 +0800 Subject: [PATCH 011/330] Add fast table size check feature. Table size information is stored in the local memory of the diskquota worker, so it cannot be accessed from other backends directly. This patch stores the table size information in the user table diskquota.table_size for fast table size checks. Diskquota worker initialization is also changed to read from table diskquota.table_size instead of calculating every table's size via the UDF pg_total_relation_size. For the detailed design, please refer to the Quota Status Checker part of the wiki: https://github.com/greenplum-db/gpdb/wiki/Greenplum-Diskquota-Design#design-of-diskquota --- Makefile | 10 +- README.md | 34 ++-- diskquota--1.0.sql | 14 ++ diskquota.c | 274 ++++++++++++++++++++---------- diskquota.h | 29 ++-- diskquota_schedule | 1 + expected/test_extension.out | 30 ++-- expected/test_fast_disk_check.out | 22 +++ gp_activetable.c | 145 ++++++++++++---- init_file | 10 ++ quotamodel.c | 203 ++++++++++++++++++++-- sql/prepare.sql | 1 + sql/test_extension.sql | 15 +- sql/test_fast_disk_check.sql | 12 ++ 14 files changed, 594 insertions(+), 206 deletions(-) create mode 100644 expected/test_fast_disk_check.out create mode 100644 init_file create mode 100644 sql/test_fast_disk_check.sql diff --git a/Makefile b/Makefile index 3a63e6a638e..82e0f88c993 100644 --- a/Makefile +++ b/Makefile @@ -9,17 +9,9 @@ FILES = diskquota.c enforcement.c quotamodel.c gp_activetable.c OBJS = diskquota.o enforcement.o quotamodel.o gp_activetable.o PG_CPPFLAGS = -I$(libpq_srcdir) SHLIB_LINK = $(libpq) -SHLIB_PREREQS = submake-libpq REGRESS = dummy -REGRESS_OPTS = --schedule=diskquota_schedule +REGRESS_OPTS = --schedule=diskquota_schedule --init-file=init_file -ifdef USE_PGXS PGXS := $(shell pg_config --pgxs) include $(PGXS) -else -subdir = gpcontrib/gp_diskquota -top_builddir = ../.. -include $(top_builddir)/src/Makefile.global -include $(top_srcdir)/contrib/contrib-global.mk -endif diff --git a/README.md b/README.md index a472086c631..261cae3b7c1 100644 --- a/README.md +++ b/README.md @@ -83,15 +83,19 @@ That is to say, a role may have different quota limit on different databases and their disk usage is isolated between databases. # Install -1. Compile gpdb and disk quota is enabled by default. +1. Compile diskquota with PGXS. ``` -cd $gpdb_src; +cd $diskquota_src; make; make install; ``` -2. Enable diskquota as preload library (in future, we may set diskquota in -shared_preload_libraries by default). +2. Create a database to store global information. +``` +create database diskquota; +``` + +3. Enable diskquota as a preload library. ``` # enable diskquota in preload library.
gpconfig -c shared_preload_libraries -v 'diskquota' @@ -99,25 +103,20 @@ gpconfig -c shared_preload_libraries -v 'diskquota' gpstop -ar ``` -3. Config GUC of diskquota. +4. Configure the diskquota GUCs. ``` -# set monitored databases -gpconfig -c diskquota.monitor_databases -v 'postgres' # set naptime ( second ) to refresh the disk quota stats periodically gpconfig -c diskquota.naptime -v 2 ``` -4. Create diskquota extension in monitored database. +5. Create the diskquota extension in the monitored database. ``` create extension diskquota; ``` -5. Reload database configuraion +6. Initialize table size information for existing tables. This is needed if `create extension` is not executed in a newly created database. ``` -# reset monitored database list -gpconfig -c diskquota.monitor_databases -v 'postgres, postgres2' -# reload configuration -gpstop -u +select diskquota.init_table_size_table(); ``` # Usage @@ -174,7 +173,7 @@ select * from diskquota.show_schema_quota_view; # Test Run regression tests. ``` -cd gpcontrib/gp_diskquota; +cd diskquota_src; make installcheck ``` @@ -192,10 +191,9 @@ To be added. # Notes 1. Drop database with diskquota enabled. -If DBA enable monitoring diskquota on a database, there will be a connection -to this database from diskquota worker process. DBA need to first remove this -database from GUC diskquota.monitor_databases , and reload -configuration by call `gpstop -u`. Then database could be dropped successfully. +If the DBA has created the diskquota extension in a database, there will be a connection +to this database from the diskquota worker process. The DBA needs to drop the diskquota +extension in that database first; the database can then be dropped successfully. 2. Temp table. diff --git a/diskquota--1.0.sql b/diskquota--1.0.sql index abd2105fede..ffb89600b2a 100644 --- a/diskquota--1.0.sql +++ b/diskquota--1.0.sql @@ -20,11 +20,22 @@ RETURNS void STRICT AS 'MODULE_PATHNAME' LANGUAGE C; +CREATE TABLE diskquota.table_size (tableid oid, size int8, PRIMARY KEY(tableid)); + +CREATE TABLE diskquota.state (state int, PRIMARY KEY(state)); + +INSERT INTO diskquota.state SELECT (count(relname) = 0)::int FROM pg_class AS c, pg_namespace AS n WHERE c.oid > 16384 and relnamespace = n.oid and nspname != 'diskquota'; + CREATE FUNCTION diskquota.diskquota_start_worker() RETURNS void STRICT AS 'MODULE_PATHNAME' LANGUAGE C; +CREATE FUNCTION diskquota.init_table_size_table() +RETURNS void STRICT +AS 'MODULE_PATHNAME' +LANGUAGE C; + CREATE VIEW diskquota.show_schema_quota_view AS SELECT pg_namespace.nspname as schema_name, pg_class.relnamespace as schema_oid, quota.quotalimitMB as quota_in_mb, sum(pg_total_relation_size(pg_class.oid)) as nspsize_in_bytes FROM pg_namespace, pg_class, diskquota.quota_config as quota @@ -37,6 +48,9 @@ FROM pg_roles, pg_class, diskquota.quota_config as quota WHERE pg_class.relowner = quota.targetoid and pg_class.relowner = pg_roles.oid and quota.quotatype=1 GROUP BY pg_class.relowner, pg_roles.rolname, quota.quotalimitMB; +CREATE VIEW diskquota.database_size_view AS +SELECT ((SELECT SUM(pg_relation_size(oid)) FROM pg_class WHERE oid <= 16384)+ (SELECT SUM(size) FROM diskquota.table_size)) AS dbsize; + CREATE TYPE diskquota.diskquota_active_table_type AS ("TABLE_OID" oid, "TABLE_SIZE" int8); CREATE OR REPLACE FUNCTION diskquota.diskquota_fetch_table_stat(int4, oid[]) RETURNS setof diskquota.diskquota_active_table_type diff --git a/diskquota.c b/diskquota.c index 22dc302d056..567fed938bd 100644 --- a/diskquota.c +++ b/diskquota.c @@ -57,6 +57,7 @@ PG_MODULE_MAGIC;
PG_FUNCTION_INFO_V1(set_schema_quota); PG_FUNCTION_INFO_V1(set_role_quota); PG_FUNCTION_INFO_V1(diskquota_start_worker); +PG_FUNCTION_INFO_V1(init_table_size_table); /* timeout count to wait response from launcher process, in 1/10 sec */ #define WAIT_TIME_COUNT 120 @@ -70,25 +71,26 @@ static volatile sig_atomic_t got_sigterm = false; static volatile sig_atomic_t got_sigusr1 = false; /* GUC variables */ -int diskquota_naptime = 0; -int diskquota_max_active_tables = 0; +int diskquota_naptime = 0; +int diskquota_max_active_tables = 0; typedef struct DiskQuotaWorkerEntry DiskQuotaWorkerEntry; /* disk quota worker info used by launcher to manage the worker processes. */ struct DiskQuotaWorkerEntry { - Oid dbid; - pid_t pid; /* worker pid */ + Oid dbid; + pid_t pid; /* worker pid */ BackgroundWorkerHandle *handle; }; DiskQuotaLocks diskquota_locks; -volatile MessageBox * message_box = NULL; +volatile MessageBox *message_box = NULL; + /* using hash table to support incremental update the table size entry.*/ static HTAB *disk_quota_worker_map = NULL; static object_access_hook_type next_object_access_hook; -static int num_db = 0; +static int num_db = 0; /* functions of disk quota*/ void _PG_init(void); @@ -100,16 +102,16 @@ static void disk_quota_sigterm(SIGNAL_ARGS); static void disk_quota_sighup(SIGNAL_ARGS); static int64 get_size_in_mb(char *str); static void set_quota_internal(Oid targetoid, int64 quota_limit_mb, QuotaType type); -static int start_worker_by_dboid(Oid dbid); +static int start_worker_by_dboid(Oid dbid); static void create_monitor_db_table(); static inline void exec_simple_utility(const char *sql); static void exec_simple_spi(const char *sql, int expected_code); static bool add_db_to_config(Oid dbid); static void del_db_from_config(Oid dbid); static void process_message_box(void); -static void process_message_box_internal(MessageResult *code); +static void process_message_box_internal(MessageResult * code); static void dq_object_access_hook(ObjectAccessType access, Oid classId, - Oid objectId, int subId, void *arg); + Oid objectId, int subId, void *arg); static const char *err_code_to_err_message(MessageResult code); extern void diskquota_invalidate_db(Oid dbid); @@ -165,7 +167,7 @@ _PG_init(void) { return; } - /* Add dq_object_access_hook to handle drop extension event.*/ + /* Add dq_object_access_hook to handle drop extension event. */ next_object_access_hook = object_access_hook; object_access_hook = dq_object_access_hook; @@ -229,7 +231,8 @@ disk_quota_sighup(SIGNAL_ARGS) static void disk_quota_sigusr1(SIGNAL_ARGS) { - int save_errno = errno; + int save_errno = errno; + got_sigusr1 = true; if (MyProc) @@ -269,15 +272,37 @@ disk_quota_worker_main(Datum main_arg) * immediately */ init_disk_quota_model(); + /* sleep 2 seconds to wait create extension statement finished */ + sleep(2); + while (!got_sigterm) + { + int rc; + + /* + * Check whether the state is in ready mode. The state would be + * unknown, when you `create extension diskquota` at the first time. + * After running UDF init_table_size_table() The state will changed to + * be ready. + */ + if (check_diskquota_state_is_ready()) + { + break; + } + rc = WaitLatch(&MyProc->procLatch, + WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, + diskquota_naptime * 1000L); + ResetLatch(&MyProc->procLatch); + } refresh_disk_quota_model(true); /* - * Set ps display name of the worker process of diskquota, - * so we can distinguish them quickly. 
- * Note: never mind parameter name of the function `init_ps_display`, - * we only want the ps name looks like 'bgworker: [diskquota] ...' + * Set ps display name of the worker process of diskquota, so we can + * distinguish them quickly. Note: never mind parameter name of the + * function `init_ps_display`, we only want the ps name looks like + * 'bgworker: [diskquota] ...' */ init_ps_display("bgworker:", "[diskquota]", dbname, ""); + /* * Main loop: do this until the SIGTERM handler tells us to terminate */ @@ -329,6 +354,7 @@ static void create_monitor_db_table() { const char *sql; + sql = "create schema if not exists diskquota_namespace;" "create table if not exists diskquota_namespace.database_list(dbid oid not null unique);"; exec_simple_utility(sql); @@ -347,7 +373,7 @@ exec_simple_utility(const char *sql) static void exec_simple_spi(const char *sql, int expected_code) { - int ret; + int ret; ret = SPI_connect(); if (ret != SPI_OK_CONNECT) @@ -363,16 +389,17 @@ exec_simple_spi(const char *sql, int expected_code) static bool is_valid_dbid(Oid dbid) { - HeapTuple tuple; - - if (dbid == InvalidOid) - return false; - tuple = SearchSysCache1(DATABASEOID, ObjectIdGetDatum(dbid)); - if (!HeapTupleIsValid(tuple)) - return false; - ReleaseSysCache(tuple); - return true; + HeapTuple tuple; + + if (dbid == InvalidOid) + return false; + tuple = SearchSysCache1(DATABASEOID, ObjectIdGetDatum(dbid)); + if (!HeapTupleIsValid(tuple)) + return false; + ReleaseSysCache(tuple); + return true; } + /* * in early stage, start all worker processes of diskquota-enabled databases * from diskquota_namespace.database_list @@ -380,12 +407,13 @@ is_valid_dbid(Oid dbid) static void start_workers_from_dblist() { - TupleDesc tupdesc; - Oid fake_dbid[128]; - int fake_count = 0; - int num = 0; - int ret; - int i; + TupleDesc tupdesc; + Oid fake_dbid[128]; + int fake_count = 0; + int num = 0; + int ret; + int i; + StartTransactionCommand(); PushActiveSnapshot(GetTransactionSnapshot()); ret = SPI_connect(); @@ -400,12 +428,12 @@ start_workers_from_dblist() for (i = 0; num < SPI_processed; i++) { - HeapTuple tup; - Oid dbid; - Datum dat; - bool isnull; + HeapTuple tup; + Oid dbid; + Datum dat; + bool isnull; - tup = SPI_tuptable->vals[i]; + tup = SPI_tuptable->vals[i]; dat = SPI_getbinval(tup, tupdesc, 1, &isnull); if (isnull) { @@ -428,7 +456,7 @@ start_workers_from_dblist() PopActiveSnapshot(); CommitTransactionCommand(); - /* TODO: clean invalid database */ + /* TODO: clean invalid database */ } @@ -455,19 +483,21 @@ del_db_from_config(Oid dbid) /* * When drop exention database, diskquota laucher will receive a message - * to kill the diskquota worker process which monitoring the target database. + * to kill the diskquota worker process which monitoring the target database. 
*/ static void try_kill_db_worker(Oid dbid) { DiskQuotaWorkerEntry *hash_entry; - bool found; - hash_entry = (DiskQuotaWorkerEntry *)hash_search(disk_quota_worker_map, - (void *)&dbid, - HASH_REMOVE, &found); + bool found; + + hash_entry = (DiskQuotaWorkerEntry *) hash_search(disk_quota_worker_map, + (void *) &dbid, + HASH_REMOVE, &found); if (found) { BackgroundWorkerHandle *handle; + handle = hash_entry->handle; TerminateBackgroundWorker(handle); pfree(handle); @@ -480,7 +510,7 @@ try_kill_db_worker(Oid dbid) * we set it, and error out */ static void -on_add_db(Oid dbid, MessageResult *code) +on_add_db(Oid dbid, MessageResult * code) { if (num_db >= MAX_NUM_MONITORED_DB) { @@ -494,8 +524,8 @@ on_add_db(Oid dbid, MessageResult *code) } /* - * add dbid to diskquota_namespace.database_list - * set *code to ERR_ADD_TO_DB if any error occurs + * add dbid to diskquota_namespace.database_list set *code to + * ERR_ADD_TO_DB if any error occurs */ PG_TRY(); { @@ -560,11 +590,12 @@ disk_quota_launcher_main(Datum main_arg) hash_ctl.hash = oid_hash; disk_quota_worker_map = hash_create("disk quota worker map", - 1024, - &hash_ctl, - HASH_ELEM | HASH_FUNCTION); + 1024, + &hash_ctl, + HASH_ELEM | HASH_FUNCTION); start_workers_from_dblist(); + /* * Main loop: do this until the SIGTERM handler tells us to terminate */ @@ -592,6 +623,7 @@ disk_quota_launcher_main(Datum main_arg) got_sigusr1 = false; process_message_box(); } + /* * In case of a SIGHUP, just reload the configuration. */ @@ -616,11 +648,11 @@ start_worker_by_dboid(Oid dbid) BackgroundWorkerHandle *handle; BgwHandleStatus status; MemoryContext old_ctx; - char *dbname; - pid_t pid; - bool found; - bool ok; - DiskQuotaWorkerEntry* workerentry; + char *dbname; + pid_t pid; + bool found; + bool ok; + DiskQuotaWorkerEntry *workerentry; memset(&worker, 0, sizeof(BackgroundWorker)); worker.bgw_flags = BGWORKER_SHMEM_ACCESS | @@ -658,9 +690,9 @@ start_worker_by_dboid(Oid dbid) Assert(status == BGWH_STARTED); /* put the worker handle into the worker map */ - workerentry = (DiskQuotaWorkerEntry *)hash_search(disk_quota_worker_map, - (void *)&dbid, - HASH_ENTER, &found); + workerentry = (DiskQuotaWorkerEntry *) hash_search(disk_quota_worker_map, + (void *) &dbid, + HASH_ENTER, &found); if (!found) { workerentry->handle = handle; @@ -701,6 +733,64 @@ set_role_quota(PG_FUNCTION_ARGS) PG_RETURN_VOID(); } +/* + * init table diskquota.table_size. + * calculate table size by UDF pg_total_relation_size + */ +Datum +init_table_size_table(PG_FUNCTION_ARGS) +{ + int ret; + StringInfoData buf; + + RangeVar *rv; + Relation rel; + + /* ensure table diskquota.state exists */ + rv = makeRangeVar("diskquota", "state", -1); + rel = heap_openrv_extended(rv, AccessShareLock, true); + if (!rel) + { + /* configuration table is missing. */ + elog(ERROR, "table \"diskquota.state\" is missing in database \"%s\"," + " please recreate diskquota extension", + get_database_name(MyDatabaseId)); + } + heap_close(rel, NoLock); + + SPI_connect(); + + /* delete all the table size info in table_size if exist. */ + initStringInfo(&buf); + appendStringInfo(&buf, "delete from diskquota.table_size;"); + ret = SPI_execute(buf.data, false, 0); + if (ret != SPI_OK_DELETE) + elog(ERROR, "cannot delete table_size table: error code %d", ret); + + /* fill table_size table with table oid and size info. 
*/ + resetStringInfo(&buf); + appendStringInfo(&buf, + "insert into diskquota.table_size " + "select oid, pg_total_relation_size(oid) from pg_class " + "where oid> %u and (relkind='r' or relkind='m');", + FirstNormalObjectId); + ret = SPI_execute(buf.data, false, 0); + if (ret != SPI_OK_INSERT) + elog(ERROR, "cannot insert table_size table: error code %d", ret); + + /* set diskquota state to ready. */ + resetStringInfo(&buf); + appendStringInfo(&buf, + "update diskquota.state set state = %u;", + DISKQUOTA_READY_STATE); + ret = SPI_execute(buf.data, false, 0); + if (ret != SPI_OK_UPDATE) + elog(ERROR, "cannot update state table: error code %d", ret); + + SPI_finish(); + PG_RETURN_VOID(); +} + /* * Set disk quota limit for schema. */ @@ -757,7 +847,6 @@ set_quota_internal(Oid targetoid, int64 quota_limit_mb, QuotaType type) if (SPI_processed == 0 && quota_limit_mb > 0) { resetStringInfo(&buf); - initStringInfo(&buf); appendStringInfo(&buf, "insert into diskquota.quota_config values(%u,%d,%ld);", targetoid, type, quota_limit_mb); @@ -768,7 +857,6 @@ set_quota_internal(Oid targetoid, int64 quota_limit_mb, QuotaType type) else if (SPI_processed > 0 && quota_limit_mb <= 0) { resetStringInfo(&buf); - initStringInfo(&buf); appendStringInfo(&buf, "delete from diskquota.quota_config where targetoid=%u" " and quotatype=%d;", @@ -780,7 +868,6 @@ set_quota_internal(Oid targetoid, int64 quota_limit_mb, QuotaType type) else if (SPI_processed > 0 && quota_limit_mb > 0) { resetStringInfo(&buf); - initStringInfo(&buf); appendStringInfo(&buf, "update diskquota.quota_config set quotalimitMB = %ld where targetoid=%u" " and quotatype=%d;", @@ -938,14 +1025,15 @@ get_size_in_mb(char *str) /* * trigger start diskquota worker when create extension diskquota - * This function is called at backend side, and will send message to + * This function is called at backend side, and will send message to * diskquota launcher. Luacher process is responsible for starting the real * diskquota worker process. */ Datum diskquota_start_worker(PG_FUNCTION_ARGS) { - int rc; + int rc; + elog(LOG, "[diskquota]:DB = %d, MyProc=%p launcher pid=%d", MyDatabaseId, MyProc, message_box->launcher_pid); LWLockAcquire(diskquota_locks.message_box_lock, LW_EXCLUSIVE); message_box->req_pid = MyProcPid; @@ -956,8 +1044,9 @@ diskquota_start_worker(PG_FUNCTION_ARGS) rc = kill(message_box->launcher_pid, SIGUSR1); if (rc == 0) { - int count = WAIT_TIME_COUNT; - while(count-- > 0) + int count = WAIT_TIME_COUNT; + + while (count-- > 0) { rc = WaitLatch(&MyProc->procLatch, WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, @@ -972,12 +1061,12 @@ diskquota_start_worker(PG_FUNCTION_ARGS) message_box->req_pid = 0; LWLockRelease(diskquota_locks.message_box_lock); if (message_box->result != ERR_OK) - elog(ERROR, "%s", err_code_to_err_message((MessageResult)message_box->result)); + elog(ERROR, "%s", err_code_to_err_message((MessageResult) message_box->result)); PG_RETURN_VOID(); } static void -process_message_box_internal(MessageResult *code) +process_message_box_internal(MessageResult * code) { Assert(message_box->launcher_pid == MyProcPid); switch (message_box->cmd) @@ -998,7 +1087,7 @@ process_message_box_internal(MessageResult *code) } /* - * this function is called by launcher process to handle message from other backend + * this function is called by launcher process to handle message from other backend * processes which call CREATE/DROP EXTENSION diskquota; It must be able to catch errors, * and return an error code back to the backend process. 
*/ @@ -1006,7 +1095,8 @@ static void process_message_box() { MessageResult code = ERR_UNKNOWN; - int old_num_db = num_db; + int old_num_db = num_db; + if (message_box->req_pid == 0) return; elog(LOG, "[launcher]: received message"); @@ -1028,7 +1118,7 @@ process_message_box() } PG_END_TRY(); - message_box->result = (int)code; + message_box->result = (int) code; } /* @@ -1039,19 +1129,20 @@ process_message_box() */ static void dq_object_access_hook(ObjectAccessType access, Oid classId, - Oid objectId, int subId, void *arg) + Oid objectId, int subId, void *arg) { - Oid oid; - int rc; + Oid oid; + int rc; + if (access != OAT_DROP || classId != ExtensionRelationId) goto out; oid = get_extension_oid("diskquota", true); if (oid != objectId) goto out; + /* - * invoke drop extension diskquota - * 1. stop bgworker for MyDatabaseId - * 2. remove dbid from diskquota_namespace.database_list in postgres + * invoke drop extension diskquota 1. stop bgworker for MyDatabaseId 2. + * remove dbid from diskquota_namespace.database_list in postgres */ LWLockAcquire(diskquota_locks.message_box_lock, LW_EXCLUSIVE); message_box->req_pid = MyProcPid; @@ -1061,8 +1152,9 @@ dq_object_access_hook(ObjectAccessType access, Oid classId, rc = kill(message_box->launcher_pid, SIGUSR1); if (rc == 0) { - int count = WAIT_TIME_COUNT; - while(count-- >0) + int count = WAIT_TIME_COUNT; + + while (count-- > 0) { rc = WaitLatch(&MyProc->procLatch, WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, @@ -1077,25 +1169,33 @@ dq_object_access_hook(ObjectAccessType access, Oid classId, message_box->req_pid = 0; LWLockRelease(diskquota_locks.message_box_lock); if (message_box->result != ERR_OK) - elog(ERROR, "[diskquota] %s", err_code_to_err_message((MessageResult)message_box->result)); + elog(ERROR, "[diskquota] %s", err_code_to_err_message((MessageResult) message_box->result)); elog(LOG, "[diskquota] DROP EXTENTION diskquota; OK"); out: if (next_object_access_hook) - (*next_object_access_hook)(access, classId, objectId, - subId, arg); + (*next_object_access_hook) (access, classId, objectId, + subId, arg); } -static const char *err_code_to_err_message(MessageResult code) +static const char * +err_code_to_err_message(MessageResult code) { switch (code) { - case ERR_PENDING: return "no response from launcher, or timeout"; - case ERR_OK: return "NO ERROR"; - case ERR_EXCEED: return "too many database to monitor"; - case ERR_ADD_TO_DB: return "add dbid to database_list failed"; - case ERR_START_WORKER: return "start worker failed"; - case ERR_INVALID_DBID: return "invalid dbid"; - default: return "unknown error"; + case ERR_PENDING: + return "no response from launcher, or timeout"; + case ERR_OK: + return "NO ERROR"; + case ERR_EXCEED: + return "too many database to monitor"; + case ERR_ADD_TO_DB: + return "add dbid to database_list failed"; + case ERR_START_WORKER: + return "start worker failed"; + case ERR_INVALID_DBID: + return "invalid dbid"; + default: + return "unknown error"; } } diff --git a/diskquota.h b/diskquota.h index e2b5a9f3be8..d99d975873a 100644 --- a/diskquota.h +++ b/diskquota.h @@ -16,11 +16,17 @@ typedef enum FETCH_ACTIVE_SIZE /* fetch size for active tables */ } FetchTableStatType; +typedef enum +{ + DISKQUOTA_UNKNOWN_STATE, + DISKQUOTA_READY_STATE +} DiskQuotaState; + struct DiskQuotaLocks { - LWLock *active_table_lock; - LWLock *black_map_lock; - LWLock *message_box_lock; + LWLock *active_table_lock; + LWLock *black_map_lock; + LWLock *message_box_lock; }; typedef struct DiskQuotaLocks DiskQuotaLocks; @@ -36,11 +42,13 
@@ typedef struct DiskQuotaLocks DiskQuotaLocks; */ struct MessageBox { - int launcher_pid; - int req_pid; /* pid of the request process */ - int cmd; /* message command type, see MessageCommand */ - int result; /* message result writen by launcher, see MessageResult */ - int data[4]; /* for create/drop extension diskquota, data[0] is dbid */ + int launcher_pid; + int req_pid; /* pid of the request process */ + int cmd; /* message command type, see MessageCommand */ + int result; /* message result writen by launcher, see + * MessageResult */ + int data[4]; /* for create/drop extension diskquota, + * data[0] is dbid */ }; enum MessageCommand @@ -79,13 +87,14 @@ extern void diskquota_invalidate_db(Oid dbid); extern void init_disk_quota_shmem(void); extern void init_disk_quota_model(void); extern void refresh_disk_quota_model(bool force); +extern bool check_diskquota_state_is_ready(void); extern bool quota_check_common(Oid reloid); /* quotaspi interface */ extern void init_disk_quota_hook(void); extern Datum diskquota_fetch_table_stat(PG_FUNCTION_ARGS); -extern int diskquota_naptime; -extern int diskquota_max_active_tables; +extern int diskquota_naptime; +extern int diskquota_max_active_tables; #endif diff --git a/diskquota_schedule b/diskquota_schedule index 61fadf26c7c..d973cf10f45 100644 --- a/diskquota_schedule +++ b/diskquota_schedule @@ -4,5 +4,6 @@ test: test_role test_schema test_drop_table test_column test_copy test_update te test: test_partition test: test_vacuum test: test_extension +test: test_fast_disk_check test: clean test: test_insert_after_drop diff --git a/expected/test_extension.out b/expected/test_extension.out index 2a64469cf71..60eacedf326 100644 --- a/expected/test_extension.out +++ b/expected/test_extension.out @@ -42,22 +42,27 @@ INSERT INTO SX.a values(generate_series(0, 10)); ERROR: schema's disk space quota exceeded with name:sx DROP TABLE SX.a; \c dbx1 -CREATE EXTENSION diskquota; -\! sleep 2 -\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l -4 CREATE SCHEMA SX; CREATE TABLE SX.a(i int); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +INSERT INTO SX.a values(generate_series(0, 100000)); +CREATE EXTENSION diskquota; +SELECT diskquota.init_table_size_table(); + init_table_size_table +----------------------- + +(1 row) + SELECT diskquota.set_schema_quota('SX', '1MB'); set_schema_quota ------------------ (1 row) -INSERT INTO SX.a values(generate_series(0, 100000000)); -ERROR: schema's disk space quota exceeded with name:sx +\! sleep 5 +\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +4 INSERT INTO SX.a values(generate_series(0, 10)); ERROR: schema's disk space quota exceeded with name:sx DROP TABLE SX.a; @@ -203,22 +208,13 @@ ERROR: schema's disk space quota exceeded with name:sx DROP TABLE SX.a; \c dbx9 CREATE EXTENSION diskquota; -ERROR: too many database to monitor (diskquota.c:975) +ERROR: too many database to monitor (diskquota.c:1056) \! sleep 2 \! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 11 -CREATE SCHEMA SX; -CREATE TABLE SX.a(i int); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. 
Make sure column(s) chosen are the optimal data distribution key to minimize skew. -SELECT diskquota.set_schema_quota('SX', '1MB'); -ERROR: schema "diskquota" does not exist -INSERT INTO SX.a values(generate_series(0, 10000000)); -INSERT INTO SX.a values(generate_series(0, 10)); -DROP TABLE SX.a; \c dbx10 CREATE EXTENSION diskquota; -ERROR: too many database to monitor (diskquota.c:975) +ERROR: too many database to monitor (diskquota.c:1056) \! sleep 2 \! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 11 diff --git a/expected/test_fast_disk_check.out b/expected/test_fast_disk_check.out new file mode 100644 index 00000000000..faabff80387 --- /dev/null +++ b/expected/test_fast_disk_check.out @@ -0,0 +1,22 @@ +-- Test SCHEMA +CREATE SCHEMA s1; +SET search_path to s1; +CREATE TABLE a(i int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +INSERT INTO a SELECT generate_series(1,2000000); +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +SELECT (pg_database_size(oid)-dbsize)/dbsize < 0.1 FROM pg_database, diskquota.database_size_view WHERE datname='contrib_regression'; + ?column? +---------- + t +(1 row) + +RESET search_path; +DROP TABLE s1.a; +DROP SCHEMA s1; diff --git a/gp_activetable.c b/gp_activetable.c index 0a885ea4dc4..8a0a5188f6c 100644 --- a/gp_activetable.c +++ b/gp_activetable.c @@ -21,11 +21,13 @@ #include "cdb/cdbdisp_query.h" #include "cdb/cdbdispatchresult.h" #include "cdb/cdbvars.h" +#include "commands/dbcommands.h" #include "executor/spi.h" #include "fmgr.h" #include "funcapi.h" #include "libpq-fe.h" #include "miscadmin.h" +#include "nodes/makefuncs.h" #include "storage/shmem.h" #include "storage/smgr.h" #include "utils/array.h" @@ -73,11 +75,12 @@ static StringInfoData convert_map_to_string(HTAB *active_list); static HTAB *pull_active_list_from_seg(void); static void report_active_table_SmgrStat(SMgrRelation reln); static void report_active_table_AO(BufferedAppend * bufferedAppend); +static void load_table_size(HTAB *local_table_stats_map); void init_active_table_hook(void); void init_shm_worker_active_tables(void); void init_lock_active_tables(void); -HTAB *gp_fetch_active_tables(bool force); +HTAB *gp_fetch_active_tables(bool is_init); /* * Register smgr hook to detect active table. @@ -573,6 +576,74 @@ get_active_tables(void) return local_active_table_stats_map; } +/* + * Load table size info from diskquota.table_size table. +*/ +static void +load_table_size(HTAB *local_table_stats_map) +{ + int ret; + TupleDesc tupdesc; + int i; + bool found; + DiskQuotaActiveTableEntry *quota_entry; + + RangeVar *rv; + Relation rel; + + rv = makeRangeVar("diskquota", "table_size", -1); + rel = heap_openrv_extended(rv, AccessShareLock, true); + if (!rel) + { + /* configuration table is missing. 
*/ + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("table \"table_size\" is missing in database \"%s\", please recreate diskquota extension", get_database_name(MyDatabaseId)))); + } + heap_close(rel, NoLock); + + + + ret = SPI_execute("select tableid, size from diskquota.table_size", true, 0); + if (ret != SPI_OK_SELECT) + ereport(ERROR, (errmsg("SPI_execute failed: error code %d", ret))); + + tupdesc = SPI_tuptable->tupdesc; + if (tupdesc->natts != 2 || + ((tupdesc)->attrs[0])->atttypid != OIDOID || + ((tupdesc)->attrs[1])->atttypid != INT8OID) + { + ereport(ERROR, (errmsg("table \"table_size\" is corrupted in database \"%s\"," + " please recreate diskquota extension", + get_database_name(MyDatabaseId)))); + } + + for (i = 0; i < SPI_processed; i++) + { + HeapTuple tup = SPI_tuptable->vals[i]; + Datum dat; + Oid tableOid; + int64 size; + bool isnull; + + dat = SPI_getbinval(tup, tupdesc, 1, &isnull); + if (isnull) + continue; + tableOid = DatumGetObjectId(dat); + + dat = SPI_getbinval(tup, tupdesc, 2, &isnull); + if (isnull) + continue; + size = DatumGetInt64(dat); + + + quota_entry = (DiskQuotaActiveTableEntry *) hash_search( + local_table_stats_map, + &tableOid, + HASH_ENTER, &found); + quota_entry->tableoid = tableOid; + quota_entry->tablesize = size; + } + return; +} /* * Worker process at master need to collect @@ -581,7 +652,7 @@ get_active_tables(void) * to obtainer the real table size at cluster level. */ HTAB * -gp_fetch_active_tables(bool force) +gp_fetch_active_tables(bool is_init) { CdbPgResults cdb_pgresults = {NULL, 0}; int i, @@ -606,9 +677,9 @@ gp_fetch_active_tables(bool force) &ctl, HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); - if (force) + if (is_init) { - sql = "select * from diskquota.diskquota_fetch_table_stat(0, '{}'::oid[])"; + load_table_size(local_table_stats_map); } else { @@ -618,52 +689,52 @@ gp_fetch_active_tables(bool force) appendStringInfo(&buffer, "select * from diskquota.diskquota_fetch_table_stat(2, '%s'::oid[])", map_string.data); sql = buffer.data; - } - elog(DEBUG1, "CHECK SPI QUERY is %s", sql); + elog(DEBUG1, "CHECK SPI QUERY is %s", sql); - CdbDispatchCommand(sql, DF_NONE, &cdb_pgresults); + CdbDispatchCommand(sql, DF_NONE, &cdb_pgresults); - /* collect data from each segment */ - for (i = 0; i < cdb_pgresults.numResults; i++) - { - - Size tableSize; - bool found; - Oid tableOid; - DiskQuotaActiveTableEntry *entry; - - struct pg_result *pgresult = cdb_pgresults.pg_results[i]; - - if (PQresultStatus(pgresult) != PGRES_TUPLES_OK) + /* collect data from each segment */ + for (i = 0; i < cdb_pgresults.numResults; i++) { - cdbdisp_clearCdbPgResults(&cdb_pgresults); - ereport(ERROR, - (errmsg("unexpected result from segment: %d", - PQresultStatus(pgresult)))); - } - for (j = 0; j < PQntuples(pgresult); j++) - { - tableOid = atooid(PQgetvalue(pgresult, j, 0)); - tableSize = (Size) atoll(PQgetvalue(pgresult, j, 1)); + Size tableSize; + bool found; + Oid tableOid; + DiskQuotaActiveTableEntry *entry; - entry = (DiskQuotaActiveTableEntry *) hash_search(local_table_stats_map, &tableOid, HASH_ENTER, &found); + struct pg_result *pgresult = cdb_pgresults.pg_results[i]; - if (!found) + if (PQresultStatus(pgresult) != PGRES_TUPLES_OK) { - entry->tableoid = tableOid; - entry->tablesize = tableSize; + cdbdisp_clearCdbPgResults(&cdb_pgresults); + ereport(ERROR, + (errmsg("unexpected result from segment: %d", + PQresultStatus(pgresult)))); } - else + + for (j = 0; j < PQntuples(pgresult); j++) { - entry->tablesize = entry->tablesize + tableSize; - } 
+ tableOid = atooid(PQgetvalue(pgresult, j, 0)); + tableSize = (Size) atoll(PQgetvalue(pgresult, j, 1)); + + entry = (DiskQuotaActiveTableEntry *) hash_search( + local_table_stats_map, &tableOid, HASH_ENTER, &found); + + if (!found) + { + entry->tableoid = tableOid; + entry->tablesize = tableSize; + } + else + { + entry->tablesize = entry->tablesize + tableSize; + } + } } - + cdbdisp_clearCdbPgResults(&cdb_pgresults); } - cdbdisp_clearCdbPgResults(&cdb_pgresults); return local_table_stats_map; } diff --git a/init_file b/init_file new file mode 100644 index 00000000000..34a7542c0d5 --- /dev/null +++ b/init_file @@ -0,0 +1,10 @@ +-- This file contains global patterns of messages that should be ignored or +-- masked out, when comparing test results with the expected output. +-- Individual tests can contain additional patterns specific to the test. + +-- start_matchignore +-- end_matchignore +-- start_matchsubs +m/diskquota.c:\d+\)/ +s/diskquota.c:\d+\)/diskquota.c:xxx/ +-- end_matchsubs diff --git a/quotamodel.c b/quotamodel.c index 590a28ed869..8dbce83c2d7 100644 --- a/quotamodel.c +++ b/quotamodel.c @@ -70,6 +70,7 @@ struct TableSizeEntry int64 totalsize; bool is_exist; /* flag used to check whether table is already * dropped */ + bool need_flush; /* whether need to flush to table table_size */ }; /* local cache of namespace disk size */ @@ -122,10 +123,11 @@ static HTAB *local_disk_quota_black_map = NULL; static shmem_startup_hook_type prev_shmem_startup_hook = NULL; /* functions to refresh disk quota model*/ -static void refresh_disk_quota_usage(bool force); -static void calculate_table_disk_usage(bool force); +static void refresh_disk_quota_usage(bool is_init); +static void calculate_table_disk_usage(bool is_init); static void calculate_schema_disk_usage(void); static void calculate_role_disk_usage(void); +static void flush_to_table_size(void); static void flush_local_black_map(void); static void check_disk_quota_by_oid(Oid targetOid, int64 current_usage, QuotaType type); static void update_namespace_map(Oid namespaceoid, int64 updatesize); @@ -133,10 +135,12 @@ static void update_role_map(Oid owneroid, int64 updatesize); static void remove_namespace_map(Oid namespaceoid); static void remove_role_map(Oid owneroid); static bool load_quotas(void); +static bool do_check_diskquota_state_is_ready(void); static Size DiskQuotaShmemSize(void); static void disk_quota_shmem_startup(void); + /* * DiskQuotaShmemSize * Compute space needed for diskquota-related shared memory @@ -159,6 +163,7 @@ init_lwlocks(void) diskquota_locks.black_map_lock = LWLockAssign(); diskquota_locks.message_box_lock = LWLockAssign(); } + /* * DiskQuotaShmemInit * Allocate and initialize diskquota-related shared memory @@ -178,10 +183,10 @@ disk_quota_shmem_startup(void) init_lwlocks(); message_box = ShmemInitStruct("disk_quota_message_box", - sizeof(MessageBox), - &found); + sizeof(MessageBox), + &found); if (!found) - memset((void*)message_box, 0, sizeof(MessageBox)); + memset((void *) message_box, 0, sizeof(MessageBox)); memset(&hash_ctl, 0, sizeof(hash_ctl)); hash_ctl.keysize = sizeof(BlackMapEntry); @@ -287,13 +292,97 @@ init_disk_quota_model(void) HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); } +/* + * Check whether the diskquota state is ready + */ +static bool +do_check_diskquota_state_is_ready(void) +{ + int ret; + TupleDesc tupdesc; + int i; + + RangeVar *rv; + Relation rel; + + /* check table diskquota.state exists */ + rv = makeRangeVar("diskquota", "state", -1); + rel = heap_openrv_extended(rv, AccessShareLock, 
true); + if (!rel) + { + /* configuration table is missing. */ + elog(ERROR, "table \"diskquota.state\" is missing in database \"%s\"," + " please recreate diskquota extension", + get_database_name(MyDatabaseId)); + return false; + } + heap_close(rel, NoLock); + + /* check diskquota state from table diskquota.state */ + ret = SPI_execute("select state from diskquota.state", true, 0); + if (ret != SPI_OK_SELECT) + elog(ERROR, "SPI_execute failed: error code %d", ret); + + tupdesc = SPI_tuptable->tupdesc; + if (tupdesc->natts != 1 || + ((tupdesc)->attrs[0])->atttypid != INT4OID) + { + elog(ERROR, "table \"state\" is corrupted in database \"%s\"," + " please recreate diskquota extension", + get_database_name(MyDatabaseId)); + return false; + } + + for (i = 0; i < SPI_processed; i++) + { + HeapTuple tup = SPI_tuptable->vals[i]; + Datum dat; + int state; + bool isnull; + + dat = SPI_getbinval(tup, tupdesc, 1, &isnull); + if (isnull) + continue; + state = DatumGetInt64(dat); + + if (state == DISKQUOTA_READY_STATE) + { + return true; + } + } + ereport(LOG, (errmsg("Diskquota is not in ready state. " + "please run UDF init_table_size_table()"))); + return false; +} + +/* + * Check whether the diskquota state is ready +*/ +bool +check_diskquota_state_is_ready(void) +{ + bool ret; + + StartTransactionCommand(); + SPI_connect(); + PushActiveSnapshot(GetTransactionSnapshot()); + + ret = do_check_diskquota_state_is_ready(); + + SPI_finish(); + PopActiveSnapshot(); + CommitTransactionCommand(); + + return ret; +} + /* * diskquota worker will refresh disk quota model * periodically. It will reload quota setting and * recalculate the changed disk usage. */ void -refresh_disk_quota_model(bool force) +refresh_disk_quota_model(bool is_init) { elog(DEBUG1, "check disk quota begin"); StartTransactionCommand(); @@ -302,7 +391,7 @@ refresh_disk_quota_model(bool force) /* skip refresh model when load_quotas failed */ if (load_quotas()) { - refresh_disk_quota_usage(force); + refresh_disk_quota_usage(is_init); } SPI_finish(); PopActiveSnapshot(); @@ -321,6 +410,8 @@ refresh_disk_quota_usage(bool force) calculate_table_disk_usage(force); calculate_schema_disk_usage(); calculate_role_disk_usage(); + /* flush local table_size_map to user table table_size */ + flush_to_table_size(); /* copy local black map back to shared black map */ flush_local_black_map(); } @@ -518,7 +609,7 @@ update_role_map(Oid owneroid, int64 updatesize) * */ static void -calculate_table_disk_usage(bool force) +calculate_table_disk_usage(bool is_init) { bool found; bool active_tbl_found = false; @@ -534,7 +625,7 @@ calculate_table_disk_usage(bool force) classRel = heap_open(RelationRelationId, AccessShareLock); relScan = heap_beginscan_catalog(classRel, 0, NULL); - local_active_table_stat_map = gp_fetch_active_tables(force); + local_active_table_stat_map = gp_fetch_active_tables(is_init); /* unset is_exist flag for tsentry in table_size_map */ hash_seq_init(&iter, table_size_map); @@ -571,7 +662,7 @@ calculate_table_disk_usage(bool force) tsentry->totalsize = 0; tsentry->owneroid = 0; tsentry->namespaceoid = 0; - tsentry->reloid = 0; + tsentry->need_flush = true; } /* mark tsentry is_exist */ @@ -595,6 +686,7 @@ calculate_table_disk_usage(bool force) tsentry->namespaceoid = classForm->relnamespace; tsentry->owneroid = classForm->relowner; tsentry->totalsize = (int64) active_table_entry->tablesize; + tsentry->need_flush = true; update_namespace_map(tsentry->namespaceoid, tsentry->totalsize); update_role_map(tsentry->owneroid, tsentry->totalsize); 
} @@ -607,11 +699,18 @@ calculate_table_disk_usage(bool force) int64 oldtotalsize = tsentry->totalsize; tsentry->totalsize = (int64) active_table_entry->tablesize; + tsentry->need_flush = true; update_namespace_map(tsentry->namespaceoid, tsentry->totalsize - oldtotalsize); update_role_map(tsentry->owneroid, tsentry->totalsize - oldtotalsize); } } + /* table size info doesn't need to flush at init quota model stage */ + if (is_init) + { + tsentry->need_flush = false; + } + /* if schema change, transfer the file size */ if (tsentry->namespaceoid != classForm->relnamespace) { @@ -632,7 +731,10 @@ calculate_table_disk_usage(bool force) heap_close(classRel, AccessShareLock); hash_destroy(local_active_table_stat_map); - /* process removed tables */ + /* + * Process removed tables. Reduce schema and role size firstly. Remove + * table from table_size_map in flush_to_table_size() function later. + */ hash_seq_init(&iter, table_size_map); while ((tsentry = hash_seq_search(&iter)) != NULL) { @@ -640,11 +742,6 @@ calculate_table_disk_usage(bool force) { update_role_map(tsentry->owneroid, -1 * tsentry->totalsize); update_namespace_map(tsentry->namespaceoid, -1 * tsentry->totalsize); - - hash_search(table_size_map, - &tsentry->reloid, - HASH_REMOVE, NULL); - continue; } } } @@ -703,6 +800,75 @@ calculate_role_disk_usage(void) } } +/* + * Flush the table_size_map to user table diskquota.table_size + * To improve update performance, we first delete all the need_to_flush + * entries in table table_size. And then insert new table size entries into + * table table_size. + */ +static +void +flush_to_table_size(void) +{ + HASH_SEQ_STATUS iter; + TableSizeEntry *tsentry = NULL; + StringInfoData delete_statement; + StringInfoData insert_statement; + bool delete_statement_flag = false; + bool insert_statement_flag = false; + int ret; + + /* TODO: Add flush_size_interval to avoid flushing size info in every loop */ + + /* concatenate all the need_to_flush table to SQL string */ + initStringInfo(&delete_statement); + appendStringInfo(&delete_statement, "delete from diskquota.table_size where tableid in ("); + initStringInfo(&insert_statement); + appendStringInfo(&insert_statement, "insert into diskquota.table_size values "); + hash_seq_init(&iter, table_size_map); + while ((tsentry = hash_seq_search(&iter)) != NULL) + { + /* delete dropped table from both table_size_map and table table_size */ + if (tsentry->is_exist == false) + { + appendStringInfo(&delete_statement, "%u, ", tsentry->reloid); + delete_statement_flag = true; + + hash_search(table_size_map, + &tsentry->reloid, + HASH_REMOVE, NULL); + } + /* update the table size by delete+insert in table table_size */ + else if (tsentry->need_flush == true) + { + tsentry->need_flush = false; + appendStringInfo(&delete_statement, "%u, ", tsentry->reloid); + appendStringInfo(&insert_statement, "(%u,%ld), ", tsentry->reloid, tsentry->totalsize); + delete_statement_flag = true; + insert_statement_flag = true; + } + } + truncateStringInfo(&delete_statement, delete_statement.len - strlen(", ")); + truncateStringInfo(&insert_statement, insert_statement.len - strlen(", ")); + appendStringInfo(&delete_statement, ");"); + appendStringInfo(&insert_statement, ";"); + + if (delete_statement_flag) + { + elog(DEBUG1, "[diskquota] table_size delete_statement: %s", delete_statement.data); + ret = SPI_execute(delete_statement.data, false, 0); + if (ret != SPI_OK_DELETE) + elog(ERROR, "SPI_execute failed: error code %d", ret); + } + if (insert_statement_flag) + { + elog(DEBUG1, 
"[diskquota] table_size insert_statement: %s", insert_statement.data); + ret = SPI_execute(insert_statement.data, false, 0); + if (ret != SPI_OK_INSERT) + elog(ERROR, "SPI_execute failed: error code %d", ret); + } +} + /* * Load quotas from diskquota configuration table(quota_config). */ @@ -755,7 +921,7 @@ load_quotas(void) ret = SPI_execute("select targetoid, quotatype, quotalimitMB from diskquota.quota_config", true, 0); if (ret != SPI_OK_SELECT) - elog(FATAL, "SPI_execute failed: error code %d", ret); + elog(ERROR, "SPI_execute failed: error code %d", ret); tupdesc = SPI_tuptable->tupdesc; if (tupdesc->natts != 3 || @@ -892,8 +1058,9 @@ quota_check_common(Oid reloid) void diskquota_invalidate_db(Oid dbid) { - BlackMapEntry * entry; + BlackMapEntry *entry; HASH_SEQ_STATUS iter; + LWLockAcquire(diskquota_locks.black_map_lock, LW_EXCLUSIVE); hash_seq_init(&iter, disk_quota_black_map); while ((entry = hash_seq_search(&iter)) != NULL) diff --git a/sql/prepare.sql b/sql/prepare.sql index 1c802fc8a90..21c888e8a65 100644 --- a/sql/prepare.sql +++ b/sql/prepare.sql @@ -1,6 +1,7 @@ CREATE EXTENSION diskquota; -- start_ignore \! gpstop -u +SELECT diskquota.init_table_size_table(); -- end_ignore SELECT pg_sleep(1); \! cp data/csmall.txt /tmp/csmall.txt diff --git a/sql/test_extension.sql b/sql/test_extension.sql index f92e9d7d9ff..18bcc611b43 100644 --- a/sql/test_extension.sql +++ b/sql/test_extension.sql @@ -30,13 +30,14 @@ INSERT INTO SX.a values(generate_series(0, 10)); DROP TABLE SX.a; \c dbx1 -CREATE EXTENSION diskquota; -\! sleep 2 -\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l CREATE SCHEMA SX; CREATE TABLE SX.a(i int); +INSERT INTO SX.a values(generate_series(0, 100000)); +CREATE EXTENSION diskquota; +SELECT diskquota.init_table_size_table(); SELECT diskquota.set_schema_quota('SX', '1MB'); -INSERT INTO SX.a values(generate_series(0, 100000000)); +\! sleep 5 +\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l INSERT INTO SX.a values(generate_series(0, 10)); DROP TABLE SX.a; @@ -121,12 +122,6 @@ DROP TABLE SX.a; CREATE EXTENSION diskquota; \! sleep 2 \! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l -CREATE SCHEMA SX; -CREATE TABLE SX.a(i int); -SELECT diskquota.set_schema_quota('SX', '1MB'); -INSERT INTO SX.a values(generate_series(0, 10000000)); -INSERT INTO SX.a values(generate_series(0, 10)); -DROP TABLE SX.a; \c dbx10 CREATE EXTENSION diskquota; diff --git a/sql/test_fast_disk_check.sql b/sql/test_fast_disk_check.sql new file mode 100644 index 00000000000..fdcddb628f3 --- /dev/null +++ b/sql/test_fast_disk_check.sql @@ -0,0 +1,12 @@ +-- Test SCHEMA +CREATE SCHEMA s1; +SET search_path to s1; + +CREATE TABLE a(i int); +INSERT INTO a SELECT generate_series(1,2000000); +SELECT pg_sleep(5); +SELECT (pg_database_size(oid)-dbsize)/dbsize < 0.1 FROM pg_database, diskquota.database_size_view WHERE datname='contrib_regression'; +RESET search_path; +DROP TABLE s1.a; +DROP SCHEMA s1; + From 3e250b338827950dc39039b33fa8ae072d699a7a Mon Sep 17 00:00:00 2001 From: Hubert Zhang Date: Tue, 15 Jan 2019 08:53:29 +0000 Subject: [PATCH 012/330] Add pipeline for diskquota. Also fix some bugs of diskquota. 1. Shorten the lock held time to avoid deadlock 2. Add truncateStringInfo 3. Add extension level lock 4. Recheck in-transaction relfilenode. 
--- concourse/pipeline/pipeline.yml | 177 +++++++++++++++++++++++++ concourse/scripts/build_diskquota.sh | 35 +++++ concourse/scripts/test_diskquota.sh | 61 +++++++++ concourse/tasks/build_diskquota.yml | 16 +++ concourse/tasks/test_diskquota.yml | 14 ++ diskquota.c | 188 +++++++++++++++++---------- diskquota.h | 10 +- diskquota_schedule | 4 +- expected/prepare.out | 2 +- expected/test_column.out | 4 +- expected/test_copy.out | 2 +- expected/test_delete_quota.out | 4 +- expected/test_drop_table.out | 2 +- expected/test_extension.out | 4 +- expected/test_fast_disk_check.out | 2 +- expected/test_insert_after_drop.out | 2 +- expected/test_partition.out | 4 +- expected/test_reschema.out | 2 +- expected/test_role.out | 2 +- expected/test_schema.out | 2 +- expected/test_temp_role.out | 2 +- expected/test_toast.out | 2 +- expected/test_truncate.out | 4 +- expected/test_update.out | 2 +- expected/test_vacuum.out | 4 +- gp_activetable.c | 49 ++++--- quotamodel.c | 86 ++++++++---- sql/prepare.sql | 2 +- sql/test_column.sql | 4 +- sql/test_copy.sql | 2 +- sql/test_delete_quota.sql | 4 +- sql/test_drop_table.sql | 2 +- sql/test_fast_disk_check.sql | 2 +- sql/test_insert_after_drop.sql | 2 +- sql/test_partition.sql | 4 +- sql/test_reschema.sql | 2 +- sql/test_role.sql | 2 +- sql/test_schema.sql | 2 +- sql/test_temp_role.sql | 2 +- sql/test_toast.sql | 2 +- sql/test_truncate.sql | 4 +- sql/test_update.sql | 2 +- sql/test_vacuum.sql | 4 +- 43 files changed, 568 insertions(+), 160 deletions(-) create mode 100644 concourse/pipeline/pipeline.yml create mode 100755 concourse/scripts/build_diskquota.sh create mode 100755 concourse/scripts/test_diskquota.sh create mode 100644 concourse/tasks/build_diskquota.yml create mode 100644 concourse/tasks/test_diskquota.yml diff --git a/concourse/pipeline/pipeline.yml b/concourse/pipeline/pipeline.yml new file mode 100644 index 00000000000..8484259ffe4 --- /dev/null +++ b/concourse/pipeline/pipeline.yml @@ -0,0 +1,177 @@ +## ====================================================================== +## Pipeline for GPDB PL/R GPPKG +## ====================================================================== + +groups: +- name: GPDB6 + jobs: + - diskquota_centos6_build + - diskquota_centos7_build + - diskquota_centos6_test + - diskquota_centos7_test + +resources: + +# Image Resources + +- name: centos-gpdb-dev-6 + type: docker-image + source: + repository: pivotaldata/centos-gpdb-dev + tag: '6-gcc6.2-llvm3.7' + +- name: centos-gpdb-dev-7 + type: docker-image + source: + repository: pivotaldata/centos-gpdb-dev + tag: '7-gcc6.2-llvm3.7' + +# Github Source Codes + +- name: gpdb_src + type: git + source: + branch: {{gpdb-git-branch}} + uri: {{gpdb-git-remote}} + +- name: diskquota_src + type: git + source: + branch: gpdbpipeline + uri: https://github.com/zhangh43/diskquota.git + +# centos 7 + +- name: bin_diskquota_centos7 + type: s3 + source: + bucket: {{pl-bucket-name}} + region_name: {{aws-region}} + access_key_id: {{bucket-access-key-id}} + secret_access_key: {{bucket-secret-access-key}} + versioned_file: build/gpdb6/diskquota/component_diskquota.tar.gz + + +- name: bin_gpdb_centos7 + type: s3 + source: + bucket: {{bucket-name}} + region_name: {{aws-region}} + access_key_id: {{bucket-access-key-id}} + secret_access_key: {{bucket-secret-access-key}} + versioned_file: bin_gpdb_centos7/bin_gpdb.tar.gz + +- name: bin_diskquota_centos6 + type: s3 + source: + bucket: {{pl-bucket-name}} + region_name: {{aws-region}} + access_key_id: {{bucket-access-key-id}} + secret_access_key: 
{{bucket-secret-access-key}} + versioned_file: build/gpdb6/diskquota/component_diskquota.tar.gz + +- name: bin_gpdb_centos6 + type: s3 + source: + bucket: {{bucket-name}} + region_name: {{aws-region}} + access_key_id: {{bucket-access-key-id}} + secret_access_key: {{bucket-secret-access-key}} + versioned_file: bin_gpdb_centos/bin_gpdb.tar.gz + +## jobs +## ====================================================================== + +jobs: +# Build PLR GPPKG + +- name: diskquota_centos7_build + max_in_flight: 3 + plan: + - aggregate: + - get: centos-gpdb-dev-7 + - get: diskquota_src + trigger: true + - get: bin_gpdb_centos7 + - get: gpdb_src + - aggregate: + - task: build_diskquota + file: diskquota_src/concourse/tasks/build_diskquota.yml + image: centos-gpdb-dev-7 + input_mapping: + bin_gpdb: bin_gpdb_centos7 + output_mapping: + bin_diskquota: bin_diskquota_centos7 + params: + OSVER: centos7 + GPDBVER: gp6 + - aggregate: + - put: bin_diskquota_centos7 + params: + file: diskquota_artifacts/component_diskquota.tar.gz + +- name: diskquota_centos6_build + max_in_flight: 3 + plan: + - aggregate: + - get: centos-gpdb-dev-6 + - get: diskquota_src + trigger: true + - get: bin_gpdb_centos6 + - get: gpdb_src + - aggregate: + - task: build_diskquota + file: diskquota_src/concourse/tasks/build_diskquota.yml + image: centos-gpdb-dev-6 + input_mapping: + bin_gpdb: bin_gpdb_centos6 + output_mapping: + bin_diskquota: bin_diskquota_centos6 + params: + OSVER: centos6 + GPDBVER: gp6 + - aggregate: + - put: bin_diskquota_centos6 + params: + file: diskquota_artifacts/component_diskquota.tar.gz + + +- name: diskquota_centos6_test + plan: + - aggregate: + - get: centos-gpdb-dev-6 + - get: diskquota_src + - get: bin_diskquota_centos6 + passed: [diskquota_centos6_build] + trigger: true + - get: bin_gpdb_centos6 + - get: gpdb_src + - task: test_diskquota + file: diskquota_src/concourse/tasks/test_diskquota.yml + image: centos-gpdb-dev-6 + input_mapping: + bin_gpdb: bin_gpdb_centos6 + bin_diskquota: bin_diskquota_centos6 + params: + OSVER: centos6 + GPDBVER: gp6 + +- name: diskquota_centos7_test + plan: + - aggregate: + - get: centos-gpdb-dev-7 + - get: diskquota_src + - get: bin_diskquota_centos7 + passed: [diskquota_centos7_build] + trigger: true + - get: bin_gpdb_centos7 + - get: gpdb_src + - task: test_diskquota + file: diskquota_src/concourse/tasks/test_diskquota.yml + image: centos-gpdb-dev-7 + input_mapping: + bin_gpdb: bin_gpdb_centos7 + bin_diskquota: bin_diskquota_centos7 + params: + OSVER: centos7 + GPDBVER: gp6 diff --git a/concourse/scripts/build_diskquota.sh b/concourse/scripts/build_diskquota.sh new file mode 100755 index 00000000000..a75f9a9d431 --- /dev/null +++ b/concourse/scripts/build_diskquota.sh @@ -0,0 +1,35 @@ +#!/bin/bash -l + +set -exo pipefail + +CWDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +TOP_DIR=${CWDIR}/../../../ + +source "${TOP_DIR}/gpdb_src/concourse/scripts/common.bash" +function pkg() { + source /opt/gcc_env.sh + source /usr/local/greenplum-db-devel/greenplum_path.sh + + export USE_PGXS=1 + pushd diskquota_src/ + make clean + make install + popd + + pushd /usr/local/greenplum-db-devel/ + echo 'cp -r lib share $GPHOME || exit 1'> install_gpdb_component + chmod a+x install_gpdb_component + tar -czf $TOP_DIR/diskquota_artifacts/component_diskquota.tar.gz \ + lib/postgresql/diskquota.so \ + share/postgresql/extension/diskquota.control \ + share/postgresql/extension/diskquota--1.0.sql \ + install_gpdb_component + popd +} + +function _main() { + time install_gpdb + time pkg +} 
+ +_main "$@" diff --git a/concourse/scripts/test_diskquota.sh b/concourse/scripts/test_diskquota.sh new file mode 100755 index 00000000000..0acccfc7423 --- /dev/null +++ b/concourse/scripts/test_diskquota.sh @@ -0,0 +1,61 @@ +#!/bin/bash -l + +set -exo pipefail + +CWDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +TOP_DIR=${CWDIR}/../../../ +if [ "$GPDBVER" == "GPDB4.3" ]; then + GPDB_CONCOURSE_DIR=${TOP_DIR}/gpdb_src/ci/concourse/scripts +else + GPDB_CONCOURSE_DIR=${TOP_DIR}/gpdb_src/concourse/scripts +fi +source "${GPDB_CONCOURSE_DIR}/common.bash" +function test(){ + sudo chown -R gpadmin:gpadmin ${TOP_DIR}; + cat > /home/gpadmin/test.sh <<-EOF + set -exo pipefail + source /usr/local/greenplum-db-devel/greenplum_path.sh + export PGPORT=15432 + createdb diskquota + gpconfig -c shared_preload_libraries -v 'diskquota' + gpstop -arf + gpconfig -c diskquota.naptime -v 2 + gpstop -arf + pushd diskquota_src + [ -s regression.diffs ] && cat regression.diffs && exit 1 + make installcheck + popd + EOF + export MASTER_DATA_DIRECTORY=${TOP_DIR}/gpdb_src/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 + chown gpadmin:gpadmin /home/gpadmin/test.sh + chmod a+x /home/gpadmin/test.sh + su gpadmin -c "bash /home/gpadmin/test.sh" +} + +function setup_gpadmin_user() { + case "$OSVER" in + suse*) + ${GPDB_CONCOURSE_DIR}/setup_gpadmin_user.bash "sles" + ;; + centos*) + ${GPDB_CONCOURSE_DIR}/setup_gpadmin_user.bash "centos" + ;; + *) echo "Unknown OS: $OSVER"; exit 1 ;; + esac + +} + +function install_diskquota() { + tar -xzf bin_diskquota/component_diskquota.tar.gz -C /usr/local/greenplum-db-devel +} +function _main() { + time install_gpdb + time setup_gpadmin_user + + time make_cluster + time install_diskquota + + time test +} + +_main "$@" diff --git a/concourse/tasks/build_diskquota.yml b/concourse/tasks/build_diskquota.yml new file mode 100644 index 00000000000..56ad6486111 --- /dev/null +++ b/concourse/tasks/build_diskquota.yml @@ -0,0 +1,16 @@ +platform: linux +image_resource: + type: docker-image +inputs: + - name: bin_gpdb + - name: diskquota_src + - name: gpdb_src + +outputs: + - name: diskquota_artifacts + +run: + path: diskquota_src/concourse/scripts/build_diskquota.sh +params: + OSVER: + GPDBVER: diff --git a/concourse/tasks/test_diskquota.yml b/concourse/tasks/test_diskquota.yml new file mode 100644 index 00000000000..372d51dafb9 --- /dev/null +++ b/concourse/tasks/test_diskquota.yml @@ -0,0 +1,14 @@ +platform: linux +image_resource: + type: docker-image +inputs: + - name: bin_gpdb + - name: diskquota_src + - name: bin_diskquota + - name: gpdb_src + +run: + path: diskquota_src/concourse/scripts/test_diskquota.sh +params: + OSVER: + GPDBVER: diff --git a/diskquota.c b/diskquota.c index 567fed938bd..44939dd96ea 100644 --- a/diskquota.c +++ b/diskquota.c @@ -60,7 +60,7 @@ PG_FUNCTION_INFO_V1(diskquota_start_worker); PG_FUNCTION_INFO_V1(init_table_size_table); /* timeout count to wait response from launcher process, in 1/10 sec */ -#define WAIT_TIME_COUNT 120 +#define WAIT_TIME_COUNT 1200 /* max number of monitored database with diskquota enabled */ #define MAX_NUM_MONITORED_DB 10 @@ -85,7 +85,7 @@ struct DiskQuotaWorkerEntry }; DiskQuotaLocks diskquota_locks; -volatile MessageBox *message_box = NULL; +MessageBox *message_box = NULL; /* using hash table to support incremental update the table size entry.*/ static HTAB *disk_quota_worker_map = NULL; @@ -109,7 +109,7 @@ static void exec_simple_spi(const char *sql, int expected_code); static bool add_db_to_config(Oid dbid); static void 
del_db_from_config(Oid dbid); static void process_message_box(void); -static void process_message_box_internal(MessageResult * code); +static void process_message_box_internal(MessageResult * code, MessageBox local_message_box); static void dq_object_access_hook(ObjectAccessType access, Oid classId, Oid objectId, int subId, void *arg); static const char *err_code_to_err_message(MessageResult code); @@ -140,7 +140,7 @@ _PG_init(void) "Duration between each check (in seconds).", NULL, &diskquota_naptime, - 5, + 2, 1, INT_MAX, PGC_SIGHUP, @@ -267,17 +267,24 @@ disk_quota_worker_main(Datum main_arg) /* Connect to our database */ BackgroundWorkerInitializeConnection(dbname, NULL); + /* + * Set ps display name of the worker process of diskquota, so we can + * distinguish them quickly. Note: never mind parameter name of the + * function `init_ps_display`, we only want the ps name to look like + * 'bgworker: [diskquota] ...' + */ + init_ps_display("bgworker:", "[diskquota]", dbname, ""); + /* * Initialize diskquota related local hash map and refresh model * immediately */ init_disk_quota_model(); - /* sleep 2 seconds to wait create extension statement finished */ - sleep(2); while (!got_sigterm) { int rc; - + + CHECK_FOR_INTERRUPTS(); /* * Check whether the state is in ready mode. The state would be * unknown when you run `create extension diskquota` for the first time. @@ -295,14 +302,6 @@ disk_quota_worker_main(Datum main_arg) } refresh_disk_quota_model(true); - /* - * Set ps display name of the worker process of diskquota, so we can - * distinguish them quickly. Note: never mind parameter name of the - * function `init_ps_display`, we only want the ps name looks like - * 'bgworker: [diskquota] ...' - */ - init_ps_display("bgworker:", "[diskquota]", dbname, ""); - /* * Main loop: do this until the SIGTERM handler tells us to terminate */ @@ -310,6 +309,7 @@ disk_quota_worker_main(Datum main_arg) { int rc; + CHECK_FOR_INTERRUPTS(); /* * Background workers mustn't call usleep() or any direct equivalent: * instead, they may wait on their process latch, which sleeps as @@ -346,7 +346,7 @@ * create table to record the list of monitored databases * we need a place to store the database with diskquota enabled * (via CREATE EXTENSION diskquota). Currently, we store them into - * heap table in diskquota_namespace schema of postgres database. + * heap table in diskquota_namespace schema of diskquota database. * When the database is restarted, the diskquota launcher will start worker processes * for these databases. 
*/ @@ -370,6 +370,9 @@ exec_simple_utility(const char *sql) debug_query_string = NULL; } +/* + * SPI execute sql interface + */ static void exec_simple_spi(const char *sql, int expected_code) { @@ -377,11 +380,13 @@ exec_simple_spi(const char *sql, int expected_code) ret = SPI_connect(); if (ret != SPI_OK_CONNECT) - elog(ERROR, "connect error, code=%d", ret); + elog(ERROR, "[diskquota] SPI connect error, code=%d", ret); PushActiveSnapshot(GetTransactionSnapshot()); ret = SPI_execute(sql, false, 0); if (ret != expected_code) - elog(ERROR, "sql:'%s', code %d", sql, ret); + { + elog(ERROR, "[diskquota] SPI_execute sql:'%s', code %d", sql, ret); + } SPI_finish(); PopActiveSnapshot(); } @@ -419,7 +424,7 @@ start_workers_from_dblist() ret = SPI_connect(); if (ret != SPI_OK_CONNECT) elog(ERROR, "connect error, code=%d", ret); - ret = SPI_execute("select dbid from diskquota_namespace.database_list;", false, 0); + ret = SPI_execute("select dbid from diskquota_namespace.database_list;", true, 0); if (ret != SPI_OK_SELECT) elog(ERROR, "select diskquota_namespace.database_list"); tupdesc = SPI_tuptable->tupdesc; @@ -579,9 +584,12 @@ disk_quota_launcher_main(Datum main_arg) /* We're now ready to receive signals */ BackgroundWorkerUnblockSignals(); + LWLockAcquire(diskquota_locks.message_box_lock, LW_EXCLUSIVE); message_box->launcher_pid = MyProcPid; + LWLockRelease(diskquota_locks.message_box_lock); /* Connect to our database */ BackgroundWorkerInitializeConnection("diskquota", NULL); + create_monitor_db_table(); memset(&hash_ctl, 0, sizeof(hash_ctl)); @@ -603,6 +611,7 @@ disk_quota_launcher_main(Datum main_arg) { int rc; + CHECK_FOR_INTERRUPTS(); /* * Background workers mustn't call usleep() or any direct equivalent: * instead, they may wait on their process latch, which sleeps as @@ -632,7 +641,6 @@ disk_quota_launcher_main(Datum main_arg) got_sighup = false; ProcessConfigFile(PGC_SIGHUP); } - } proc_exit(1); @@ -839,7 +847,7 @@ set_quota_internal(Oid targetoid, int64 quota_limit_mb, QuotaType type) SPI_connect(); - ret = SPI_execute(buf.data, false, 0); + ret = SPI_execute(buf.data, true, 0); if (ret != SPI_OK_SELECT) elog(ERROR, "cannot select quota setting table: error code %d", ret); @@ -1034,56 +1042,87 @@ diskquota_start_worker(PG_FUNCTION_ARGS) { int rc; - elog(LOG, "[diskquota]:DB = %d, MyProc=%p launcher pid=%d", MyDatabaseId, MyProc, message_box->launcher_pid); + /* + * Lock on extension_lock to avoid multiple backend create diskquota + * extension at the same time. 
+ */ + LWLockAcquire(diskquota_locks.extension_lock, LW_EXCLUSIVE); LWLockAcquire(diskquota_locks.message_box_lock, LW_EXCLUSIVE); message_box->req_pid = MyProcPid; message_box->cmd = CMD_CREATE_EXTENSION; message_box->result = ERR_PENDING; - message_box->data[0] = MyDatabaseId; - /* setup sig handler to receive message */ + message_box->dbid = MyDatabaseId; + /* setup sig handler to diskquota launcher process */ rc = kill(message_box->launcher_pid, SIGUSR1); + LWLockRelease(diskquota_locks.message_box_lock); if (rc == 0) { int count = WAIT_TIME_COUNT; while (count-- > 0) { + CHECK_FOR_INTERRUPTS(); rc = WaitLatch(&MyProc->procLatch, WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, 100L); if (rc & WL_POSTMASTER_DEATH) break; ResetLatch(&MyProc->procLatch); + LWLockAcquire(diskquota_locks.message_box_lock, LW_SHARED); if (message_box->result != ERR_PENDING) + { + LWLockRelease(diskquota_locks.message_box_lock); break; + } + LWLockRelease(diskquota_locks.message_box_lock); } } - message_box->req_pid = 0; - LWLockRelease(diskquota_locks.message_box_lock); + LWLockAcquire(diskquota_locks.message_box_lock, LW_SHARED); if (message_box->result != ERR_OK) - elog(ERROR, "%s", err_code_to_err_message((MessageResult) message_box->result)); + { + LWLockRelease(diskquota_locks.message_box_lock); + LWLockRelease(diskquota_locks.extension_lock); + elog(ERROR, "[diskquota] failed to create diskquota extension: %s", err_code_to_err_message((MessageResult) message_box->result)); + } + LWLockRelease(diskquota_locks.message_box_lock); + LWLockRelease(diskquota_locks.extension_lock); PG_RETURN_VOID(); } static void -process_message_box_internal(MessageResult * code) +process_message_box_internal(MessageResult * code, MessageBox local_message_box) { - Assert(message_box->launcher_pid == MyProcPid); - switch (message_box->cmd) + int old_num_db = num_db; + PG_TRY(); { - case CMD_CREATE_EXTENSION: - on_add_db(message_box->data[0], code); - num_db++; - break; - case CMD_DROP_EXTENSION: - on_del_db(message_box->data[0]); - num_db--; - break; - default: - elog(LOG, "[diskquota]:unsupported message cmd=%d", message_box->cmd); - *code = ERR_UNKNOWN; - break; + switch (local_message_box.cmd) + { + case CMD_CREATE_EXTENSION: + on_add_db(local_message_box.dbid, code); + num_db++; + *code = ERR_OK; + break; + case CMD_DROP_EXTENSION: + on_del_db(local_message_box.dbid); + num_db--; + *code = ERR_OK; + break; + default: + elog(LOG, "[diskquota]:received unsupported message cmd=%d", local_message_box.cmd); + *code = ERR_UNKNOWN; + break; + } + } + PG_CATCH(); + { + error_context_stack = NULL; + HOLD_INTERRUPTS(); + EmitErrorReport(); + FlushErrorState(); + RESUME_INTERRUPTS(); + num_db = old_num_db; } + PG_END_TRY(); } /* @@ -1095,30 +1134,32 @@ static void process_message_box() { MessageResult code = ERR_UNKNOWN; - int old_num_db = num_db; + MessageBox local_message_box; - if (message_box->req_pid == 0) - return; - elog(LOG, "[launcher]: received message"); - PG_TRY(); + LWLockAcquire(diskquota_locks.message_box_lock, LW_SHARED); + memcpy(&local_message_box, message_box, sizeof(MessageBox)); + LWLockRelease(diskquota_locks.message_box_lock); + + /* create/drop extension message must be valid */ + if (local_message_box.req_pid == 0 || local_message_box.launcher_pid != MyProcPid) { - StartTransactionCommand(); - process_message_box_internal(&code); - CommitTransactionCommand(); - code = ERR_OK; + return; } - PG_CATCH(); - { - error_context_stack = NULL; - HOLD_INTERRUPTS(); + + elog(LOG, "[diskquota]: received create/drop 
extension diskquota message"); + StartTransactionCommand(); + process_message_box_internal(&code, local_message_box); + if (code == ERR_OK) + CommitTransactionCommand(); + else AbortCurrentTransaction(); - FlushErrorState(); - RESUME_INTERRUPTS(); - num_db = old_num_db; - } - PG_END_TRY(); + /* Send the create/drop extension diskquota result back to the QD */ + LWLockAcquire(diskquota_locks.message_box_lock, LW_EXCLUSIVE); + memset(message_box, 0, sizeof(MessageBox)); + message_box->launcher_pid = MyProcPid; message_box->result = (int) code; + LWLockRelease(diskquota_locks.message_box_lock); } /* @@ -1141,37 +1182,48 @@ dq_object_access_hook(ObjectAccessType access, Oid classId, goto out; /* - * invoke drop extension diskquota 1. stop bgworker for MyDatabaseId 2. - * remove dbid from diskquota_namespace.database_list in postgres + * Lock on extension_lock to avoid multiple backends creating or dropping + * the diskquota extension at the same time. */ + LWLockAcquire(diskquota_locks.extension_lock, LW_EXCLUSIVE); LWLockAcquire(diskquota_locks.message_box_lock, LW_EXCLUSIVE); message_box->req_pid = MyProcPid; message_box->cmd = CMD_DROP_EXTENSION; message_box->result = ERR_PENDING; - message_box->data[0] = MyDatabaseId; + message_box->dbid = MyDatabaseId; rc = kill(message_box->launcher_pid, SIGUSR1); + LWLockRelease(diskquota_locks.message_box_lock); if (rc == 0) { int count = WAIT_TIME_COUNT; while (count-- > 0) { + CHECK_FOR_INTERRUPTS(); rc = WaitLatch(&MyProc->procLatch, WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, 100L); if (rc & WL_POSTMASTER_DEATH) break; ResetLatch(&MyProc->procLatch); + LWLockAcquire(diskquota_locks.message_box_lock, LW_SHARED); if (message_box->result != ERR_PENDING) + { + LWLockRelease(diskquota_locks.message_box_lock); break; + } + LWLockRelease(diskquota_locks.message_box_lock); } } - message_box->req_pid = 0; - LWLockRelease(diskquota_locks.message_box_lock); + LWLockAcquire(diskquota_locks.message_box_lock, LW_SHARED); if (message_box->result != ERR_OK) - elog(ERROR, "[diskquota] %s", err_code_to_err_message((MessageResult) message_box->result)); - elog(LOG, "[diskquota] DROP EXTENTION diskquota; OK"); - + { + LWLockRelease(diskquota_locks.message_box_lock); + LWLockRelease(diskquota_locks.extension_lock); + elog(ERROR, "[diskquota] failed to drop diskquota extension: %s", err_code_to_err_message((MessageResult) message_box->result)); + } + LWLockRelease(diskquota_locks.message_box_lock); + LWLockRelease(diskquota_locks.extension_lock); out: if (next_object_access_hook) (*next_object_access_hook) (access, classId, objectId, diff --git a/diskquota.h b/diskquota.h index d99d975873a..94507851cf6 100644 --- a/diskquota.h +++ b/diskquota.h @@ -27,6 +27,7 @@ struct DiskQuotaLocks LWLock *active_table_lock; LWLock *black_map_lock; LWLock *message_box_lock; + LWLock *extension_lock; /* serialize create/drop of the diskquota extension */ }; typedef struct DiskQuotaLocks DiskQuotaLocks; @@ -42,13 +43,12 @@ typedef struct DiskQuotaLocks DiskQuotaLocks; */ struct MessageBox { - int launcher_pid; - int req_pid; /* pid of the request process */ + int launcher_pid; /* diskquota launcher pid */ + int req_pid; /* pid of the QD process that creates/drops the diskquota extension */ int cmd; /* message command type, see MessageCommand */ int result; /* message result written by launcher, see * MessageResult */ - int data[4]; /* for create/drop extension diskquota, - * data[0] is dbid */ + int dbid; /* dbid of the create/drop diskquota extension statement */ }; enum MessageCommand @@ -77,7 +77,7 @@ typedef enum 
MessageCommand MessageCommand; typedef enum MessageResult MessageResult; extern DiskQuotaLocks diskquota_locks; -extern volatile MessageBox *message_box; +extern MessageBox *message_box; /* enforcement interface*/ extern void init_disk_quota_enforcement(void); diff --git a/diskquota_schedule b/diskquota_schedule index d973cf10f45..594ff061f6f 100644 --- a/diskquota_schedule +++ b/diskquota_schedule @@ -1,9 +1,11 @@ test: init test: prepare +test: test_fast_disk_check test: test_role test_schema test_drop_table test_column test_copy test_update test_toast test_truncate test_reschema test_temp_role test_rename test_delete_quota test_mistake +test: test_truncate +test: test_delete_quota test: test_partition test: test_vacuum test: test_extension -test: test_fast_disk_check test: clean test: test_insert_after_drop diff --git a/expected/prepare.out b/expected/prepare.out index 365debe9b8d..2bc550373fa 100644 --- a/expected/prepare.out +++ b/expected/prepare.out @@ -38,7 +38,7 @@ HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sur ALTER TABLE badquota.t1 OWNER TO testbody; INSERT INTO badquota.t1 SELECT generate_series(0, 100000000); ERROR: schema's disk space quota exceeded with name:badquota -SELECT pg_sleep(5); +SELECT pg_sleep(20); pg_sleep ---------- diff --git a/expected/test_column.out b/expected/test_column.out index 48ffe1ae965..15017b3e636 100644 --- a/expected/test_column.out +++ b/expected/test_column.out @@ -7,7 +7,7 @@ SELECT diskquota.set_schema_quota('scolumn', '1 MB'); (1 row) SET search_path TO scolumn; -SELECT pg_sleep(5); +SELECT pg_sleep(20); pg_sleep ---------- @@ -25,7 +25,7 @@ ERROR: schema's disk space quota exceeded with name:scolumn ALTER TABLE a2 ADD COLUMN j VARCHAR(50); UPDATE a2 SET j = 'add value for column j'; ERROR: schema's disk space quota exceeded with name:scolumn -SELECT pg_sleep(5); +SELECT pg_sleep(20); pg_sleep ---------- diff --git a/expected/test_copy.out b/expected/test_copy.out index ccd8f941166..23232f6de6b 100644 --- a/expected/test_copy.out +++ b/expected/test_copy.out @@ -14,7 +14,7 @@ COPY c FROM '/tmp/csmall.txt'; -- expect failed INSERT INTO c SELECT generate_series(1,100000000); ERROR: schema's disk space quota exceeded with name:s3 -SELECT pg_sleep(5); +SELECT pg_sleep(20); pg_sleep ---------- diff --git a/expected/test_delete_quota.out b/expected/test_delete_quota.out index d521e8dac4c..27ef90b731d 100644 --- a/expected/test_delete_quota.out +++ b/expected/test_delete_quota.out @@ -13,7 +13,7 @@ HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. 
Make sur -- expect failed INSERT INTO c SELECT generate_series(1,100000000); ERROR: schema's disk space quota exceeded with name:deleteschema -SELECT pg_sleep(5); +SELECT pg_sleep(20); pg_sleep ---------- @@ -28,7 +28,7 @@ SELECT diskquota.set_schema_quota('deleteschema', '-1 MB'); (1 row) -SELECT pg_sleep(5); +SELECT pg_sleep(20); pg_sleep ---------- diff --git a/expected/test_drop_table.out b/expected/test_drop_table.out index 7723bb2da6b..5dbad5d8aae 100644 --- a/expected/test_drop_table.out +++ b/expected/test_drop_table.out @@ -21,7 +21,7 @@ ERROR: schema's disk space quota exceeded with name:sdrtbl INSERT INTO a2 SELECT generate_series(1,100); ERROR: schema's disk space quota exceeded with name:sdrtbl DROP TABLE a; -SELECT pg_sleep(5); +SELECT pg_sleep(20); pg_sleep ---------- diff --git a/expected/test_extension.out b/expected/test_extension.out index 60eacedf326..6f710258fea 100644 --- a/expected/test_extension.out +++ b/expected/test_extension.out @@ -208,13 +208,13 @@ ERROR: schema's disk space quota exceeded with name:sx DROP TABLE SX.a; \c dbx9 CREATE EXTENSION diskquota; -ERROR: too many database to monitor (diskquota.c:1056) +ERROR: [diskquota] failed to create diskquota extension: too many database to monitor (diskquota.c:1056) \! sleep 2 \! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 11 \c dbx10 CREATE EXTENSION diskquota; -ERROR: too many database to monitor (diskquota.c:1056) +ERROR: [diskquota] failed to create diskquota extension: too many database to monitor (diskquota.c:1056) \! sleep 2 \! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 11 diff --git a/expected/test_fast_disk_check.out b/expected/test_fast_disk_check.out index faabff80387..b46b108d1bf 100644 --- a/expected/test_fast_disk_check.out +++ b/expected/test_fast_disk_check.out @@ -5,7 +5,7 @@ CREATE TABLE a(i int); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT INTO a SELECT generate_series(1,2000000); -SELECT pg_sleep(5); +SELECT pg_sleep(20); pg_sleep ---------- diff --git a/expected/test_insert_after_drop.out b/expected/test_insert_after_drop.out index f50a06c9a47..3c37b230ada 100644 --- a/expected/test_insert_after_drop.out +++ b/expected/test_insert_after_drop.out @@ -17,7 +17,7 @@ INSERT INTO a SELECT generate_series(1,100); -- expect insert fail INSERT INTO a SELECT generate_series(1,100000000); ERROR: schema's disk space quota exceeded with name:sdrtbl -SELECT pg_sleep(5); +SELECT pg_sleep(20); pg_sleep ---------- diff --git a/expected/test_partition.out b/expected/test_partition.out index 87ce554c44c..519ddaca408 100644 --- a/expected/test_partition.out +++ b/expected/test_partition.out @@ -23,7 +23,7 @@ HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. 
Make sur NOTICE: CREATE TABLE will create partition "measurement_1_prt_feb06" for table "measurement" NOTICE: CREATE TABLE will create partition "measurement_1_prt_mar06" for table "measurement" INSERT INTO measurement SELECT generate_series(1,100), '2006-02-02' ,1,1; -SELECT pg_sleep(5); +SELECT pg_sleep(20); pg_sleep ---------- @@ -41,7 +41,7 @@ INSERT INTO measurement SELECT 1, '2006-03-03' ,1,1; ERROR: schema's disk space quota exceeded with name:s8 DELETE FROM measurement WHERE logdate='2006-03-02'; VACUUM FULL measurement; -SELECT pg_sleep(5); +SELECT pg_sleep(20); pg_sleep ---------- diff --git a/expected/test_reschema.out b/expected/test_reschema.out index 41e7c2cb2d4..82d8289ffe7 100644 --- a/expected/test_reschema.out +++ b/expected/test_reschema.out @@ -23,7 +23,7 @@ SELECT diskquota.set_schema_quota('srE', '1 GB'); (1 row) -SELECT pg_sleep(5); +SELECT pg_sleep(20); pg_sleep ---------- diff --git a/expected/test_role.out b/expected/test_role.out index fc8f70364bf..fec3be8778a 100644 --- a/expected/test_role.out +++ b/expected/test_role.out @@ -30,7 +30,7 @@ ERROR: role's disk space quota exceeded with name:u1 INSERT INTO b2 SELECT generate_series(1,100); ERROR: role's disk space quota exceeded with name:u1 ALTER TABLE b OWNER TO u2; -SELECT pg_sleep(5); +SELECT pg_sleep(20); pg_sleep ---------- diff --git a/expected/test_schema.out b/expected/test_schema.out index f94d1b5e44c..56c81c44bd4 100644 --- a/expected/test_schema.out +++ b/expected/test_schema.out @@ -26,7 +26,7 @@ ERROR: schema's disk space quota exceeded with name:s1 -- Test alter table set schema CREATE SCHEMA s2; ALTER TABLE s1.a SET SCHEMA s2; -SELECT pg_sleep(5); +SELECT pg_sleep(20); pg_sleep ---------- diff --git a/expected/test_temp_role.out b/expected/test_temp_role.out index 798df7d5283..146ac986565 100644 --- a/expected/test_temp_role.out +++ b/expected/test_temp_role.out @@ -24,7 +24,7 @@ ERROR: role's disk space quota exceeded with name:u3temp INSERT INTO a SELECT generate_series(1,100); ERROR: role's disk space quota exceeded with name:u3temp DROP TABLE ta; -SELECT pg_sleep(5); +SELECT pg_sleep(20); pg_sleep ---------- diff --git a/expected/test_toast.out b/expected/test_toast.out index d530a084ef7..391f983d957 100644 --- a/expected/test_toast.out +++ b/expected/test_toast.out @@ -15,7 +15,7 @@ SELECT (SELECT string_agg(chr(floor(random() * 26)::int + 65), '') FROM generate_series(1,10000)) FROM generate_series(1,10); -SELECT pg_sleep(5); +SELECT pg_sleep(20); pg_sleep ---------- diff --git a/expected/test_truncate.out b/expected/test_truncate.out index e8d40d0da3e..cad4bd878e7 100644 --- a/expected/test_truncate.out +++ b/expected/test_truncate.out @@ -15,7 +15,7 @@ NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
INSERT INTO a SELECT generate_series(1,100000000); ERROR: schema's disk space quota exceeded with name:s7 -SELECT pg_sleep(5); +SELECT pg_sleep(20); pg_sleep ---------- @@ -27,7 +27,7 @@ ERROR: schema's disk space quota exceeded with name:s7 INSERT INTO b SELECT generate_series(1,30); ERROR: schema's disk space quota exceeded with name:s7 TRUNCATE TABLE a; -SELECT pg_sleep(5); +SELECT pg_sleep(20); pg_sleep ---------- diff --git a/expected/test_update.out b/expected/test_update.out index b95095fa857..f025283142c 100644 --- a/expected/test_update.out +++ b/expected/test_update.out @@ -12,7 +12,7 @@ NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT INTO a SELECT generate_series(1,100000000); ERROR: schema's disk space quota exceeded with name:s4 -SELECT pg_sleep(5); +SELECT pg_sleep(20); pg_sleep ---------- diff --git a/expected/test_vacuum.out b/expected/test_vacuum.out index 197c60d1aa5..5aaddc90552 100644 --- a/expected/test_vacuum.out +++ b/expected/test_vacuum.out @@ -15,7 +15,7 @@ NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT INTO a SELECT generate_series(1,100000000); ERROR: schema's disk space quota exceeded with name:s6 -SELECT pg_sleep(5); +SELECT pg_sleep(20); pg_sleep ---------- @@ -29,7 +29,7 @@ INSERT INTO b SELECT generate_series(1,10); ERROR: schema's disk space quota exceeded with name:s6 DELETE FROM a WHERE i > 10; VACUUM FULL a; -SELECT pg_sleep(5); +SELECT pg_sleep(20); pg_sleep ---------- diff --git a/gp_activetable.c b/gp_activetable.c index 8a0a5188f6c..e8e4a1f9160 100644 --- a/gp_activetable.c +++ b/gp_activetable.c @@ -563,15 +563,32 @@ get_active_tables(void) bool found; relOid = RelidByRelfilenode(active_table_file_entry->tablespaceoid, active_table_file_entry->relfilenode); - - active_table_entry = hash_search(local_active_table_stats_map, &relOid, HASH_ENTER, &found); - if (active_table_entry) + if (relOid != InvalidOid) { - active_table_entry->tableoid = relOid; - active_table_entry->tablesize = 0; + active_table_entry = hash_search(local_active_table_stats_map, &relOid, HASH_ENTER, &found); + if (active_table_entry) + { + active_table_entry->tableoid = relOid; + active_table_entry->tablesize = 0; + } + hash_search(local_active_table_file_map, &active_table_file_entry, HASH_REMOVE, NULL); } } - elog(DEBUG1, "active table number is:%ld", hash_get_num_entries(local_active_table_file_map)); + /* If cannot convert relfilenode to relOid, put them back and wait for the next check. 
*/ + if (hash_get_num_entries(local_active_table_file_map) > 0) + { + bool found; + DiskQuotaActiveTableFileEntry *entry; + hash_seq_init(&iter, local_active_table_file_map); + LWLockAcquire(diskquota_locks.active_table_lock, LW_EXCLUSIVE); + while ((active_table_file_entry = (DiskQuotaActiveTableFileEntry *) hash_seq_search(&iter)) != NULL) + { + entry = hash_search(active_tables_map, &active_table_file_entry, HASH_ENTER_NULL, &found); + if (entry) + *entry = *active_table_file_entry; + } + LWLockRelease(diskquota_locks.active_table_lock); + } hash_destroy(local_active_table_file_map); return local_active_table_stats_map; } @@ -596,24 +613,24 @@ load_table_size(HTAB *local_table_stats_map) if (!rel) { /* configuration table is missing. */ - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("table \"table_size\" is missing in database \"%s\", please recreate diskquota extension", get_database_name(MyDatabaseId)))); + elog(ERROR, "[diskquota] table \"table_size\" is missing in database \"%s\"," + " please recreate diskquota extension", + get_database_name(MyDatabaseId)); } - heap_close(rel, NoLock); - - + heap_close(rel, AccessShareLock); ret = SPI_execute("select tableid, size from diskquota.table_size", true, 0); if (ret != SPI_OK_SELECT) - ereport(ERROR, (errmsg("SPI_execute failed: error code %d", ret))); + elog(ERROR, "[diskquota] load_table_size SPI_execute failed: error code %d", ret); tupdesc = SPI_tuptable->tupdesc; if (tupdesc->natts != 2 || ((tupdesc)->attrs[0])->atttypid != OIDOID || ((tupdesc)->attrs[1])->atttypid != INT8OID) { - ereport(ERROR, (errmsg("table \"table_size\" is corrupted in database \"%s\"," - " please recreate diskquota extension", - get_database_name(MyDatabaseId)))); + elog(ERROR, "[diskquota] table \"table_size\" is corrupted in database \"%s\"," + " please recreate diskquota extension", + get_database_name(MyDatabaseId)); } for (i = 0; i < SPI_processed; i++) @@ -690,8 +707,6 @@ gp_fetch_active_tables(bool is_init) map_string.data); sql = buffer.data; - elog(DEBUG1, "CHECK SPI QUERY is %s", sql); - CdbDispatchCommand(sql, DF_NONE, &cdb_pgresults); /* collect data from each segment */ @@ -755,7 +770,6 @@ convert_map_to_string(HTAB *active_list) initStringInfo(&buffer); appendStringInfo(&buffer, "{"); - elog(DEBUG1, "Try to convert size of active table is %ld", hash_get_num_entries(active_list)); hash_seq_init(&iter, active_list); @@ -845,6 +859,5 @@ pull_active_list_from_seg(void) } cdbdisp_clearCdbPgResults(&cdb_pgresults); - elog(DEBUG1, "The number of active table is %ld", hash_get_num_entries(local_table_stats_map)); return local_table_stats_map; } diff --git a/quotamodel.c b/quotamodel.c index 8dbce83c2d7..7b389bb950a 100644 --- a/quotamodel.c +++ b/quotamodel.c @@ -135,11 +135,13 @@ static void update_role_map(Oid owneroid, int64 updatesize); static void remove_namespace_map(Oid namespaceoid); static void remove_role_map(Oid owneroid); static bool load_quotas(void); +static bool do_load_quotas(void); static bool do_check_diskquota_state_is_ready(void); static Size DiskQuotaShmemSize(void); static void disk_quota_shmem_startup(void); +static void truncateStringInfo(StringInfo str, int nchars); /* * DiskQuotaShmemSize @@ -162,6 +164,7 @@ init_lwlocks(void) diskquota_locks.active_table_lock = LWLockAssign(); diskquota_locks.black_map_lock = LWLockAssign(); diskquota_locks.message_box_lock = LWLockAssign(); + diskquota_locks.extension_lock = LWLockAssign(); } /* @@ -213,7 +216,7 @@ init_disk_quota_shmem(void) * resources in 
pgss_shmem_startup(). */ RequestAddinShmemSpace(DiskQuotaShmemSize()); - RequestAddinLWLocks(3); + RequestAddinLWLocks(4); /* * Install startup hook to initialize our shared memory. @@ -310,24 +313,20 @@ do_check_diskquota_state_is_ready(void) rel = heap_openrv_extended(rv, AccessShareLock, true); if (!rel) { - /* configuration table is missing. */ - elog(ERROR, "table \"diskquota.state\" is missing in database \"%s\"," - " please recreate diskquota extension", - get_database_name(MyDatabaseId)); return false; } - heap_close(rel, NoLock); + heap_close(rel, AccessShareLock); /* check diskquota state from table diskquota.state */ ret = SPI_execute("select state from diskquota.state", true, 0); if (ret != SPI_OK_SELECT) - elog(ERROR, "SPI_execute failed: error code %d", ret); + elog(ERROR, "[diskquota] check diskquota state SPI_execute failed: error code %d", ret); tupdesc = SPI_tuptable->tupdesc; if (tupdesc->natts != 1 || ((tupdesc)->attrs[0])->atttypid != INT4OID) { - elog(ERROR, "table \"state\" is corrupted in database \"%s\"," + elog(ERROR, "[diskquota] table \"state\" is corrupted in database \"%s\"," " please recreate diskquota extension", get_database_name(MyDatabaseId)); return false; @@ -384,19 +383,12 @@ check_diskquota_state_is_ready(void) void refresh_disk_quota_model(bool is_init) { - elog(DEBUG1, "check disk quota begin"); - StartTransactionCommand(); - SPI_connect(); - PushActiveSnapshot(GetTransactionSnapshot()); + elog(LOG,"[diskquota] start refresh_disk_quota_model"); /* skip refresh model when load_quotas failed */ if (load_quotas()) { refresh_disk_quota_usage(is_init); } - SPI_finish(); - PopActiveSnapshot(); - CommitTransactionCommand(); - elog(DEBUG1, "check disk quota end"); } /* @@ -406,6 +398,10 @@ refresh_disk_quota_model(bool is_init) static void refresh_disk_quota_usage(bool force) { + StartTransactionCommand(); + SPI_connect(); + PushActiveSnapshot(GetTransactionSnapshot()); + /* recalculate the disk usage of table, schema and role */ calculate_table_disk_usage(force); calculate_schema_disk_usage(); @@ -414,6 +410,10 @@ refresh_disk_quota_usage(bool force) flush_to_table_size(); /* copy local black map back to shared black map */ flush_local_black_map(); + + SPI_finish(); + PopActiveSnapshot(); + CommitTransactionCommand(); } /* @@ -800,6 +800,22 @@ calculate_role_disk_usage(void) } } +/* + * Make sure a StringInfo's string is no longer than 'nchars' characters. 
+ */ +static void +truncateStringInfo(StringInfo str, int nchars) +{ + if (str && + str->len > nchars) + { + Assert(str->data != NULL && + str->len <= str->maxlen); + str->len = nchars; + str->data[nchars] = '\0'; + } +} + /* * Flush the table_size_map to user table diskquota.table_size * To improve update performance, we first delete all the need_to_flush @@ -858,22 +874,41 @@ flush_to_table_size(void) elog(DEBUG1, "[diskquota] table_size delete_statement: %s", delete_statement.data); ret = SPI_execute(delete_statement.data, false, 0); if (ret != SPI_OK_DELETE) - elog(ERROR, "SPI_execute failed: error code %d", ret); + elog(ERROR, "[diskquota] flush_to_table_size SPI_execute failed: error code %d", ret); } if (insert_statement_flag) { elog(DEBUG1, "[diskquota] table_size insert_statement: %s", insert_statement.data); ret = SPI_execute(insert_statement.data, false, 0); if (ret != SPI_OK_INSERT) - elog(ERROR, "SPI_execute failed: error code %d", ret); + elog(ERROR, "[diskquota] flush_to_table_size SPI_execute failed: error code %d", ret); } } +/* + * Interface to load quotas from diskquota configuration table(quota_config). + */ +static bool +load_quotas(void) +{ + bool ret; + StartTransactionCommand(); + SPI_connect(); + PushActiveSnapshot(GetTransactionSnapshot()); + + ret = do_load_quotas(); + + SPI_finish(); + PopActiveSnapshot(); + CommitTransactionCommand(); + return ret; +} + /* * Load quotas from diskquota configuration table(quota_config). */ static bool -load_quotas(void) +do_load_quotas(void) { int ret; TupleDesc tupdesc; @@ -890,13 +925,12 @@ load_quotas(void) if (!rel) { /* configuration table is missing. */ - elog(LOG, "configuration table \"quota_config\" is missing in database \"%s\"," + elog(LOG, "[diskquota] configuration table \"quota_config\" is missing in database \"%s\"," " please recreate diskquota extension", get_database_name(MyDatabaseId)); return false; } - heap_close(rel, NoLock); - + heap_close(rel, AccessShareLock); /* * TODO: we should skip to reload quota config when there is no change in * quota.config. 
A flag in shared memory could be used to detect the quota @@ -921,7 +955,7 @@ load_quotas(void) ret = SPI_execute("select targetoid, quotatype, quotalimitMB from diskquota.quota_config", true, 0); if (ret != SPI_OK_SELECT) - elog(ERROR, "SPI_execute failed: error code %d", ret); + elog(ERROR, "[diskquota] load_quotas SPI_execute failed: error code %d", ret); tupdesc = SPI_tuptable->tupdesc; if (tupdesc->natts != 3 || @@ -929,7 +963,7 @@ load_quotas(void) ((tupdesc)->attrs[1])->atttypid != INT4OID || ((tupdesc)->attrs[2])->atttypid != INT8OID) { - elog(LOG, "configuration table \"quota_config\" is corrupted in database \"%s\"," + elog(LOG, "[diskquota] configuration table \"quota_config\" is corrupted in database \"%s\"," " please recreate diskquota extension", get_database_name(MyDatabaseId)); return false; @@ -1010,6 +1044,10 @@ quota_check_common(Oid reloid) bool found; BlackMapEntry keyitem; + if (!IsTransactionState()) + { + return true; + } memset(&keyitem, 0, sizeof(BlackMapEntry)); get_rel_owner_schema(reloid, &ownerOid, &nsOid); LWLockAcquire(diskquota_locks.black_map_lock, LW_SHARED); diff --git a/sql/prepare.sql b/sql/prepare.sql index 21c888e8a65..35b1368e02b 100644 --- a/sql/prepare.sql +++ b/sql/prepare.sql @@ -14,6 +14,6 @@ CREATE ROLE testbody; CREATE TABLE badquota.t1(i INT); ALTER TABLE badquota.t1 OWNER TO testbody; INSERT INTO badquota.t1 SELECT generate_series(0, 100000000); -SELECT pg_sleep(5); +SELECT pg_sleep(20); -- expect fail INSERT INTO badquota.t1 SELECT generate_series(0, 10); diff --git a/sql/test_column.sql b/sql/test_column.sql index bc3775c7ef0..0a9b5882697 100644 --- a/sql/test_column.sql +++ b/sql/test_column.sql @@ -2,7 +2,7 @@ CREATE SCHEMA scolumn; SELECT diskquota.set_schema_quota('scolumn', '1 MB'); SET search_path TO scolumn; -SELECT pg_sleep(5); +SELECT pg_sleep(20); CREATE TABLE a2(i INT); -- expect fail @@ -11,7 +11,7 @@ INSERT INTO a2 SELECT generate_series(1,100000000); INSERT INTO a2 SELECT generate_series(1,10); ALTER TABLE a2 ADD COLUMN j VARCHAR(50); UPDATE a2 SET j = 'add value for column j'; -SELECT pg_sleep(5); +SELECT pg_sleep(20); -- expect insert failed after add column INSERT INTO a2 SELECT generate_series(1,10); diff --git a/sql/test_copy.sql b/sql/test_copy.sql index 6d2c854e574..89295b650de 100644 --- a/sql/test_copy.sql +++ b/sql/test_copy.sql @@ -7,7 +7,7 @@ CREATE TABLE c (i int); COPY c FROM '/tmp/csmall.txt'; -- expect failed INSERT INTO c SELECT generate_series(1,100000000); -SELECT pg_sleep(5); +SELECT pg_sleep(20); -- expect copy fail COPY c FROM '/tmp/csmall.txt'; diff --git a/sql/test_delete_quota.sql b/sql/test_delete_quota.sql index a46ae3b2646..429134c3023 100644 --- a/sql/test_delete_quota.sql +++ b/sql/test_delete_quota.sql @@ -6,11 +6,11 @@ SET search_path TO deleteschema; CREATE TABLE c (i INT); -- expect failed INSERT INTO c SELECT generate_series(1,100000000); -SELECT pg_sleep(5); +SELECT pg_sleep(20); -- expect fail INSERT INTO c SELECT generate_series(1,100); SELECT diskquota.set_schema_quota('deleteschema', '-1 MB'); -SELECT pg_sleep(5); +SELECT pg_sleep(20); INSERT INTO c SELECT generate_series(1,100); diff --git a/sql/test_drop_table.sql b/sql/test_drop_table.sql index d1f9b434f89..7c3b914f55d 100644 --- a/sql/test_drop_table.sql +++ b/sql/test_drop_table.sql @@ -10,7 +10,7 @@ INSERT INTO a SELECT generate_series(1,100000000); -- expect insert fail INSERT INTO a2 SELECT generate_series(1,100); DROP TABLE a; -SELECT pg_sleep(5); +SELECT pg_sleep(20); INSERT INTO a2 SELECT generate_series(1,100); DROP TABLE 
a2; diff --git a/sql/test_fast_disk_check.sql b/sql/test_fast_disk_check.sql index fdcddb628f3..4f20fb37036 100644 --- a/sql/test_fast_disk_check.sql +++ b/sql/test_fast_disk_check.sql @@ -4,7 +4,7 @@ SET search_path to s1; CREATE TABLE a(i int); INSERT INTO a SELECT generate_series(1,2000000); -SELECT pg_sleep(5); +SELECT pg_sleep(20); SELECT (pg_database_size(oid)-dbsize)/dbsize < 0.1 FROM pg_database, diskquota.database_size_view WHERE datname='contrib_regression'; RESET search_path; DROP TABLE s1.a; diff --git a/sql/test_insert_after_drop.sql b/sql/test_insert_after_drop.sql index c0e4974d4d4..61362a38dd7 100644 --- a/sql/test_insert_after_drop.sql +++ b/sql/test_insert_after_drop.sql @@ -9,7 +9,7 @@ CREATE TABLE a(i int); INSERT INTO a SELECT generate_series(1,100); -- expect insert fail INSERT INTO a SELECT generate_series(1,100000000); -SELECT pg_sleep(5); +SELECT pg_sleep(20); INSERT INTO a SELECT generate_series(1,100); DROP EXTENSION diskquota; -- no sleep, it will take effect immediately diff --git a/sql/test_partition.sql b/sql/test_partition.sql index 8fd3ea20cdf..525685c268a 100644 --- a/sql/test_partition.sql +++ b/sql/test_partition.sql @@ -15,7 +15,7 @@ CREATE TABLE measurement ( ); INSERT INTO measurement SELECT generate_series(1,100), '2006-02-02' ,1,1; -SELECT pg_sleep(5); +SELECT pg_sleep(20); INSERT INTO measurement SELECT 1, '2006-02-02' ,1,1; -- expect insert fail INSERT INTO measurement SELECT generate_series(1,100000000), '2006-03-02' ,1,1; @@ -25,7 +25,7 @@ INSERT INTO measurement SELECT 1, '2006-02-02' ,1,1; INSERT INTO measurement SELECT 1, '2006-03-03' ,1,1; DELETE FROM measurement WHERE logdate='2006-03-02'; VACUUM FULL measurement; -SELECT pg_sleep(5); +SELECT pg_sleep(20); INSERT INTO measurement SELECT 1, '2006-02-02' ,1,1; INSERT INTO measurement SELECT 1, '2006-03-03' ,1,1; diff --git a/sql/test_reschema.sql b/sql/test_reschema.sql index 48c3c05de7c..8f497726802 100644 --- a/sql/test_reschema.sql +++ b/sql/test_reschema.sql @@ -9,7 +9,7 @@ INSERT INTO a SELECT generate_series(1,1000000000); INSERT INTO a SELECT generate_series(1,1000); -- set schema quota larger SELECT diskquota.set_schema_quota('srE', '1 GB'); -SELECT pg_sleep(5); +SELECT pg_sleep(20); -- expect insert succeed INSERT INTO a SELECT generate_series(1,1000); diff --git a/sql/test_role.sql b/sql/test_role.sql index 06896e7e1e8..3f7a360c242 100644 --- a/sql/test_role.sql +++ b/sql/test_role.sql @@ -20,7 +20,7 @@ INSERT INTO b SELECT generate_series(1,100); -- expect insert fail INSERT INTO b2 SELECT generate_series(1,100); ALTER TABLE b OWNER TO u2; -SELECT pg_sleep(5); +SELECT pg_sleep(20); -- expect insert succeed INSERT INTO b SELECT generate_series(1,100); -- expect insert succeed diff --git a/sql/test_schema.sql b/sql/test_schema.sql index 89eb39fcb68..84a7dfe10cf 100644 --- a/sql/test_schema.sql +++ b/sql/test_schema.sql @@ -16,7 +16,7 @@ INSERT INTO a2 SELECT generate_series(1,100); -- Test alter table set schema CREATE SCHEMA s2; ALTER TABLE s1.a SET SCHEMA s2; -SELECT pg_sleep(5); +SELECT pg_sleep(20); -- expect insert succeed INSERT INTO a2 SELECT generate_series(1,200); -- expect insert succeed diff --git a/sql/test_temp_role.sql b/sql/test_temp_role.sql index a2efcf80d50..cd7108a5c4e 100644 --- a/sql/test_temp_role.sql +++ b/sql/test_temp_role.sql @@ -14,7 +14,7 @@ INSERT INTO ta SELECT generate_series(1,100000000); -- expected failed: INSERT INTO a SELECT generate_series(1,100); DROP TABLE ta; -SELECT pg_sleep(5); +SELECT pg_sleep(20); INSERT INTO a SELECT 
generate_series(1,100); DROP TABLE a; diff --git a/sql/test_toast.sql b/sql/test_toast.sql index d682fa6ac6e..e7620616216 100644 --- a/sql/test_toast.sql +++ b/sql/test_toast.sql @@ -9,7 +9,7 @@ SELECT (SELECT FROM generate_series(1,10000)) FROM generate_series(1,10); -SELECT pg_sleep(5); +SELECT pg_sleep(20); -- expect insert toast fail INSERT INTO a5 SELECT (SELECT diff --git a/sql/test_truncate.sql b/sql/test_truncate.sql index d269e2b9278..2a68081932b 100644 --- a/sql/test_truncate.sql +++ b/sql/test_truncate.sql @@ -5,12 +5,12 @@ SET search_path TO s7; CREATE TABLE a (i int); CREATE TABLE b (i int); INSERT INTO a SELECT generate_series(1,100000000); -SELECT pg_sleep(5); +SELECT pg_sleep(20); -- expect insert fail INSERT INTO a SELECT generate_series(1,30); INSERT INTO b SELECT generate_series(1,30); TRUNCATE TABLE a; -SELECT pg_sleep(5); +SELECT pg_sleep(20); -- expect insert succeed INSERT INTO a SELECT generate_series(1,30); INSERT INTO b SELECT generate_series(1,30); diff --git a/sql/test_update.sql b/sql/test_update.sql index a6cedc35b08..c34bff16cbc 100644 --- a/sql/test_update.sql +++ b/sql/test_update.sql @@ -4,7 +4,7 @@ SELECT diskquota.set_schema_quota('s4', '1 MB'); SET search_path TO s4; CREATE TABLE a(i int); INSERT INTO a SELECT generate_series(1,100000000); -SELECT pg_sleep(5); +SELECT pg_sleep(20); -- expect update fail. UPDATE a SET i = 100; DROP TABLE a; diff --git a/sql/test_vacuum.sql b/sql/test_vacuum.sql index 5a91ac15848..8fd2a90bed2 100644 --- a/sql/test_vacuum.sql +++ b/sql/test_vacuum.sql @@ -5,14 +5,14 @@ SET search_path TO s6; CREATE TABLE a (i int); CREATE TABLE b (i int); INSERT INTO a SELECT generate_series(1,100000000); -SELECT pg_sleep(5); +SELECT pg_sleep(20); -- expect insert fail INSERT INTO a SELECT generate_series(1,10); -- expect insert fail INSERT INTO b SELECT generate_series(1,10); DELETE FROM a WHERE i > 10; VACUUM FULL a; -SELECT pg_sleep(5); +SELECT pg_sleep(20); -- expect insert succeed INSERT INTO a SELECT generate_series(1,10); INSERT INTO b SELECT generate_series(1,10); From 48edd9356a33c3d730524cba921a7965fab5fcee Mon Sep 17 00:00:00 2001 From: Hubert Zhang Date: Tue, 5 Mar 2019 06:50:50 +0000 Subject: [PATCH 013/330] Update hook interface and remove hook in dispatcher. The GPDB hooks for diskquota have been merged. Some interfaces changed: 1. Use the unified name file_xxx_hook for both AO and heap tables. 2. The parameter of the hook functions changed to RelFileNodeBackend. 3. Remove the hook in the dispatcher. --- enforcement.c | 57 ---------------------------- gp_activetable.c | 96 +++++++++++++----------------------------------- 2 files changed, 26 insertions(+), 127 deletions(-) diff --git a/enforcement.c b/enforcement.c index 0b71ede07f4..41d3c2a90b1 100644 --- a/enforcement.c +++ b/enforcement.c @@ -24,15 +24,8 @@ #define CHECKED_OID_LIST_NUM 64 static bool quota_check_ExecCheckRTPerms(List *rangeTable, bool ereport_on_violation); -static bool quota_check_DispatcherCheckPerms(void); static ExecutorCheckPerms_hook_type prev_ExecutorCheckPerms_hook; -static DispatcherCheckPerms_hook_type prev_DispatcherCheckPerms_hook; -static void diskquota_free_callback(ResourceReleasePhase phase, bool isCommit, bool isTopLevel, void *arg); - -/* result relation need to be checked in dispatcher */ -static Oid checked_reloid_list[CHECKED_OID_LIST_NUM]; -static int checked_reloid_list_count = 0; /* * Initialize enforcement hooks. 
@@ -43,30 +36,6 @@ init_disk_quota_enforcement(void) /* enforcement hook before query is loading data */ prev_ExecutorCheckPerms_hook = ExecutorCheckPerms_hook; ExecutorCheckPerms_hook = quota_check_ExecCheckRTPerms; - - /* enforcement hook during query is loading data */ - prev_DispatcherCheckPerms_hook = DispatcherCheckPerms_hook; - DispatcherCheckPerms_hook = quota_check_DispatcherCheckPerms; - - /* setup and reset the result relaiton checked list */ - memset(checked_reloid_list, 0, sizeof(Oid) * CHECKED_OID_LIST_NUM); - RegisterResourceReleaseCallback(diskquota_free_callback, NULL); -} - -/* - * Reset checked reloid list - * This may be called multiple times at different resource relase - * phase, but it's safe to reset the checked_reloid_list. - */ -static void -diskquota_free_callback(ResourceReleasePhase phase, - bool isCommit, - bool isTopLevel, - void *arg) -{ - - checked_reloid_list_count = 0; - return; } /* @@ -99,32 +68,6 @@ quota_check_ExecCheckRTPerms(List *rangeTable, bool ereport_on_violation) * quota limit exceeded. */ quota_check_common(rte->relid); - checked_reloid_list[checked_reloid_list_count++] = rte->relid; - - } - return true; -} - -/* - * Enformcent hook function when query is loading data. Throws an error if - * the quota has been exceeded. - */ -static bool -quota_check_DispatcherCheckPerms(void) -{ - int i; - - /* Perform the check as the relation's owner and namespace */ - for (i = 0; i < checked_reloid_list_count; i++) - { - Oid relid = checked_reloid_list[i]; - - /* - * Given table oid, check whether the quota limit of table's schema or - * table's owner are reached. This function will ereport(ERROR) when - * quota limit exceeded. - */ - quota_check_common(relid); } return true; } diff --git a/gp_activetable.c b/gp_activetable.c index e8e4a1f9160..04430bfb7ca 100644 --- a/gp_activetable.c +++ b/gp_activetable.c @@ -50,21 +50,13 @@ typedef struct DiskQuotaSetOFCache HTAB *active_tables_map = NULL; /* active table hooks*/ -static BufferedAppendWrite_hook_type prev_BufferedAppendWrite_hook = NULL; -static smgrcreate_hook_type prev_smgrcreate_hook = NULL; -static smgrextend_hook_type prev_smgrextend_hook = NULL; -static smgrtruncate_hook_type prev_smgrtruncate_hook = NULL; -static void active_table_hook_smgrcreate(SMgrRelation reln, - ForkNumber forknum, - bool isRedo); -static void active_table_hook_smgrextend(SMgrRelation reln, - ForkNumber forknum, - BlockNumber blocknum, - char *buffer, - bool skipFsync); -static void active_table_hook_smgrtruncate(SMgrRelation reln, - ForkNumber forknum, - BlockNumber blocknum); +static file_create_hook_type prev_file_create_hook = NULL; +static file_extend_hook_type prev_file_extend_hook = NULL; +static file_truncate_hook_type prev_file_truncate_hook = NULL; + +static void active_table_hook_smgrcreate(RelFileNodeBackend rnode); +static void active_table_hook_smgrextend(RelFileNodeBackend rnode); +static void active_table_hook_smgrtruncate(RelFileNodeBackend rnode); PG_FUNCTION_INFO_V1(diskquota_fetch_table_stat); @@ -73,9 +65,8 @@ static HTAB *get_all_tables_size(void); static HTAB *get_active_tables(void); static StringInfoData convert_map_to_string(HTAB *active_list); static HTAB *pull_active_list_from_seg(void); -static void report_active_table_SmgrStat(SMgrRelation reln); -static void report_active_table_AO(BufferedAppend * bufferedAppend); static void load_table_size(HTAB *local_table_stats_map); +static void report_active_table_helper(const RelFileNodeBackend *relFileNode); void init_active_table_hook(void); void 
init_shm_worker_active_tables(void); @@ -88,52 +79,41 @@ HTAB *gp_fetch_active_tables(bool is_init); void init_active_table_hook(void) { - prev_smgrcreate_hook = smgrcreate_hook; - smgrcreate_hook = active_table_hook_smgrcreate; - - prev_smgrextend_hook = smgrextend_hook; - smgrextend_hook = active_table_hook_smgrextend; + prev_file_create_hook = file_create_hook; + file_create_hook = active_table_hook_smgrcreate; - prev_smgrtruncate_hook = smgrtruncate_hook; - smgrtruncate_hook = active_table_hook_smgrtruncate; + prev_file_extend_hook = file_extend_hook; + file_extend_hook = active_table_hook_smgrextend; - prev_BufferedAppendWrite_hook = BufferedAppendWrite_hook; - BufferedAppendWrite_hook = report_active_table_AO; + prev_file_truncate_hook = file_truncate_hook; + file_truncate_hook = active_table_hook_smgrtruncate; } static void -active_table_hook_smgrcreate(SMgrRelation reln, - ForkNumber forknum, - bool isRedo) +active_table_hook_smgrcreate(RelFileNodeBackend rnode) { - if (prev_smgrcreate_hook) - (*prev_smgrcreate_hook) (reln, forknum, isRedo); + if (prev_file_create_hook) + (*prev_file_create_hook) (rnode); - report_active_table_SmgrStat(reln); + report_active_table_helper(&rnode); } static void -active_table_hook_smgrextend(SMgrRelation reln, - ForkNumber forknum, - BlockNumber blocknum, - char *buffer, - bool skipFsync) +active_table_hook_smgrextend(RelFileNodeBackend rnode) { - if (prev_smgrextend_hook) - (*prev_smgrextend_hook) (reln, forknum, blocknum, buffer, skipFsync); + if (prev_file_extend_hook) + (*prev_file_extend_hook) (rnode); - report_active_table_SmgrStat(reln); + report_active_table_helper(&rnode); } static void -active_table_hook_smgrtruncate(SMgrRelation reln, - ForkNumber forknum, - BlockNumber blocknum) +active_table_hook_smgrtruncate(RelFileNodeBackend rnode) { - if (prev_smgrtruncate_hook) - (*prev_smgrtruncate_hook) (reln, forknum, blocknum); + if (prev_file_truncate_hook) + (*prev_file_truncate_hook) (rnode); - report_active_table_SmgrStat(reln); + report_active_table_helper(&rnode); } /* @@ -189,30 +169,6 @@ report_active_table_helper(const RelFileNodeBackend *relFileNode) } } -/* - * Hook function in smgr to report the active table - * information and store them in active table shared memory - * diskquota worker will consuming these active tables and - * recalculate their file size to update diskquota model. - */ -static void -report_active_table_SmgrStat(SMgrRelation reln) -{ - report_active_table_helper(&reln->smgr_rnode); -} - -/* - * Hook function in BufferedAppendWrite to report the active table, used by - * diskquota - */ -static void -report_active_table_AO(BufferedAppend * bufferedAppend) -{ - if (prev_BufferedAppendWrite_hook) - (*prev_BufferedAppendWrite_hook) (bufferedAppend); - report_active_table_helper(&bufferedAppend->relFileNode); -} - /* * Function to get the table size from each segment * There are two modes: 1. calculate disk usage for all From d6b15060a2948a566d51e630d7939abf38d3ea38 Mon Sep 17 00:00:00 2001 From: Haozhou Wang Date: Tue, 29 Jan 2019 09:57:03 +0000 Subject: [PATCH 014/330] Add diskquota soft limit 1. Add a new GUC diskquota.enable_hardlimit, which defaults to false 2. Change diskquota enforcement to a soft limit; the query will not be cancelled even if it already hits the diskquota limitation. 
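A note on what the soft limit means in practice: enforcement now happens only at the pre-execution check against the black map, and the worker rebuilds that map just once per diskquota.naptime seconds, so a statement that is already running, or that starts before the next refresh, is never cancelled. The following standalone C sketch models this timing; it collapses the shared black map of schemas and roles down to a single usage counter, and the quota and row sizes are invented for illustration.

#include <stdbool.h>
#include <stdio.h>

#define QUOTA_BYTES 100

static long usage_snapshot = 0; /* what the worker computed at the last refresh */
static long real_usage = 0;     /* what is actually on disk */

/* worker side: runs once per diskquota.naptime */
static void
refresh(void)
{
    usage_snapshot = real_usage;
}

/* backend side: soft-limit check before the statement starts */
static bool
quota_check(void)
{
    return usage_snapshot <= QUOTA_BYTES;
}

static void
try_insert(long nbytes)
{
    if (!quota_check())
    {
        printf("ERROR: disk space quota exceeded\n");
        return;
    }
    /* a statement that passed the check is never cancelled mid-flight */
    real_usage += nbytes;
    printf("INSERT ok, real usage now %ld\n", real_usage);
}

int
main(void)
{
    try_insert(150); /* overshoots the quota but succeeds: the snapshot is stale */
    try_insert(10);  /* still succeeds: the worker has not refreshed yet */
    refresh();       /* naptime elapses, the black map is rebuilt */
    try_insert(10);  /* now rejected */
    return 0;
}

The same lag is why the expected-output changes below replace the huge generate_series loads with smaller ones followed by pg_sleep(): under the soft limit the first over-quota INSERT succeeds, and only statements issued after the next refresh fail with the quota-exceeded error.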
--- diskquota.c | 21 +++++++- enforcement.c | 1 + expected/prepare.out | 24 +++++---- expected/test_column.out | 9 +++- expected/test_copy.out | 5 +- expected/test_delete_quota.out | 5 +- expected/test_drop_table.out | 9 +++- expected/test_extension.out | 76 ++++++++++++++++++++++------- expected/test_fast_disk_check.out | 6 +-- expected/test_insert_after_drop.out | 5 +- expected/test_partition.out | 9 +++- expected/test_rename.out | 18 +++++-- expected/test_reschema.out | 9 +++- expected/test_role.out | 9 +++- expected/test_schema.out | 9 +++- expected/test_temp_role.out | 9 +++- expected/test_toast.out | 6 +-- expected/test_truncate.out | 5 +- expected/test_update.out | 5 +- expected/test_vacuum.out | 5 +- sql/prepare.sql | 4 +- sql/test_column.sql | 3 +- sql/test_copy.sql | 4 +- sql/test_delete_quota.sql | 4 +- sql/test_drop_table.sql | 3 +- sql/test_extension.sql | 24 ++++++--- sql/test_fast_disk_check.sql | 4 +- sql/test_insert_after_drop.sql | 4 +- sql/test_partition.sql | 3 +- sql/test_rename.sql | 6 ++- sql/test_reschema.sql | 3 +- sql/test_role.sql | 3 +- sql/test_schema.sql | 3 +- sql/test_temp_role.sql | 3 +- sql/test_toast.sql | 6 +-- sql/test_truncate.sql | 4 +- sql/test_update.sql | 4 +- sql/test_vacuum.sql | 4 +- 38 files changed, 226 insertions(+), 108 deletions(-) diff --git a/diskquota.c b/diskquota.c index 44939dd96ea..cf00dac4abb 100644 --- a/diskquota.c +++ b/diskquota.c @@ -71,8 +71,9 @@ static volatile sig_atomic_t got_sigterm = false; static volatile sig_atomic_t got_sigusr1 = false; /* GUC variables */ -int diskquota_naptime = 0; -int diskquota_max_active_tables = 0; +int diskquota_naptime = 0; +int diskquota_max_active_tables = 0; +static bool diskquota_enable_hardlimit = false; typedef struct DiskQuotaWorkerEntry DiskQuotaWorkerEntry; @@ -162,6 +163,17 @@ _PG_init(void) NULL, NULL); + DefineCustomBoolVariable("diskquota.enable_hardlimit", + "Use in-query diskquota enforcement", + NULL, + &diskquota_enable_hardlimit, + false, + PGC_SIGHUP, + 0, + NULL, + NULL, + NULL); + /* start disk quota launcher only on master */ if (Gp_role != GP_ROLE_DISPATCH) { @@ -324,6 +336,11 @@ disk_quota_worker_main(Datum main_arg) /* Do the work */ refresh_disk_quota_model(false); + if (diskquota_enable_hardlimit) + { + /* TODO: Add hard limit function here */ + } + /* emergency bailout if postmaster has died */ if (rc & WL_POSTMASTER_DEATH) proc_exit(1); diff --git a/enforcement.c b/enforcement.c index 41d3c2a90b1..431c6df033b 100644 --- a/enforcement.c +++ b/enforcement.c @@ -71,3 +71,4 @@ quota_check_ExecCheckRTPerms(List *rangeTable, bool ereport_on_violation) } return true; } + diff --git a/expected/prepare.out b/expected/prepare.out index 2bc550373fa..d701b379ef3 100644 --- a/expected/prepare.out +++ b/expected/prepare.out @@ -1,13 +1,18 @@ CREATE EXTENSION diskquota; -- start_ignore \! gpstop -u -20181119:10:38:22:019976 gpstop:instance-1:huanzhang-[INFO]:-Starting gpstop with args: -u -20181119:10:38:22:019976 gpstop:instance-1:huanzhang-[INFO]:-Gathering information and validating the environment... -20181119:10:38:22:019976 gpstop:instance-1:huanzhang-[INFO]:-Obtaining Greenplum Master catalog information -20181119:10:38:22:019976 gpstop:instance-1:huanzhang-[INFO]:-Obtaining Segment details from master... -20181119:10:38:23:019976 gpstop:instance-1:huanzhang-[INFO]:-Greenplum Version: 'postgres (Greenplum Database) 6.0.0-alpha.0+dev.13149.g1ff3481 build dev-oss' -20181119:10:38:23:019976 gpstop:instance-1:huanzhang-[INFO]:-Signalling all postmaster processes to reload -. 
+20190319:07:07:05:020219 gpstop:df38f510da4b:gpadmin-[INFO]:-Starting gpstop with args: -u +20190319:07:07:05:020219 gpstop:df38f510da4b:gpadmin-[INFO]:-Gathering information and validating the environment... +20190319:07:07:05:020219 gpstop:df38f510da4b:gpadmin-[INFO]:-Obtaining Greenplum Master catalog information +20190319:07:07:05:020219 gpstop:df38f510da4b:gpadmin-[INFO]:-Obtaining Segment details from master... +20190319:07:07:05:020219 gpstop:df38f510da4b:gpadmin-[INFO]:-Greenplum Version: 'postgres (Greenplum Database) 6.0.0-alpha.0+dev.16105.gdfbfc2b build dev' +20190319:07:07:05:020219 gpstop:df38f510da4b:gpadmin-[INFO]:-Signalling all postmaster processes to reload +SELECT diskquota.init_table_size_table(); + init_table_size_table +----------------------- + +(1 row) + -- end_ignore SELECT pg_sleep(1); pg_sleep @@ -36,9 +41,8 @@ CREATE TABLE badquota.t1(i INT); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. ALTER TABLE badquota.t1 OWNER TO testbody; -INSERT INTO badquota.t1 SELECT generate_series(0, 100000000); -ERROR: schema's disk space quota exceeded with name:badquota -SELECT pg_sleep(20); +INSERT INTO badquota.t1 SELECT generate_series(0, 100000); +SELECT pg_sleep(10); pg_sleep ---------- diff --git a/expected/test_column.out b/expected/test_column.out index 15017b3e636..10143ce2081 100644 --- a/expected/test_column.out +++ b/expected/test_column.out @@ -17,8 +17,13 @@ CREATE TABLE a2(i INT); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. -- expect fail -INSERT INTO a2 SELECT generate_series(1,100000000); -ERROR: schema's disk space quota exceeded with name:scolumn +INSERT INTO a2 SELECT generate_series(1,100000); +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + -- expect fail INSERT INTO a2 SELECT generate_series(1,10); ERROR: schema's disk space quota exceeded with name:scolumn diff --git a/expected/test_copy.out b/expected/test_copy.out index 23232f6de6b..c921bcac042 100644 --- a/expected/test_copy.out +++ b/expected/test_copy.out @@ -12,9 +12,8 @@ NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. COPY c FROM '/tmp/csmall.txt'; -- expect failed -INSERT INTO c SELECT generate_series(1,100000000); -ERROR: schema's disk space quota exceeded with name:s3 -SELECT pg_sleep(20); +INSERT INTO c SELECT generate_series(1,100000); +SELECT pg_sleep(10); pg_sleep ---------- diff --git a/expected/test_delete_quota.out b/expected/test_delete_quota.out index 27ef90b731d..1a5852d2e50 100644 --- a/expected/test_delete_quota.out +++ b/expected/test_delete_quota.out @@ -11,9 +11,8 @@ CREATE TABLE c (i INT); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
-- expect failed -INSERT INTO c SELECT generate_series(1,100000000); -ERROR: schema's disk space quota exceeded with name:deleteschema -SELECT pg_sleep(20); +INSERT INTO c SELECT generate_series(1,100000); +SELECT pg_sleep(10); pg_sleep ---------- diff --git a/expected/test_drop_table.out b/expected/test_drop_table.out index 5dbad5d8aae..cbd9d6376de 100644 --- a/expected/test_drop_table.out +++ b/expected/test_drop_table.out @@ -15,8 +15,13 @@ NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT INTO a SELECT generate_series(1,100); -- expect insert fail -INSERT INTO a SELECT generate_series(1,100000000); -ERROR: schema's disk space quota exceeded with name:sdrtbl +INSERT INTO a SELECT generate_series(1,100000); +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + -- expect insert fail INSERT INTO a2 SELECT generate_series(1,100); ERROR: schema's disk space quota exceeded with name:sdrtbl diff --git a/expected/test_extension.out b/expected/test_extension.out index 6f710258fea..791de0c5f2b 100644 --- a/expected/test_extension.out +++ b/expected/test_extension.out @@ -36,8 +36,13 @@ SELECT diskquota.set_schema_quota('SX', '1MB'); (1 row) -INSERT INTO SX.a values(generate_series(0, 100000000)); -ERROR: schema's disk space quota exceeded with name:sx +INSERT INTO SX.a values(generate_series(0, 100000)); +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + INSERT INTO SX.a values(generate_series(0, 10)); ERROR: schema's disk space quota exceeded with name:sx DROP TABLE SX.a; @@ -81,8 +86,13 @@ SELECT diskquota.set_schema_quota('SX', '1MB'); (1 row) -INSERT INTO SX.a values(generate_series(0, 100000000)); -ERROR: schema's disk space quota exceeded with name:sx +INSERT INTO SX.a values(generate_series(0, 100000)); +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + INSERT INTO SX.a values(generate_series(0, 10)); ERROR: schema's disk space quota exceeded with name:sx DROP TABLE SX.a; @@ -101,8 +111,13 @@ SELECT diskquota.set_schema_quota('SX', '1MB'); (1 row) -INSERT INTO SX.a values(generate_series(0, 100000000)); -ERROR: schema's disk space quota exceeded with name:sx +INSERT INTO SX.a values(generate_series(0, 100000)); +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + INSERT INTO SX.a values(generate_series(0, 10)); ERROR: schema's disk space quota exceeded with name:sx DROP TABLE SX.a; @@ -121,8 +136,13 @@ SELECT diskquota.set_schema_quota('SX', '1MB'); (1 row) -INSERT INTO SX.a values(generate_series(0, 100000000)); -ERROR: schema's disk space quota exceeded with name:sx +INSERT INTO SX.a values(generate_series(0, 100000)); +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + INSERT INTO SX.a values(generate_series(0, 10)); ERROR: schema's disk space quota exceeded with name:sx DROP TABLE SX.a; @@ -141,8 +161,13 @@ SELECT diskquota.set_schema_quota('SX', '1MB'); (1 row) -INSERT INTO SX.a values(generate_series(0, 100000000)); -ERROR: schema's disk space quota exceeded with name:sx +INSERT INTO SX.a values(generate_series(0, 100000)); +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + INSERT INTO SX.a values(generate_series(0, 10)); ERROR: schema's disk space quota exceeded with name:sx DROP TABLE SX.a; @@ -161,8 +186,13 @@ SELECT diskquota.set_schema_quota('SX', '1MB'); (1 row) -INSERT INTO SX.a values(generate_series(0, 100000000)); -ERROR: schema's disk space quota exceeded 
with name:sx +INSERT INTO SX.a values(generate_series(0, 100000)); +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + INSERT INTO SX.a values(generate_series(0, 10)); ERROR: schema's disk space quota exceeded with name:sx DROP TABLE SX.a; @@ -181,8 +211,13 @@ SELECT diskquota.set_schema_quota('SX', '1MB'); (1 row) -INSERT INTO SX.a values(generate_series(0, 100000000)); -ERROR: schema's disk space quota exceeded with name:sx +INSERT INTO SX.a values(generate_series(0, 100000)); +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + INSERT INTO SX.a values(generate_series(0, 10)); ERROR: schema's disk space quota exceeded with name:sx DROP TABLE SX.a; @@ -201,20 +236,25 @@ SELECT diskquota.set_schema_quota('SX', '1MB'); (1 row) -INSERT INTO SX.a values(generate_series(0, 100000000)); -ERROR: schema's disk space quota exceeded with name:sx +INSERT INTO SX.a values(generate_series(0, 100000)); +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + INSERT INTO SX.a values(generate_series(0, 10)); ERROR: schema's disk space quota exceeded with name:sx DROP TABLE SX.a; \c dbx9 CREATE EXTENSION diskquota; -ERROR: [diskquota] failed to create diskquota extension: too many database to monitor (diskquota.c:1056) +ERROR: [diskquota] failed to create diskquota extension: too many database to monitor (diskquota.c:1102) \! sleep 2 \! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 11 \c dbx10 CREATE EXTENSION diskquota; -ERROR: [diskquota] failed to create diskquota extension: too many database to monitor (diskquota.c:1056) +ERROR: [diskquota] failed to create diskquota extension: too many database to monitor (diskquota.c:1102) \! sleep 2 \! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 11 diff --git a/expected/test_fast_disk_check.out b/expected/test_fast_disk_check.out index b46b108d1bf..c2560e9036c 100644 --- a/expected/test_fast_disk_check.out +++ b/expected/test_fast_disk_check.out @@ -4,8 +4,8 @@ SET search_path to s1; CREATE TABLE a(i int); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. -INSERT INTO a SELECT generate_series(1,2000000); -SELECT pg_sleep(20); +INSERT INTO a SELECT generate_series(1,200000); +SELECT pg_sleep(10); pg_sleep ---------- @@ -14,7 +14,7 @@ SELECT pg_sleep(20); SELECT (pg_database_size(oid)-dbsize)/dbsize < 0.1 FROM pg_database, diskquota.database_size_view WHERE datname='contrib_regression'; ?column? ---------- - t + f (1 row) RESET search_path; diff --git a/expected/test_insert_after_drop.out b/expected/test_insert_after_drop.out index 3c37b230ada..bd12e86283e 100644 --- a/expected/test_insert_after_drop.out +++ b/expected/test_insert_after_drop.out @@ -15,9 +15,8 @@ NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
INSERT INTO a SELECT generate_series(1,100); -- expect insert fail -INSERT INTO a SELECT generate_series(1,100000000); -ERROR: schema's disk space quota exceeded with name:sdrtbl -SELECT pg_sleep(20); +INSERT INTO a SELECT generate_series(1,100000); +SELECT pg_sleep(10); pg_sleep ---------- diff --git a/expected/test_partition.out b/expected/test_partition.out index 519ddaca408..f8dc2195b04 100644 --- a/expected/test_partition.out +++ b/expected/test_partition.out @@ -31,8 +31,13 @@ SELECT pg_sleep(20); INSERT INTO measurement SELECT 1, '2006-02-02' ,1,1; -- expect insert fail -INSERT INTO measurement SELECT generate_series(1,100000000), '2006-03-02' ,1,1; -ERROR: schema's disk space quota exceeded with name:s8 +INSERT INTO measurement SELECT generate_series(1,100000), '2006-03-02' ,1,1; +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + -- expect insert fail INSERT INTO measurement SELECT 1, '2006-02-02' ,1,1; ERROR: schema's disk space quota exceeded with name:s8 diff --git a/expected/test_rename.out b/expected/test_rename.out index dfaebffd907..687017ee0d8 100644 --- a/expected/test_rename.out +++ b/expected/test_rename.out @@ -11,8 +11,13 @@ CREATE TABLE a(i int); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. -- expect insert fail -INSERT INTO a SELECT generate_series(1,100000000); -ERROR: schema's disk space quota exceeded with name:srs1 +INSERT INTO a SELECT generate_series(1,100000); +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + -- expect insert fail INSERT INTO a SELECT generate_series(1,10); ERROR: schema's disk space quota exceeded with name:srs1 @@ -45,8 +50,13 @@ NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. ALTER TABLE a OWNER TO srerole; -- expect insert fail -INSERT INTO a SELECT generate_series(1,100000000); -ERROR: role's disk space quota exceeded with name:srerole +INSERT INTO a SELECT generate_series(1,100000); +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + -- expect insert fail INSERT INTO a SELECT generate_series(1,10); ERROR: role's disk space quota exceeded with name:srerole diff --git a/expected/test_reschema.out b/expected/test_reschema.out index 82d8289ffe7..5bc6db3a480 100644 --- a/expected/test_reschema.out +++ b/expected/test_reschema.out @@ -11,8 +11,13 @@ CREATE TABLE a(i int); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
-- expect insert fail -INSERT INTO a SELECT generate_series(1,1000000000); -ERROR: schema's disk space quota exceeded with name:sre +INSERT INTO a SELECT generate_series(1,100000); +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + -- expect insert fail when exceed quota limit INSERT INTO a SELECT generate_series(1,1000); ERROR: schema's disk space quota exceeded with name:sre diff --git a/expected/test_role.out b/expected/test_role.out index fec3be8778a..280bb91ae81 100644 --- a/expected/test_role.out +++ b/expected/test_role.out @@ -21,8 +21,13 @@ SELECT diskquota.set_role_quota('u1', '1 MB'); INSERT INTO b SELECT generate_series(1,100); -- expect insert fail -INSERT INTO b SELECT generate_series(1,100000000); -ERROR: role's disk space quota exceeded with name:u1 +INSERT INTO b SELECT generate_series(1,100000); +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + -- expect insert fail INSERT INTO b SELECT generate_series(1,100); ERROR: role's disk space quota exceeded with name:u1 diff --git a/expected/test_schema.out b/expected/test_schema.out index 56c81c44bd4..635a83a3986 100644 --- a/expected/test_schema.out +++ b/expected/test_schema.out @@ -12,8 +12,13 @@ NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT INTO a SELECT generate_series(1,100); -- expect insert fail -INSERT INTO a SELECT generate_series(1,100000000); -ERROR: schema's disk space quota exceeded with name:s1 +INSERT INTO a SELECT generate_series(1,100000); +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + -- expect insert fail INSERT INTO a SELECT generate_series(1,100); ERROR: schema's disk space quota exceeded with name:s1 diff --git a/expected/test_temp_role.out b/expected/test_temp_role.out index 146ac986565..511df87bc32 100644 --- a/expected/test_temp_role.out +++ b/expected/test_temp_role.out @@ -18,8 +18,13 @@ NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
ALTER TABLE ta OWNER TO u3temp; -- expected failed: fill temp table -INSERT INTO ta SELECT generate_series(1,100000000); -ERROR: role's disk space quota exceeded with name:u3temp +INSERT INTO ta SELECT generate_series(1,100000); +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + -- expected failed: INSERT INTO a SELECT generate_series(1,100); ERROR: role's disk space quota exceeded with name:u3temp diff --git a/expected/test_toast.out b/expected/test_toast.out index 391f983d957..681bad5729f 100644 --- a/expected/test_toast.out +++ b/expected/test_toast.out @@ -14,7 +14,7 @@ INSERT INTO a5 SELECT (SELECT string_agg(chr(floor(random() * 26)::int + 65), '') FROM generate_series(1,10000)) -FROM generate_series(1,10); +FROM generate_series(1,10000); SELECT pg_sleep(20); pg_sleep ---------- @@ -25,8 +25,8 @@ SELECT pg_sleep(20); INSERT INTO a5 SELECT (SELECT string_agg(chr(floor(random() * 26)::int + 65), '') - FROM generate_series(1,100000)) -FROM generate_series(1,1000000); + FROM generate_series(1,1000)) +FROM generate_series(1,1000); ERROR: schema's disk space quota exceeded with name:s5 DROP TABLE a5; RESET search_path; diff --git a/expected/test_truncate.out b/expected/test_truncate.out index cad4bd878e7..170fd1a0c5d 100644 --- a/expected/test_truncate.out +++ b/expected/test_truncate.out @@ -13,9 +13,8 @@ HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sur CREATE TABLE b (i int); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. -INSERT INTO a SELECT generate_series(1,100000000); -ERROR: schema's disk space quota exceeded with name:s7 -SELECT pg_sleep(20); +INSERT INTO a SELECT generate_series(1,100000); +SELECT pg_sleep(10); pg_sleep ---------- diff --git a/expected/test_update.out b/expected/test_update.out index f025283142c..728739fc6d1 100644 --- a/expected/test_update.out +++ b/expected/test_update.out @@ -10,9 +10,8 @@ SET search_path TO s4; CREATE TABLE a(i int); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. -INSERT INTO a SELECT generate_series(1,100000000); -ERROR: schema's disk space quota exceeded with name:s4 -SELECT pg_sleep(20); +INSERT INTO a SELECT generate_series(1,100000); +SELECT pg_sleep(10); pg_sleep ---------- diff --git a/expected/test_vacuum.out b/expected/test_vacuum.out index 5aaddc90552..a4c40423ce7 100644 --- a/expected/test_vacuum.out +++ b/expected/test_vacuum.out @@ -13,9 +13,8 @@ HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sur CREATE TABLE b (i int); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
-INSERT INTO a SELECT generate_series(1,100000000); -ERROR: schema's disk space quota exceeded with name:s6 -SELECT pg_sleep(20); +INSERT INTO a SELECT generate_series(1,100000); +SELECT pg_sleep(10); pg_sleep ---------- diff --git a/sql/prepare.sql b/sql/prepare.sql index 35b1368e02b..63b7c268c34 100644 --- a/sql/prepare.sql +++ b/sql/prepare.sql @@ -13,7 +13,7 @@ SELECT diskquota.set_schema_quota('badquota', '1 MB'); CREATE ROLE testbody; CREATE TABLE badquota.t1(i INT); ALTER TABLE badquota.t1 OWNER TO testbody; -INSERT INTO badquota.t1 SELECT generate_series(0, 100000000); -SELECT pg_sleep(20); +INSERT INTO badquota.t1 SELECT generate_series(0, 100000); +SELECT pg_sleep(10); -- expect fail INSERT INTO badquota.t1 SELECT generate_series(0, 10); diff --git a/sql/test_column.sql b/sql/test_column.sql index 0a9b5882697..d71e6c0df80 100644 --- a/sql/test_column.sql +++ b/sql/test_column.sql @@ -6,7 +6,8 @@ SELECT pg_sleep(20); CREATE TABLE a2(i INT); -- expect fail -INSERT INTO a2 SELECT generate_series(1,100000000); +INSERT INTO a2 SELECT generate_series(1,100000); +SELECT pg_sleep(5); -- expect fail INSERT INTO a2 SELECT generate_series(1,10); ALTER TABLE a2 ADD COLUMN j VARCHAR(50); diff --git a/sql/test_copy.sql b/sql/test_copy.sql index 89295b650de..a3e2a6c300b 100644 --- a/sql/test_copy.sql +++ b/sql/test_copy.sql @@ -6,8 +6,8 @@ SET search_path TO s3; CREATE TABLE c (i int); COPY c FROM '/tmp/csmall.txt'; -- expect failed -INSERT INTO c SELECT generate_series(1,100000000); -SELECT pg_sleep(20); +INSERT INTO c SELECT generate_series(1,100000); +SELECT pg_sleep(10); -- expect copy fail COPY c FROM '/tmp/csmall.txt'; diff --git a/sql/test_delete_quota.sql b/sql/test_delete_quota.sql index 429134c3023..dbcbe113b45 100644 --- a/sql/test_delete_quota.sql +++ b/sql/test_delete_quota.sql @@ -5,8 +5,8 @@ SET search_path TO deleteschema; CREATE TABLE c (i INT); -- expect failed -INSERT INTO c SELECT generate_series(1,100000000); -SELECT pg_sleep(20); +INSERT INTO c SELECT generate_series(1,100000); +SELECT pg_sleep(10); -- expect fail INSERT INTO c SELECT generate_series(1,100); SELECT diskquota.set_schema_quota('deleteschema', '-1 MB'); diff --git a/sql/test_drop_table.sql b/sql/test_drop_table.sql index 7c3b914f55d..80176d58825 100644 --- a/sql/test_drop_table.sql +++ b/sql/test_drop_table.sql @@ -6,7 +6,8 @@ CREATE TABLE a(i INT); CREATE TABLE a2(i INT); INSERT INTO a SELECT generate_series(1,100); -- expect insert fail -INSERT INTO a SELECT generate_series(1,100000000); +INSERT INTO a SELECT generate_series(1,100000); +SELECT pg_sleep(5); -- expect insert fail INSERT INTO a2 SELECT generate_series(1,100); DROP TABLE a; diff --git a/sql/test_extension.sql b/sql/test_extension.sql index 18bcc611b43..8d612d66358 100644 --- a/sql/test_extension.sql +++ b/sql/test_extension.sql @@ -25,7 +25,8 @@ CREATE EXTENSION diskquota; CREATE SCHEMA SX; CREATE TABLE SX.a(i int); SELECT diskquota.set_schema_quota('SX', '1MB'); -INSERT INTO SX.a values(generate_series(0, 100000000)); +INSERT INTO SX.a values(generate_series(0, 100000)); +SELECT pg_sleep(5); INSERT INTO SX.a values(generate_series(0, 10)); DROP TABLE SX.a; @@ -48,7 +49,8 @@ CREATE EXTENSION diskquota; CREATE SCHEMA SX; CREATE TABLE SX.a(i int); SELECT diskquota.set_schema_quota('SX', '1MB'); -INSERT INTO SX.a values(generate_series(0, 100000000)); +INSERT INTO SX.a values(generate_series(0, 100000)); +SELECT pg_sleep(5); INSERT INTO SX.a values(generate_series(0, 10)); DROP TABLE SX.a; @@ -59,7 +61,8 @@ CREATE EXTENSION diskquota; CREATE 
SCHEMA SX; CREATE TABLE SX.a(i int); SELECT diskquota.set_schema_quota('SX', '1MB'); -INSERT INTO SX.a values(generate_series(0, 100000000)); +INSERT INTO SX.a values(generate_series(0, 100000)); +SELECT pg_sleep(5); INSERT INTO SX.a values(generate_series(0, 10)); DROP TABLE SX.a; @@ -70,7 +73,8 @@ CREATE EXTENSION diskquota; CREATE SCHEMA SX; CREATE TABLE SX.a(i int); SELECT diskquota.set_schema_quota('SX', '1MB'); -INSERT INTO SX.a values(generate_series(0, 100000000)); +INSERT INTO SX.a values(generate_series(0, 100000)); +SELECT pg_sleep(5); INSERT INTO SX.a values(generate_series(0, 10)); DROP TABLE SX.a; @@ -81,7 +85,8 @@ CREATE EXTENSION diskquota; CREATE SCHEMA SX; CREATE TABLE SX.a(i int); SELECT diskquota.set_schema_quota('SX', '1MB'); -INSERT INTO SX.a values(generate_series(0, 100000000)); +INSERT INTO SX.a values(generate_series(0, 100000)); +SELECT pg_sleep(5); INSERT INTO SX.a values(generate_series(0, 10)); DROP TABLE SX.a; @@ -92,7 +97,8 @@ CREATE EXTENSION diskquota; CREATE SCHEMA SX; CREATE TABLE SX.a(i int); SELECT diskquota.set_schema_quota('SX', '1MB'); -INSERT INTO SX.a values(generate_series(0, 100000000)); +INSERT INTO SX.a values(generate_series(0, 100000)); +SELECT pg_sleep(5); INSERT INTO SX.a values(generate_series(0, 10)); DROP TABLE SX.a; @@ -103,7 +109,8 @@ CREATE EXTENSION diskquota; CREATE SCHEMA SX; CREATE TABLE SX.a(i int); SELECT diskquota.set_schema_quota('SX', '1MB'); -INSERT INTO SX.a values(generate_series(0, 100000000)); +INSERT INTO SX.a values(generate_series(0, 100000)); +SELECT pg_sleep(5); INSERT INTO SX.a values(generate_series(0, 10)); DROP TABLE SX.a; @@ -114,7 +121,8 @@ CREATE EXTENSION diskquota; CREATE SCHEMA SX; CREATE TABLE SX.a(i int); SELECT diskquota.set_schema_quota('SX', '1MB'); -INSERT INTO SX.a values(generate_series(0, 100000000)); +INSERT INTO SX.a values(generate_series(0, 100000)); +SELECT pg_sleep(5); INSERT INTO SX.a values(generate_series(0, 10)); DROP TABLE SX.a; diff --git a/sql/test_fast_disk_check.sql b/sql/test_fast_disk_check.sql index 4f20fb37036..12c0704cab5 100644 --- a/sql/test_fast_disk_check.sql +++ b/sql/test_fast_disk_check.sql @@ -3,8 +3,8 @@ CREATE SCHEMA s1; SET search_path to s1; CREATE TABLE a(i int); -INSERT INTO a SELECT generate_series(1,2000000); -SELECT pg_sleep(20); +INSERT INTO a SELECT generate_series(1,200000); +SELECT pg_sleep(10); SELECT (pg_database_size(oid)-dbsize)/dbsize < 0.1 FROM pg_database, diskquota.database_size_view WHERE datname='contrib_regression'; RESET search_path; DROP TABLE s1.a; diff --git a/sql/test_insert_after_drop.sql b/sql/test_insert_after_drop.sql index 61362a38dd7..3481e516358 100644 --- a/sql/test_insert_after_drop.sql +++ b/sql/test_insert_after_drop.sql @@ -8,8 +8,8 @@ SET search_path TO sdrtbl; CREATE TABLE a(i int); INSERT INTO a SELECT generate_series(1,100); -- expect insert fail -INSERT INTO a SELECT generate_series(1,100000000); -SELECT pg_sleep(20); +INSERT INTO a SELECT generate_series(1,100000); +SELECT pg_sleep(10); INSERT INTO a SELECT generate_series(1,100); DROP EXTENSION diskquota; -- no sleep, it will take effect immediately diff --git a/sql/test_partition.sql b/sql/test_partition.sql index 525685c268a..bde27c9060d 100644 --- a/sql/test_partition.sql +++ b/sql/test_partition.sql @@ -18,7 +18,8 @@ INSERT INTO measurement SELECT generate_series(1,100), '2006-02-02' ,1,1; SELECT pg_sleep(20); INSERT INTO measurement SELECT 1, '2006-02-02' ,1,1; -- expect insert fail -INSERT INTO measurement SELECT generate_series(1,100000000), '2006-03-02' ,1,1; 
+INSERT INTO measurement SELECT generate_series(1,100000), '2006-03-02' ,1,1; +SELECT pg_sleep(5); -- expect insert fail INSERT INTO measurement SELECT 1, '2006-02-02' ,1,1; -- expect insert fail diff --git a/sql/test_rename.sql b/sql/test_rename.sql index aec3d525ccf..1411ecf5f53 100644 --- a/sql/test_rename.sql +++ b/sql/test_rename.sql @@ -4,7 +4,8 @@ SELECT diskquota.set_schema_quota('srs1', '1 MB'); set search_path to srs1; CREATE TABLE a(i int); -- expect insert fail -INSERT INTO a SELECT generate_series(1,100000000); +INSERT INTO a SELECT generate_series(1,100000); +SELECT pg_sleep(5); -- expect insert fail INSERT INTO a SELECT generate_series(1,10); ALTER SCHEMA srs1 RENAME TO srs2; @@ -30,7 +31,8 @@ CREATE TABLE a(i int); ALTER TABLE a OWNER TO srerole; -- expect insert fail -INSERT INTO a SELECT generate_series(1,100000000); +INSERT INTO a SELECT generate_series(1,100000); +SELECT pg_sleep(5); -- expect insert fail INSERT INTO a SELECT generate_series(1,10); ALTER ROLE srerole RENAME TO srerole2; diff --git a/sql/test_reschema.sql b/sql/test_reschema.sql index 8f497726802..723bcb15f91 100644 --- a/sql/test_reschema.sql +++ b/sql/test_reschema.sql @@ -4,7 +4,8 @@ SELECT diskquota.set_schema_quota('srE', '1 MB'); SET search_path TO srE; CREATE TABLE a(i int); -- expect insert fail -INSERT INTO a SELECT generate_series(1,1000000000); +INSERT INTO a SELECT generate_series(1,100000); +SELECT pg_sleep(5); -- expect insert fail when exceed quota limit INSERT INTO a SELECT generate_series(1,1000); -- set schema quota larger diff --git a/sql/test_role.sql b/sql/test_role.sql index 3f7a360c242..8aaa3a9bca3 100644 --- a/sql/test_role.sql +++ b/sql/test_role.sql @@ -14,7 +14,8 @@ SELECT diskquota.set_role_quota('u1', '1 MB'); INSERT INTO b SELECT generate_series(1,100); -- expect insert fail -INSERT INTO b SELECT generate_series(1,100000000); +INSERT INTO b SELECT generate_series(1,100000); +SELECT pg_sleep(5); -- expect insert fail INSERT INTO b SELECT generate_series(1,100); -- expect insert fail diff --git a/sql/test_schema.sql b/sql/test_schema.sql index 84a7dfe10cf..3bce5a08fad 100644 --- a/sql/test_schema.sql +++ b/sql/test_schema.sql @@ -6,7 +6,8 @@ SET search_path TO s1; CREATE TABLE a(i int); INSERT INTO a SELECT generate_series(1,100); -- expect insert fail -INSERT INTO a SELECT generate_series(1,100000000); +INSERT INTO a SELECT generate_series(1,100000); +SELECT pg_sleep(5); -- expect insert fail INSERT INTO a SELECT generate_series(1,100); CREATE TABLE a2(i int); diff --git a/sql/test_temp_role.sql b/sql/test_temp_role.sql index cd7108a5c4e..f53ec7ed125 100644 --- a/sql/test_temp_role.sql +++ b/sql/test_temp_role.sql @@ -10,7 +10,8 @@ CREATE TEMP TABLE ta(i int); ALTER TABLE ta OWNER TO u3temp; -- expected failed: fill temp table -INSERT INTO ta SELECT generate_series(1,100000000); +INSERT INTO ta SELECT generate_series(1,100000); +SELECT pg_sleep(5); -- expected failed: INSERT INTO a SELECT generate_series(1,100); DROP TABLE ta; diff --git a/sql/test_toast.sql b/sql/test_toast.sql index e7620616216..89a62013060 100644 --- a/sql/test_toast.sql +++ b/sql/test_toast.sql @@ -7,15 +7,15 @@ INSERT INTO a5 SELECT (SELECT string_agg(chr(floor(random() * 26)::int + 65), '') FROM generate_series(1,10000)) -FROM generate_series(1,10); +FROM generate_series(1,10000); SELECT pg_sleep(20); -- expect insert toast fail INSERT INTO a5 SELECT (SELECT string_agg(chr(floor(random() * 26)::int + 65), '') - FROM generate_series(1,100000)) -FROM generate_series(1,1000000); + FROM 
generate_series(1,1000)) +FROM generate_series(1,1000); DROP TABLE a5; RESET search_path; diff --git a/sql/test_truncate.sql b/sql/test_truncate.sql index 2a68081932b..5c4e616fcff 100644 --- a/sql/test_truncate.sql +++ b/sql/test_truncate.sql @@ -4,8 +4,8 @@ SELECT diskquota.set_schema_quota('s7', '1 MB'); SET search_path TO s7; CREATE TABLE a (i int); CREATE TABLE b (i int); -INSERT INTO a SELECT generate_series(1,100000000); -SELECT pg_sleep(20); +INSERT INTO a SELECT generate_series(1,100000); +SELECT pg_sleep(10); -- expect insert fail INSERT INTO a SELECT generate_series(1,30); INSERT INTO b SELECT generate_series(1,30); diff --git a/sql/test_update.sql b/sql/test_update.sql index c34bff16cbc..c33da4bb954 100644 --- a/sql/test_update.sql +++ b/sql/test_update.sql @@ -3,8 +3,8 @@ CREATE SCHEMA s4; SELECT diskquota.set_schema_quota('s4', '1 MB'); SET search_path TO s4; CREATE TABLE a(i int); -INSERT INTO a SELECT generate_series(1,100000000); -SELECT pg_sleep(20); +INSERT INTO a SELECT generate_series(1,100000); +SELECT pg_sleep(10); -- expect update fail. UPDATE a SET i = 100; DROP TABLE a; diff --git a/sql/test_vacuum.sql b/sql/test_vacuum.sql index 8fd2a90bed2..ddc444262a9 100644 --- a/sql/test_vacuum.sql +++ b/sql/test_vacuum.sql @@ -4,8 +4,8 @@ SELECT diskquota.set_schema_quota('s6', '1 MB'); SET search_path TO s6; CREATE TABLE a (i int); CREATE TABLE b (i int); -INSERT INTO a SELECT generate_series(1,100000000); -SELECT pg_sleep(20); +INSERT INTO a SELECT generate_series(1,100000); +SELECT pg_sleep(10); -- expect insert fail INSERT INTO a SELECT generate_series(1,10); -- expect insert fail From 89e9f207dc995e22962e740e81e5c5ba82d1b982 Mon Sep 17 00:00:00 2001 From: Hao Wu <37101401+gfphoenix78@users.noreply.github.com> Date: Mon, 25 Mar 2019 16:58:34 +0800 Subject: [PATCH 015/330] Fix format qualifier for Oid & use >=FirstNormalObjectId instead of > (#12) --- diskquota.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/diskquota.c b/diskquota.c index cf00dac4abb..ab933d9109a 100644 --- a/diskquota.c +++ b/diskquota.c @@ -469,7 +469,7 @@ start_workers_from_dblist() } if (start_worker_by_dboid(dbid) < 1) { - elog(WARNING, "[diskquota]: start worker process of database(%d) failed", dbid); + elog(WARNING, "[diskquota]: start worker process of database(%u) failed", dbid); } num++; } @@ -488,7 +488,7 @@ add_db_to_config(Oid dbid) StringInfoData str; initStringInfo(&str); - appendStringInfo(&str, "insert into diskquota_namespace.database_list values(%d);", dbid); + appendStringInfo(&str, "insert into diskquota_namespace.database_list values(%u);", dbid); exec_simple_spi(str.data, SPI_OK_INSERT); return true; } @@ -499,7 +499,7 @@ del_db_from_config(Oid dbid) StringInfoData str; initStringInfo(&str); - appendStringInfo(&str, "delete from diskquota_namespace.database_list where dbid=%d;", dbid); + appendStringInfo(&str, "delete from diskquota_namespace.database_list where dbid=%u;", dbid); exec_simple_spi(str.data, SPI_OK_DELETE); } @@ -563,7 +563,7 @@ on_add_db(Oid dbid, MessageResult * code) if (start_worker_by_dboid(dbid) < 1) { *code = ERR_START_WORKER; - elog(ERROR, "[diskquota] failed to start worker - dbid=%d", dbid); + elog(ERROR, "[diskquota] failed to start worker - dbid=%u", dbid); } } @@ -797,7 +797,7 @@ init_table_size_table(PG_FUNCTION_ARGS) appendStringInfo(&buf, "insert into diskquota.table_size " "select oid, pg_total_relation_size(oid) from pg_class " - "where oid> %u and (relkind='r' or relkind='m');", + "where oid>= %u and (relkind='r' or 
relkind='m');", FirstNormalObjectId); ret = SPI_execute(buf.data, false, 0); if (ret != SPI_OK_INSERT)

From 362cdd14036e158c2546e1854d9f00a9030fc052 Mon Sep 17 00:00:00 2001
From: Hubert Zhang
Date: Sat, 23 Mar 2019 03:26:14 +0000
Subject: [PATCH 016/330] Fix active_tables_map search key.

--- gp_activetable.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/gp_activetable.c b/gp_activetable.c index 04430bfb7ca..a5ad65d1df1 100644 --- a/gp_activetable.c +++ b/gp_activetable.c @@ -126,8 +126,8 @@ init_shm_worker_active_tables(void) memset(&ctl, 0, sizeof(ctl)); - ctl.keysize = sizeof(DiskQuotaActiveTableEntry); - ctl.entrysize = sizeof(DiskQuotaActiveTableEntry); + ctl.keysize = sizeof(DiskQuotaActiveTableFileEntry); + ctl.entrysize = sizeof(DiskQuotaActiveTableFileEntry); ctl.hash = tag_hash; active_tables_map = ShmemInitHash("active_tables", @@ -527,7 +527,7 @@ get_active_tables(void) active_table_entry->tableoid = relOid; active_table_entry->tablesize = 0; } - hash_search(local_active_table_file_map, &active_table_file_entry, HASH_REMOVE, NULL); + hash_search(local_active_table_file_map, active_table_file_entry, HASH_REMOVE, NULL); } } /* If cannot convert relfilenode to relOid, put them back and wait for the next check. */ @@ -539,7 +539,7 @@ get_active_tables(void) LWLockAcquire(diskquota_locks.active_table_lock, LW_EXCLUSIVE); while ((active_table_file_entry = (DiskQuotaActiveTableFileEntry *) hash_seq_search(&iter)) != NULL) { - entry = hash_search(active_tables_map, &active_table_file_entry, HASH_ENTER_NULL, &found); + entry = hash_search(active_tables_map, active_table_file_entry, HASH_ENTER_NULL, &found); if (entry) *entry = *active_table_file_entry; }

From ac4d7a5cf5cf7125f0ccbc75c0b6236e13a97088 Mon Sep 17 00:00:00 2001
From: Weinan Wang
Date: Mon, 25 Mar 2019 17:33:44 +0800
Subject: [PATCH 017/330] initialize BackgroundWorker in _PG_init function

The BackgroundWorker struct needs to be zeroed with memset() in _PG_init() right after it is declared. Otherwise, RegisterBackgroundWorker() may hit a segmentation fault on the uninitialized fields.

--- diskquota.c | 2 ++ 1 file changed, 2 insertions(+)

diff --git a/diskquota.c b/diskquota.c index ab933d9109a..8ef2d038242 100644 --- a/diskquota.c +++ b/diskquota.c @@ -128,6 +128,8 @@ _PG_init(void) { BackgroundWorker worker; + memset(&worker, 0, sizeof(BackgroundWorker)); + /* diskquota.so must be in shared_preload_libraries to init SHM.
*/ if (!process_shared_preload_libraries_in_progress) elog(ERROR, "diskquota.so not in shared_preload_libraries.");

From b8ea619c43bb53f3fbbf640fbe7b1c6082114d10 Mon Sep 17 00:00:00 2001
From: Hubert Zhang
Date: Wed, 27 Mar 2019 11:49:13 +0800
Subject: [PATCH 018/330] Add diskquota standby test

--- concourse/scripts/test_diskquota.sh | 5 +++++ 1 file changed, 5 insertions(+)

diff --git a/concourse/scripts/test_diskquota.sh b/concourse/scripts/test_diskquota.sh index 0acccfc7423..5d58977d3e0 100755 --- a/concourse/scripts/test_diskquota.sh +++ b/concourse/scripts/test_diskquota.sh @@ -24,6 +24,11 @@ function test(){ pushd diskquota_src [ -s regression.diffs ] && cat regression.diffs && exit 1 make installcheck + ps -ef | grep postgres| grep qddir| cut -d ' ' -f 6 | xargs kill -9 + export PGPORT=16432 + rm /tmp/.s.PGSQL.15432* + gpactivatestandby -ad ${TOP_DIR}/gpdb_src/gpAux/gpdemo/datadirs/standby + make installcheck popd EOF export MASTER_DATA_DIRECTORY=${TOP_DIR}/gpdb_src/gpAux/gpdemo/datadirs/qddir/demoDataDir-1

From c8085cf136ae1edf8ab718372d9412a60381d9de Mon Sep 17 00:00:00 2001
From: Hubert Zhang
Date: Fri, 29 Mar 2019 13:39:52 +0800
Subject: [PATCH 019/330] Add try/catch for diskquota workers accessing DB.

diskquota workers may encounter errors when sending SPI queries to the DB. These errors should be caught so that the worker process can refetch the database in the next loop instead of exiting directly.

Co-authored-by: Haozhou Wang

--- diskquota.c | 80 +++++++++++-------- diskquota.h | 6 +- enforcement.c | 1 - gp_activetable.c | 9 ++- quotamodel.c | 198 +++++++++++++++++++++++++++++++++++------------ 5 files changed, 208 insertions(+), 86 deletions(-)

diff --git a/diskquota.c b/diskquota.c index 8ef2d038242..0078a994413 100644 --- a/diskquota.c +++ b/diskquota.c @@ -71,9 +71,9 @@ static volatile sig_atomic_t got_sigterm = false; static volatile sig_atomic_t got_sigusr1 = false; /* GUC variables */ -int diskquota_naptime = 0; -int diskquota_max_active_tables = 0; -static bool diskquota_enable_hardlimit = false; +int diskquota_naptime = 0; +int diskquota_max_active_tables = 0; +static bool diskquota_enable_hardlimit = false; typedef struct DiskQuotaWorkerEntry DiskQuotaWorkerEntry; @@ -166,18 +166,18 @@ _PG_init(void) NULL); DefineCustomBoolVariable("diskquota.enable_hardlimit", - "Use in-query diskquota enforcement", - NULL, - &diskquota_enable_hardlimit, - false, - PGC_SIGHUP, - 0, - NULL, - NULL, - NULL); + "Use in-query diskquota enforcement", + NULL, + &diskquota_enable_hardlimit, + false, + PGC_SIGHUP, + 0, + NULL, + NULL, + NULL); /* start disk quota launcher only on master */ - if (Gp_role != GP_ROLE_DISPATCH) + if (!IS_QUERY_DISPATCHER()) { return; } @@ -206,8 +206,8 @@ _PG_fini(void) /* * Signal handler for SIGTERM - * Set a flag to let the main loop to terminate, and set our latch to wake - * it up. + * Set a flag to let the main loop to terminate, and set our latch to wake + * it up. */ static void disk_quota_sigterm(SIGNAL_ARGS) @@ -223,8 +223,8 @@ disk_quota_sigterm(SIGNAL_ARGS) /* * Signal handler for SIGHUP - * Set a flag to tell the main loop to reread the config file, and set - our latch to wake it up.
*/ static void disk_quota_sighup(SIGNAL_ARGS) @@ -240,7 +240,7 @@ disk_quota_sighup(SIGNAL_ARGS) /* * Signal handler for SIGUSR1 - * Set a flag to tell the launcher to handle message box + * Set a flag to tell the launcher to handle message box */ static void disk_quota_sigusr1(SIGNAL_ARGS) @@ -297,8 +297,9 @@ disk_quota_worker_main(Datum main_arg) while (!got_sigterm) { int rc; - + CHECK_FOR_INTERRUPTS(); + /* * Check whether the state is in ready mode. The state would be * unknown, when you `create extension diskquota` at the first time. @@ -324,6 +325,7 @@ disk_quota_worker_main(Datum main_arg) int rc; CHECK_FOR_INTERRUPTS(); + /* * Background workers mustn't call usleep() or any direct equivalent: * instead, they may wait on their process latch, which sleeps as @@ -438,17 +440,22 @@ start_workers_from_dblist() int ret; int i; + /* + * Don't catch errors in start_workers_from_dblist. Since this is the + * startup worker for diskquota launcher. If error happens, we just let + * launcher exits. + */ StartTransactionCommand(); PushActiveSnapshot(GetTransactionSnapshot()); ret = SPI_connect(); if (ret != SPI_OK_CONNECT) - elog(ERROR, "connect error, code=%d", ret); - ret = SPI_execute("select dbid from diskquota_namespace.database_list;", true, 0); + elog(ERROR, "[diskquota launcher] SPI connect error, code=%d", ret); + ret = SPI_execute("[diskquota launcher] select dbid from diskquota_namespace.database_list;", true, 0); if (ret != SPI_OK_SELECT) elog(ERROR, "select diskquota_namespace.database_list"); tupdesc = SPI_tuptable->tupdesc; if (tupdesc->natts != 1 || tupdesc->attrs[0]->atttypid != OIDOID) - elog(ERROR, "[diskquota] table database_list corrupt, laucher will exit"); + elog(ERROR, "[diskquota launcher] table database_list corrupt, laucher will exit"); for (i = 0; num < SPI_processed; i++) { @@ -460,9 +467,7 @@ start_workers_from_dblist() tup = SPI_tuptable->vals[i]; dat = SPI_getbinval(tup, tupdesc, 1, &isnull); if (isnull) - { - elog(ERROR, "dbid cann't be null"); - } + elog(ERROR, "[diskquota launcher] dbid cann't be null in table database_list"); dbid = DatumGetObjectId(dat); if (!is_valid_dbid(dbid)) { @@ -470,9 +475,7 @@ start_workers_from_dblist() continue; } if (start_worker_by_dboid(dbid) < 1) - { - elog(WARNING, "[diskquota]: start worker process of database(%u) failed", dbid); - } + elog(ERROR, "[diskquota launcher] start worker process of database(%u) failed", dbid); num++; } num_db = num; @@ -608,7 +611,7 @@ disk_quota_launcher_main(Datum main_arg) LWLockRelease(diskquota_locks.message_box_lock); /* Connect to our database */ BackgroundWorkerInitializeConnection("diskquota", NULL); - + create_monitor_db_table(); memset(&hash_ctl, 0, sizeof(hash_ctl)); @@ -631,6 +634,7 @@ disk_quota_launcher_main(Datum main_arg) int rc; CHECK_FOR_INTERRUPTS(); + /* * Background workers mustn't call usleep() or any direct equivalent: * instead, they may wait on their process latch, which sleeps as @@ -773,6 +777,11 @@ init_table_size_table(PG_FUNCTION_ARGS) RangeVar *rv; Relation rel; + /* + * If error happens in init_table_size_table, just return error messages + * to the client side. So there is no need to catch the error. 
+ */ + /* ensure table diskquota.state exists */ rv = makeRangeVar("diskquota", "state", -1); rel = heap_openrv_extended(rv, AccessShareLock, true); @@ -864,6 +873,10 @@ set_quota_internal(Oid targetoid, int64 quota_limit_mb, QuotaType type) " and quotatype =%d", targetoid, type); + /* + * If error happens in set_quota_internal, just return error messages to + * the client side. So there is no need to catch the error. + */ SPI_connect(); ret = SPI_execute(buf.data, true, 0); @@ -1061,7 +1074,7 @@ diskquota_start_worker(PG_FUNCTION_ARGS) { int rc; - /* + /* * Lock on extension_lock to avoid multiple backend create diskquota * extension at the same time. */ @@ -1098,7 +1111,7 @@ diskquota_start_worker(PG_FUNCTION_ARGS) } LWLockAcquire(diskquota_locks.message_box_lock, LW_SHARED); if (message_box->result != ERR_OK) - { + { LWLockRelease(diskquota_locks.message_box_lock); LWLockRelease(diskquota_locks.extension_lock); elog(ERROR, "[diskquota] failed to create diskquota extension: %s", err_code_to_err_message((MessageResult) message_box->result)); @@ -1112,6 +1125,7 @@ static void process_message_box_internal(MessageResult * code, MessageBox local_message_box) { int old_num_db = num_db; + PG_TRY(); { switch (local_message_box.cmd) @@ -1153,12 +1167,12 @@ static void process_message_box() { MessageResult code = ERR_UNKNOWN; - MessageBox local_message_box; + MessageBox local_message_box; LWLockAcquire(diskquota_locks.message_box_lock, LW_SHARED); memcpy(&local_message_box, message_box, sizeof(MessageBox)); LWLockRelease(diskquota_locks.message_box_lock); - + /* create/drop extension message must be valid */ if (local_message_box.req_pid == 0 || local_message_box.launcher_pid != MyProcPid) { diff --git a/diskquota.h b/diskquota.h index 94507851cf6..300f82b738c 100644 --- a/diskquota.h +++ b/diskquota.h @@ -44,11 +44,13 @@ typedef struct DiskQuotaLocks DiskQuotaLocks; struct MessageBox { int launcher_pid; /* diskquota launcher pid */ - int req_pid; /* pid of the QD process which create/drop diskquota extension */ + int req_pid; /* pid of the QD process which create/drop + * diskquota extension */ int cmd; /* message command type, see MessageCommand */ int result; /* message result writen by launcher, see * MessageResult */ - int dbid; /* dbid of create/drop diskquota extensionstatement */ + int dbid; /* dbid of create/drop diskquota + * extensionstatement */ }; enum MessageCommand diff --git a/enforcement.c b/enforcement.c index 431c6df033b..41d3c2a90b1 100644 --- a/enforcement.c +++ b/enforcement.c @@ -71,4 +71,3 @@ quota_check_ExecCheckRTPerms(List *rangeTable, bool ereport_on_violation) } return true; } - diff --git a/gp_activetable.c b/gp_activetable.c index a5ad65d1df1..e53de53af81 100644 --- a/gp_activetable.c +++ b/gp_activetable.c @@ -530,11 +530,16 @@ get_active_tables(void) hash_search(local_active_table_file_map, active_table_file_entry, HASH_REMOVE, NULL); } } - /* If cannot convert relfilenode to relOid, put them back and wait for the next check. */ + + /* + * If cannot convert relfilenode to relOid, put them back and wait for the + * next check. 
+ */ if (hash_get_num_entries(local_active_table_file_map) > 0) { - bool found; + bool found; DiskQuotaActiveTableFileEntry *entry; + hash_seq_init(&iter, local_active_table_file_map); LWLockAcquire(diskquota_locks.active_table_lock, LW_EXCLUSIVE); while ((active_table_file_entry = (DiskQuotaActiveTableFileEntry *) hash_seq_search(&iter)) != NULL) diff --git a/quotamodel.c b/quotamodel.c index 7b389bb950a..5d17e844f06 100644 --- a/quotamodel.c +++ b/quotamodel.c @@ -135,7 +135,7 @@ static void update_role_map(Oid owneroid, int64 updatesize); static void remove_namespace_map(Oid namespaceoid); static void remove_role_map(Oid owneroid); static bool load_quotas(void); -static bool do_load_quotas(void); +static void do_load_quotas(void); static bool do_check_diskquota_state_is_ready(void); static Size DiskQuotaShmemSize(void); @@ -360,19 +360,52 @@ do_check_diskquota_state_is_ready(void) bool check_diskquota_state_is_ready(void) { - bool ret; + bool is_ready = false; + bool connected = false; + bool pushed_active_snap = false; + bool error_happens = false; StartTransactionCommand(); - SPI_connect(); - PushActiveSnapshot(GetTransactionSnapshot()); - ret = do_check_diskquota_state_is_ready(); - - SPI_finish(); - PopActiveSnapshot(); - CommitTransactionCommand(); + /* + * Cache Errors during SPI functions, for example a segment may be down + * and current SPI execute will fail. diskquota worker process should + * tolerate this kind of errors and continue to check at the next loop. + */ + PG_TRY(); + { + if (SPI_OK_CONNECT != SPI_connect()) + { + ereport(ERROR, + (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("unable to connect to execute internal query"))); + } + connected = true; + PushActiveSnapshot(GetTransactionSnapshot()); + pushed_active_snap = true; + is_ready = do_check_diskquota_state_is_ready(); + } + PG_CATCH(); + { + /* Prevents interrupts while cleaning up */ + HOLD_INTERRUPTS(); + EmitErrorReport(); + FlushErrorState(); + error_happens = true; + /* Now we can allow interrupts again */ + RESUME_INTERRUPTS(); + } + PG_END_TRY(); + if (connected) + SPI_finish(); + if (pushed_active_snap) + PopActiveSnapshot(); + if (error_happens) + CommitTransactionCommand(); + else + AbortCurrentTransaction(); - return ret; + return is_ready; } /* @@ -383,7 +416,7 @@ check_diskquota_state_is_ready(void) void refresh_disk_quota_model(bool is_init) { - elog(LOG,"[diskquota] start refresh_disk_quota_model"); + elog(LOG, "[diskquota] start refresh_disk_quota_model"); /* skip refresh model when load_quotas failed */ if (load_quotas()) { @@ -398,22 +431,58 @@ refresh_disk_quota_model(bool is_init) static void refresh_disk_quota_usage(bool force) { + bool connected = false; + bool pushed_active_snap = false; + bool ret = true; + StartTransactionCommand(); - SPI_connect(); - PushActiveSnapshot(GetTransactionSnapshot()); - - /* recalculate the disk usage of table, schema and role */ - calculate_table_disk_usage(force); - calculate_schema_disk_usage(); - calculate_role_disk_usage(); - /* flush local table_size_map to user table table_size */ - flush_to_table_size(); - /* copy local black map back to shared black map */ - flush_local_black_map(); - - SPI_finish(); - PopActiveSnapshot(); - CommitTransactionCommand(); + + /* + * Cache Errors during SPI functions, for example a segment may be down + * and current SPI execute will fail. diskquota worker process should + * tolerate this kind of errors and continue to check at the next loop. 
+ */ + PG_TRY(); + { + if (SPI_OK_CONNECT != SPI_connect()) + { + ereport(ERROR, + (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("unable to connect to execute internal query"))); + } + connected = true; + PushActiveSnapshot(GetTransactionSnapshot()); + pushed_active_snap = true; + /* recalculate the disk usage of table, schema and role */ + calculate_table_disk_usage(force); + calculate_schema_disk_usage(); + calculate_role_disk_usage(); + /* flush local table_size_map to user table table_size */ + flush_to_table_size(); + /* copy local black map back to shared black map */ + flush_local_black_map(); + } + PG_CATCH(); + { + /* Prevents interrupts while cleaning up */ + HOLD_INTERRUPTS(); + EmitErrorReport(); + FlushErrorState(); + ret = false; + /* Now we can allow interrupts again */ + RESUME_INTERRUPTS(); + } + PG_END_TRY(); + if (connected) + SPI_finish(); + if (pushed_active_snap) + PopActiveSnapshot(); + if (ret) + CommitTransactionCommand(); + else + AbortCurrentTransaction(); + + return; } /* @@ -803,17 +872,17 @@ calculate_role_disk_usage(void) /* * Make sure a StringInfo's string is no longer than 'nchars' characters. */ -static void +static void truncateStringInfo(StringInfo str, int nchars) { - if (str && - str->len > nchars) - { - Assert(str->data != NULL && - str->len <= str->maxlen); - str->len = nchars; - str->data[nchars] = '\0'; - } + if (str && + str->len > nchars) + { + Assert(str->data != NULL && + str->len <= str->maxlen); + str->len = nchars; + str->data[nchars] = '\0'; + } } /* @@ -891,23 +960,57 @@ flush_to_table_size(void) static bool load_quotas(void) { - bool ret; + bool connected = false; + bool pushed_active_snap = false; + bool ret = true; + StartTransactionCommand(); - SPI_connect(); - PushActiveSnapshot(GetTransactionSnapshot()); - ret = do_load_quotas(); + /* + * Cache Errors during SPI functions, for example a segment may be down + * and current SPI execute will fail. diskquota worker process should + * tolerate this kind of errors and continue to check at the next loop. + */ + PG_TRY(); + { + if (SPI_OK_CONNECT != SPI_connect()) + { + ereport(ERROR, + (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("unable to connect to execute internal query"))); + } + connected = true; + PushActiveSnapshot(GetTransactionSnapshot()); + pushed_active_snap = true; + do_load_quotas(); + } + PG_CATCH(); + { + /* Prevents interrupts while cleaning up */ + HOLD_INTERRUPTS(); + EmitErrorReport(); + FlushErrorState(); + ret = false; + /* Now we can allow interrupts again */ + RESUME_INTERRUPTS(); + } + PG_END_TRY(); + if (connected) + SPI_finish(); + if (pushed_active_snap) + PopActiveSnapshot(); + if (ret) + CommitTransactionCommand(); + else + AbortCurrentTransaction(); - SPI_finish(); - PopActiveSnapshot(); - CommitTransactionCommand(); return ret; } /* * Load quotas from diskquota configuration table(quota_config). */ -static bool +static void do_load_quotas(void) { int ret; @@ -925,12 +1028,12 @@ do_load_quotas(void) if (!rel) { /* configuration table is missing. */ - elog(LOG, "[diskquota] configuration table \"quota_config\" is missing in database \"%s\"," + elog(ERROR, "[diskquota] configuration table \"quota_config\" is missing in database \"%s\"," " please recreate diskquota extension", get_database_name(MyDatabaseId)); - return false; } heap_close(rel, AccessShareLock); + /* * TODO: we should skip to reload quota config when there is no change in * quota.config. 
A flag in shared memory could be used to detect the quota @@ -963,10 +1066,9 @@ do_load_quotas(void) ((tupdesc)->attrs[1])->atttypid != INT4OID || ((tupdesc)->attrs[2])->atttypid != INT8OID) { - elog(LOG, "[diskquota] configuration table \"quota_config\" is corrupted in database \"%s\"," + elog(ERROR, "[diskquota] configuration table \"quota_config\" is corrupted in database \"%s\"," " please recreate diskquota extension", get_database_name(MyDatabaseId)); - return false; } for (i = 0; i < SPI_processed; i++) @@ -1008,7 +1110,7 @@ do_load_quotas(void) quota_entry->limitsize = quota_limit_mb; } } - return true; + return; } /*

From 8659ab20ff856c9523163fd6f4cc6e38f93de8fe Mon Sep 17 00:00:00 2001
From: Haozhou Wang
Date: Tue, 2 Apr 2019 14:38:17 +0800
Subject: [PATCH 020/330] Add test when primary segment fails

1. Add a new test suite to make sure diskquota still works when a primary segment fails and its mirror segment is switched to primary.

--- diskquota.c | 2 +- diskquota_schedule | 1 + expected/test_insert_after_drop.out | 8 ++- expected/test_primary_failure.out | 95 +++++++++++++++++++++++++++++ sql/test_insert_after_drop.sql | 3 +- sql/test_primary_failure.sql | 58 ++++++++++++++++++ 6 files changed, 164 insertions(+), 3 deletions(-) create mode 100644 expected/test_primary_failure.out create mode 100644 sql/test_primary_failure.sql

diff --git a/diskquota.c b/diskquota.c index 0078a994413..9f51d23d62b 100644 --- a/diskquota.c +++ b/diskquota.c @@ -450,7 +450,7 @@ start_workers_from_dblist() ret = SPI_connect(); if (ret != SPI_OK_CONNECT) elog(ERROR, "[diskquota launcher] SPI connect error, code=%d", ret); - ret = SPI_execute("[diskquota launcher] select dbid from diskquota_namespace.database_list;", true, 0); + ret = SPI_execute("select dbid from diskquota_namespace.database_list;", true, 0); if (ret != SPI_OK_SELECT) elog(ERROR, "select diskquota_namespace.database_list");

diff --git a/diskquota_schedule b/diskquota_schedule index 594ff061f6f..5f5f97accc4 100644 --- a/diskquota_schedule +++ b/diskquota_schedule @@ -6,6 +6,7 @@ test: test_truncate test: test_delete_quota test: test_partition test: test_vacuum +test: test_primary_failure test: test_extension test: clean test: test_insert_after_drop

diff --git a/expected/test_insert_after_drop.out b/expected/test_insert_after_drop.out index bd12e86283e..49440f46f0d 100644 --- a/expected/test_insert_after_drop.out +++ b/expected/test_insert_after_drop.out @@ -25,7 +25,13 @@ SELECT pg_sleep(10); INSERT INTO a SELECT generate_series(1,100); ERROR: schema's disk space quota exceeded with name:sdrtbl DROP EXTENSION diskquota; --- no sleep, it will take effect immediately +-- sleep 1 second in case of system slow +SELECT pg_sleep(1); + pg_sleep +---------- + +(1 row) + INSERT INTO a SELECT generate_series(1,100); DROP TABLE a; \c postgres

diff --git a/expected/test_primary_failure.out b/expected/test_primary_failure.out new file mode 100644 index 00000000000..8909637032e --- /dev/null +++ b/expected/test_primary_failure.out @@ -0,0 +1,95 @@ +CREATE SCHEMA ftsr; +SELECT diskquota.set_schema_quota('ftsr', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +SET search_path TO ftsr; +create or replace language plpythonu; +-- +-- pg_ctl: +-- datadir: data directory of process to target with `pg_ctl` +-- command: commands valid for `pg_ctl` +-- command_mode: modes valid for `pg_ctl -m` +-- +create or replace function pg_ctl(datadir text, command text, command_mode text default 'immediate') +returns
text as $$ + import subprocess + if command not in ('stop', 'restart'): + return 'Invalid command input' + + cmd = 'pg_ctl -l postmaster.log -D %s ' % datadir + cmd = cmd + '-W -m %s %s' % (command_mode, command) + + return subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).replace('.', '') +$$ language plpythonu; +CREATE TABLE a(i int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +INSERT INTO a SELECT generate_series(1,100); +INSERT INTO a SELECT generate_series(1,100000); +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- expect insert fail +INSERT INTO a SELECT generate_series(1,100); +ERROR: schema's disk space quota exceeded with name:ftsr +-- now one of primary is down +select pg_ctl((select datadir from gp_segment_configuration c where c.role='p' and c.content=0), 'stop'); + pg_ctl +---------------------- + server shutting down+ + +(1 row) + +-- switch mirror to primary +select gp_request_fts_probe_scan(); + gp_request_fts_probe_scan +--------------------------- + t +(1 row) + +-- check GPDB status +select content, preferred_role, role, status, mode from gp_segment_configuration where content = 0; + content | preferred_role | role | status | mode +---------+----------------+------+--------+------ + 0 | p | m | d | n + 0 | m | p | u | n +(2 rows) + +-- expect insert fail +INSERT INTO a SELECT generate_series(1,100); +ERROR: schema's disk space quota exceeded with name:ftsr +-- increase quota +SELECT diskquota.set_schema_quota('ftsr', '200 MB'); + set_schema_quota +------------------ + +(1 row) + +-- pull up failed primary +-- start_ignore +-- end_ignore +-- check GPDB status +select content, preferred_role, role, status, mode from gp_segment_configuration where content = 0; + content | preferred_role | role | status | mode +---------+----------------+------+--------+------ + 0 | p | p | u | s + 0 | m | m | u | s +(2 rows) + +-- no sleep, it will take effect immediately +SELECT quota_in_mb, nspsize_in_bytes from diskquota.show_schema_quota_view where schema_name='ftsr'; + quota_in_mb | nspsize_in_bytes +-------------+------------------ + 200 | 1310720 +(1 row) + +INSERT INTO a SELECT generate_series(1,100); +DROP TABLE a; +DROP SCHEMA ftsr CASCADE; +NOTICE: drop cascades to function pg_ctl(text,text,text) diff --git a/sql/test_insert_after_drop.sql b/sql/test_insert_after_drop.sql index 3481e516358..60a925411e1 100644 --- a/sql/test_insert_after_drop.sql +++ b/sql/test_insert_after_drop.sql @@ -12,7 +12,8 @@ INSERT INTO a SELECT generate_series(1,100000); SELECT pg_sleep(10); INSERT INTO a SELECT generate_series(1,100); DROP EXTENSION diskquota; --- no sleep, it will take effect immediately +-- sleep 1 second in case of system slow +SELECT pg_sleep(1); INSERT INTO a SELECT generate_series(1,100); DROP TABLE a; diff --git a/sql/test_primary_failure.sql b/sql/test_primary_failure.sql new file mode 100644 index 00000000000..2064c99f59b --- /dev/null +++ b/sql/test_primary_failure.sql @@ -0,0 +1,58 @@ +CREATE SCHEMA ftsr; +SELECT diskquota.set_schema_quota('ftsr', '1 MB'); +SET search_path TO ftsr; +create or replace language plpythonu; +-- +-- pg_ctl: +-- datadir: data directory of process to target with `pg_ctl` +-- command: commands valid for `pg_ctl` +-- command_mode: modes valid for `pg_ctl -m` +-- +create or 
replace function pg_ctl(datadir text, command text, command_mode text default 'immediate') +returns text as $$ + import subprocess + if command not in ('stop', 'restart'): + return 'Invalid command input' + + cmd = 'pg_ctl -l postmaster.log -D %s ' % datadir + cmd = cmd + '-W -m %s %s' % (command_mode, command) + + return subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).replace('.', '') +$$ language plpythonu; + +CREATE TABLE a(i int); +INSERT INTO a SELECT generate_series(1,100); +INSERT INTO a SELECT generate_series(1,100000); +SELECT pg_sleep(5); +-- expect insert fail +INSERT INTO a SELECT generate_series(1,100); + +-- now one of primary is down +select pg_ctl((select datadir from gp_segment_configuration c where c.role='p' and c.content=0), 'stop'); + +-- switch mirror to primary +select gp_request_fts_probe_scan(); + +-- check GPDB status +select content, preferred_role, role, status, mode from gp_segment_configuration where content = 0; + +-- expect insert fail +INSERT INTO a SELECT generate_series(1,100); + +-- increase quota +SELECT diskquota.set_schema_quota('ftsr', '200 MB'); + +-- pull up failed primary +-- start_ignore +\! gprecoverseg -a +\! gprecoverseg -ar +-- end_ignore + +-- check GPDB status +select content, preferred_role, role, status, mode from gp_segment_configuration where content = 0; +-- no sleep, it will take effect immediately +SELECT quota_in_mb, nspsize_in_bytes from diskquota.show_schema_quota_view where schema_name='ftsr'; +INSERT INTO a SELECT generate_series(1,100); + +DROP TABLE a; +DROP SCHEMA ftsr CASCADE; From df6af951400c0b894fbfa8e532b9e49725562d26 Mon Sep 17 00:00:00 2001 From: Haozhou Wang Date: Tue, 2 Apr 2019 18:32:42 +0800 Subject: [PATCH 021/330] fix test in standy master active mode --- expected/test_primary_failure.out | 12 +++++++++++- sql/test_primary_failure.sql | 11 +++++++++-- 2 files changed, 20 insertions(+), 3 deletions(-) diff --git a/expected/test_primary_failure.out b/expected/test_primary_failure.out index 8909637032e..432a31cf10e 100644 --- a/expected/test_primary_failure.out +++ b/expected/test_primary_failure.out @@ -24,6 +24,12 @@ returns text as $$ return subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).replace('.', '') $$ language plpythonu; +create or replace function pg_recoverseg(datadir text, command text) +returns text as $$ + import subprocess + cmd = 'gprecoverseg -%s -d %s ' % (command, datadir) + return subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).replace('.', '') +$$ language plpythonu; CREATE TABLE a(i int); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
@@ -73,6 +79,8 @@ SELECT diskquota.set_schema_quota('ftsr', '200 MB'); -- pull up failed primary -- start_ignore +select pg_recoverseg((select datadir from gp_segment_configuration c where c.role='p' and c.content=-1), 'a'); +select pg_recoverseg((select datadir from gp_segment_configuration c where c.role='p' and c.content=-1), 'ar'); -- end_ignore -- check GPDB status select content, preferred_role, role, status, mode from gp_segment_configuration where content = 0; @@ -92,4 +100,6 @@ SELECT quota_in_mb, nspsize_in_bytes from diskquota.show_schema_quota_view where INSERT INTO a SELECT generate_series(1,100); DROP TABLE a; DROP SCHEMA ftsr CASCADE; -NOTICE: drop cascades to function pg_ctl(text,text,text) +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to function pg_ctl(text,text,text) +drop cascades to function pg_recoverseg(text,text) diff --git a/sql/test_primary_failure.sql b/sql/test_primary_failure.sql index 2064c99f59b..a402ea01f3b 100644 --- a/sql/test_primary_failure.sql +++ b/sql/test_primary_failure.sql @@ -20,6 +20,13 @@ returns text as $$ return subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).replace('.', '') $$ language plpythonu; +create or replace function pg_recoverseg(datadir text, command text) +returns text as $$ + import subprocess + cmd = 'gprecoverseg -%s -d %s ' % (command, datadir) + return subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).replace('.', '') +$$ language plpythonu; + CREATE TABLE a(i int); INSERT INTO a SELECT generate_series(1,100); INSERT INTO a SELECT generate_series(1,100000); @@ -44,8 +51,8 @@ SELECT diskquota.set_schema_quota('ftsr', '200 MB'); -- pull up failed primary -- start_ignore -\! gprecoverseg -a -\! gprecoverseg -ar +select pg_recoverseg((select datadir from gp_segment_configuration c where c.role='p' and c.content=-1), 'a'); +select pg_recoverseg((select datadir from gp_segment_configuration c where c.role='p' and c.content=-1), 'ar'); -- end_ignore -- check GPDB status From acf28a43173e255c6288f1c581cd4a1b72d9585a Mon Sep 17 00:00:00 2001 From: Haozhou Wang Date: Mon, 8 Apr 2019 16:19:33 +0800 Subject: [PATCH 022/330] Fix issue during gprecoverseg 1. 
Work around a gprecoverseg issue when the master is down and the standby is activated in the test --- concourse/scripts/test_diskquota.sh | 14 +++++++------- expected/test_primary_failure.out | 2 +- sql/test_primary_failure.sql | 7 ++++++- 3 files changed, 14 insertions(+), 9 deletions(-) diff --git a/concourse/scripts/test_diskquota.sh b/concourse/scripts/test_diskquota.sh index 5d58977d3e0..83a30a267fe 100755 --- a/concourse/scripts/test_diskquota.sh +++ b/concourse/scripts/test_diskquota.sh @@ -4,31 +4,31 @@ set -exo pipefail CWDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" TOP_DIR=${CWDIR}/../../../ -if [ "$GPDBVER" == "GPDB4.3" ]; then - GPDB_CONCOURSE_DIR=${TOP_DIR}/gpdb_src/ci/concourse/scripts -else - GPDB_CONCOURSE_DIR=${TOP_DIR}/gpdb_src/concourse/scripts -fi +GPDB_CONCOURSE_DIR=${TOP_DIR}/gpdb_src/concourse/scripts source "${GPDB_CONCOURSE_DIR}/common.bash" function test(){ sudo chown -R gpadmin:gpadmin ${TOP_DIR}; cat > /home/gpadmin/test.sh <<-EOF set -exo pipefail + source gpdb_src/gpAux/gpdemo/gpdemo-env.sh + echo "export MASTER_DATA_DIRECTORY=\$MASTER_DATA_DIRECTORY" >> /usr/local/greenplum-db-devel/greenplum_path.sh source /usr/local/greenplum-db-devel/greenplum_path.sh - export PGPORT=15432 createdb diskquota gpconfig -c shared_preload_libraries -v 'diskquota' gpstop -arf gpconfig -c diskquota.naptime -v 2 gpstop -arf pushd diskquota_src - [ -s regression.diffs ] && cat regression.diffs && exit 1 make installcheck + [ -s regression.diffs ] && cat regression.diffs && exit 1 ps -ef | grep postgres| grep qddir| cut -d ' ' -f 6 | xargs kill -9 export PGPORT=16432 + echo "export PGPORT=\$PGPORT" >> /usr/local/greenplum-db-devel/greenplum_path.sh + source /usr/local/greenplum-db-devel/greenplum_path.sh rm /tmp/.s.PGSQL.15432* gpactivatestandby -ad ${TOP_DIR}/gpdb_src/gpAux/gpdemo/datadirs/standby make installcheck + [ -s regression.diffs ] && cat regression.diffs && exit 1 popd EOF export MASTER_DATA_DIRECTORY=${TOP_DIR}/gpdb_src/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 diff --git a/expected/test_primary_failure.out b/expected/test_primary_failure.out index 432a31cf10e..7e8962098be 100644 --- a/expected/test_primary_failure.out +++ b/expected/test_primary_failure.out @@ -27,7 +27,7 @@ $$ language plpythonu; create or replace function pg_recoverseg(datadir text, command text) returns text as $$ import subprocess - cmd = 'gprecoverseg -%s -d %s ' % (command, datadir) + cmd = 'gprecoverseg -%s -d %s; exit 0; ' % (command, datadir) return subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).replace('.', '') $$ language plpythonu; CREATE TABLE a(i int); diff --git a/sql/test_primary_failure.sql b/sql/test_primary_failure.sql index a402ea01f3b..513417f8a23 100644 --- a/sql/test_primary_failure.sql +++ b/sql/test_primary_failure.sql @@ -23,7 +23,7 @@ $$ language plpythonu; create or replace function pg_recoverseg(datadir text, command text) returns text as $$ import subprocess - cmd = 'gprecoverseg -%s -d %s ' % (command, datadir) + cmd = 'gprecoverseg -%s -d %s; exit 0; ' % (command, datadir) return subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).replace('.', '') $$ language plpythonu; @@ -52,6 +52,11 @@ SELECT diskquota.set_schema_quota('ftsr', '200 MB'); -- pull up failed primary -- start_ignore select pg_recoverseg((select datadir from gp_segment_configuration c where c.role='p' and c.content=-1), 'a'); +select pg_sleep(10); +select pg_recoverseg((select datadir from gp_segment_configuration c where c.role='p' and c.content=-1), 'ar'); +select 
pg_sleep(15); +select pg_recoverseg((select datadir from gp_segment_configuration c where c.role='p' and c.content=-1), 'a'); +select pg_sleep(10); select pg_recoverseg((select datadir from gp_segment_configuration c where c.role='p' and c.content=-1), 'ar'); -- end_ignore From 47484673c8d06783c54f1b6dfe9f77ea9d8c133d Mon Sep 17 00:00:00 2001 From: Hubert Zhang Date: Mon, 8 Apr 2019 16:34:21 +0800 Subject: [PATCH 023/330] Refactor diskquota launcher. (#20) * Refactor diskquota launcher. Separate diskquota utility functions into file diskquota_utility.c Rename MessageBox to ExtensionDDLMessage. Cleanup SPI and snapshot when SPI_execute error. Add more comments for launcher code. Allow launcher to restart after pm reset. Add terminate_all_workers logic in launcher. --- Makefile | 4 +- diskquota.c | 1176 +++++++++++++---------------------- diskquota.h | 15 +- diskquota_utility.c | 558 +++++++++++++++++ enforcement.c | 2 +- expected/test_extension.out | 4 +- gp_activetable.c | 2 +- init_file | 2 + quotamodel.c | 36 +- 9 files changed, 1021 insertions(+), 778 deletions(-) create mode 100644 diskquota_utility.c diff --git a/Makefile b/Makefile index 82e0f88c993..fcd59e88356 100644 --- a/Makefile +++ b/Makefile @@ -5,8 +5,8 @@ MODULE_big = diskquota EXTENSION = diskquota DATA = diskquota--1.0.sql SRCDIR = ./ -FILES = diskquota.c enforcement.c quotamodel.c gp_activetable.c -OBJS = diskquota.o enforcement.o quotamodel.o gp_activetable.o +FILES = diskquota.c enforcement.c quotamodel.c gp_activetable.c diskquota_utility.c +OBJS = diskquota.o enforcement.o quotamodel.o gp_activetable.o diskquota_utility.o PG_CPPFLAGS = -I$(libpq_srcdir) SHLIB_LINK = $(libpq) diff --git a/diskquota.c b/diskquota.c index 9f51d23d62b..c2f8307c6bc 100644 --- a/diskquota.c +++ b/diskquota.c @@ -10,7 +10,7 @@ * Copyright (c) 2018-Present Pivotal Software, Inc. 
* * IDENTIFICATION - * gpcontrib/gp_diskquota/diskquota.c + * diskquota/diskquota.c * * ------------------------------------------------------------------------- */ @@ -22,7 +22,6 @@ #include "access/xact.h" #include "catalog/indexing.h" #include "catalog/namespace.h" -#include "catalog/objectaccess.h" #include "catalog/pg_collation.h" #include "catalog/pg_database.h" #include "catalog/pg_extension.h" @@ -53,18 +52,11 @@ #include "diskquota.h" PG_MODULE_MAGIC; -/* disk quota helper function */ -PG_FUNCTION_INFO_V1(set_schema_quota); -PG_FUNCTION_INFO_V1(set_role_quota); -PG_FUNCTION_INFO_V1(diskquota_start_worker); -PG_FUNCTION_INFO_V1(init_table_size_table); - -/* timeout count to wait response from launcher process, in 1/10 sec */ -#define WAIT_TIME_COUNT 1200 - /* max number of monitored database with diskquota enabled */ #define MAX_NUM_MONITORED_DB 10 +#define DISKQUOTA_DB "diskquota" + /* flags set by signal handlers */ static volatile sig_atomic_t got_sighup = false; static volatile sig_atomic_t got_sigterm = false; @@ -86,11 +78,10 @@ struct DiskQuotaWorkerEntry }; DiskQuotaLocks diskquota_locks; -MessageBox *message_box = NULL; +ExtensionDDLMessage *extension_ddl_message = NULL; /* using hash table to support incremental update the table size entry.*/ static HTAB *disk_quota_worker_map = NULL; -static object_access_hook_type next_object_access_hook; static int num_db = 0; /* functions of disk quota*/ @@ -101,20 +92,20 @@ void disk_quota_launcher_main(Datum); static void disk_quota_sigterm(SIGNAL_ARGS); static void disk_quota_sighup(SIGNAL_ARGS); -static int64 get_size_in_mb(char *str); -static void set_quota_internal(Oid targetoid, int64 quota_limit_mb, QuotaType type); -static int start_worker_by_dboid(Oid dbid); -static void create_monitor_db_table(); -static inline void exec_simple_utility(const char *sql); -static void exec_simple_spi(const char *sql, int expected_code); -static bool add_db_to_config(Oid dbid); -static void del_db_from_config(Oid dbid); -static void process_message_box(void); -static void process_message_box_internal(MessageResult * code, MessageBox local_message_box); -static void dq_object_access_hook(ObjectAccessType access, Oid classId, - Oid objectId, int subId, void *arg); -static const char *err_code_to_err_message(MessageResult code); -extern void diskquota_invalidate_db(Oid dbid); +static bool start_worker_by_dboid(Oid dbid); +static void start_workers_from_dblist(void); +static void create_monitor_db_table(void); +static void add_dbid_to_database_list(Oid dbid); +static void del_dbid_from_database_list(Oid dbid); +static void process_extension_ddl_message(void); +static void do_process_extension_ddl_message(MessageResult * code, + ExtensionDDLMessage local_extension_ddl_message); +static void try_kill_db_worker(Oid dbid); +static void terminate_all_workers(void); +static void on_add_db(Oid dbid, MessageResult * code); +static void on_del_db(Oid dbid, MessageResult * code); +static bool is_valid_dbid(Oid dbid); +extern void invalidate_database_blackmap(Oid dbid); /* * Entrypoint of diskquota module. @@ -181,15 +172,16 @@ _PG_init(void) { return; } + /* Add dq_object_access_hook to handle drop extension event. 
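[Editor's aside: the hunk below replaces the inline hook registration with a call to register_diskquota_object_access_hook(). For reference, the hook-chaining idiom being wrapped looks roughly like this; this is a minimal sketch, and the names my_object_access/register_my_hook are illustrative, not the extension's actual symbols.]

#include "postgres.h"
#include "catalog/objectaccess.h"
#include "catalog/pg_extension.h"

static object_access_hook_type prev_object_access_hook = NULL;

static void
my_object_access(ObjectAccessType access, Oid classId,
                 Oid objectId, int subId, void *arg)
{
	/* React only to DROP of a row in pg_extension. */
	if (access == OAT_DROP && classId == ExtensionRelationId)
	{
		/* ... extension-specific handling goes here ... */
	}

	/* Always chain to whatever hook was installed before us, if any. */
	if (prev_object_access_hook)
		(*prev_object_access_hook) (access, classId, objectId, subId, arg);
}

void
register_my_hook(void)
{
	/* Save the previously installed hook, then install ours. */
	prev_object_access_hook = object_access_hook;
	object_access_hook = my_object_access;
}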
*/ - next_object_access_hook = object_access_hook; - object_access_hook = dq_object_access_hook; + register_diskquota_object_access_hook(); /* set up common data for diskquota launcher worker */ worker.bgw_flags = BGWORKER_SHMEM_ACCESS | BGWORKER_BACKEND_DATABASE_CONNECTION; worker.bgw_start_time = BgWorkerStart_RecoveryFinished; - worker.bgw_restart_time = BGW_NEVER_RESTART; + /* launcher process should be restarted after pm reset. */ + worker.bgw_restart_time = BGW_DEFAULT_RESTART_INTERVAL; snprintf(worker.bgw_library_name, BGW_MAXLEN, "diskquota"); snprintf(worker.bgw_function_name, BGW_MAXLEN, "disk_quota_launcher_main"); worker.bgw_notify_pid = 0; @@ -240,7 +232,7 @@ disk_quota_sighup(SIGNAL_ARGS) /* * Signal handler for SIGUSR1 - * Set a flag to tell the launcher to handle message box + * Set a flag to tell the launcher to handle extension ddl message */ static void disk_quota_sigusr1(SIGNAL_ARGS) @@ -294,6 +286,8 @@ disk_quota_worker_main(Datum main_arg) * immediately */ init_disk_quota_model(); + + /* Waiting for diskquota state become ready */ while (!got_sigterm) { int rc; @@ -314,7 +308,28 @@ disk_quota_worker_main(Datum main_arg) WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, diskquota_naptime * 1000L); ResetLatch(&MyProc->procLatch); + + /* Emergency bailout if postmaster has died */ + if (rc & WL_POSTMASTER_DEATH) + proc_exit(1); + + /* In case of a SIGHUP, just reload the configuration. */ + if (got_sighup) + { + got_sighup = false; + ProcessConfigFile(PGC_SIGHUP); + } + } + + /* if received sigterm, just exit the worker process */ + if (got_sigterm) + { + /* clear the out-of-quota blacklist in shared memory */ + invalidate_database_blackmap(MyDatabaseId); + proc_exit(0); } + + /* Refresh quota model with init mode */ refresh_disk_quota_model(true); /* @@ -337,6 +352,18 @@ disk_quota_worker_main(Datum main_arg) diskquota_naptime * 1000L); ResetLatch(&MyProc->procLatch); + + /* Emergency bailout if postmaster has died */ + if (rc & WL_POSTMASTER_DEATH) + proc_exit(1); + + /* In case of a SIGHUP, just reload the configuration. */ + if (got_sighup) + { + got_sighup = false; + ProcessConfigFile(PGC_SIGHUP); + } + /* Do the work */ refresh_disk_quota_model(false); @@ -344,14 +371,96 @@ disk_quota_worker_main(Datum main_arg) { /* TODO: Add hard limit function here */ } + } - /* emergency bailout if postmaster has died */ - if (rc & WL_POSTMASTER_DEATH) - proc_exit(1); + /* clear the out-of-quota blacklist in shared memory */ + invalidate_database_blackmap(MyDatabaseId); + proc_exit(0); +} + + + +/* ---- Functions for launcher process ---- */ +/* + * Launcher process manages the worker processes based on + * GUC diskquota.monitor_databases in configuration file. + */ +void +disk_quota_launcher_main(Datum main_arg) +{ + HASHCTL hash_ctl; + + /* establish signal handlers before unblocking signals. */ + pqsignal(SIGHUP, disk_quota_sighup); + pqsignal(SIGTERM, disk_quota_sigterm); + pqsignal(SIGUSR1, disk_quota_sigusr1); + + /* we're now ready to receive signals */ + BackgroundWorkerUnblockSignals(); + + LWLockAcquire(diskquota_locks.extension_ddl_message_lock, LW_EXCLUSIVE); + extension_ddl_message->launcher_pid = MyProcPid; + LWLockRelease(diskquota_locks.extension_ddl_message_lock); + + /* + * connect to our database 'diskquota'. launcher process will exit if + * 'diskquota' database is not existed. + */ + BackgroundWorkerInitializeConnection(DISKQUOTA_DB, NULL); + + /* + * use table diskquota_namespace.database_list to store diskquota enabled + * database. 
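[Editor's aside: both the reworked worker loop above and the launcher loop below follow the canonical background-worker latch pattern. Condensed, with illustrative names (reload_requested stands in for got_sighup), it is:]

#include <signal.h>

#include "postgres.h"
#include "miscadmin.h"
#include "storage/ipc.h"
#include "storage/latch.h"
#include "storage/proc.h"
#include "utils/guc.h"

static volatile sig_atomic_t reload_requested = false;

static void
bgworker_loop(long naptime_seconds)
{
	for (;;)
	{
		int			rc;

		CHECK_FOR_INTERRUPTS();

		/*
		 * Sleep until the timeout elapses, the latch is set, or the
		 * postmaster dies; note WaitLatch() takes milliseconds, hence
		 * the * 1000L on the naptime GUC.
		 */
		rc = WaitLatch(&MyProc->procLatch,
					   WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH,
					   naptime_seconds * 1000L);
		ResetLatch(&MyProc->procLatch);

		/* Bail out at once if the postmaster is gone. */
		if (rc & WL_POSTMASTER_DEATH)
			proc_exit(1);

		/* SIGHUP only means "re-read postgresql.conf". */
		if (reload_requested)
		{
			reload_requested = false;
			ProcessConfigFile(PGC_SIGHUP);
		}

		/* ... periodic work goes here ... */
	}
}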
+ */ + create_monitor_db_table(); + + /* use disk_quota_worker_map to manage diskquota worker processes. */ + memset(&hash_ctl, 0, sizeof(hash_ctl)); + hash_ctl.keysize = sizeof(Oid); + hash_ctl.entrysize = sizeof(DiskQuotaWorkerEntry); + hash_ctl.hash = oid_hash; + + disk_quota_worker_map = hash_create("disk quota worker map", + 1024, + &hash_ctl, + HASH_ELEM | HASH_FUNCTION); + + /* + * firstly start worker processes for each databases with diskquota + * enabled. + */ + start_workers_from_dblist(); + + /* main loop: do this until the SIGTERM handler tells us to terminate. */ + while (!got_sigterm) + { + int rc; + + CHECK_FOR_INTERRUPTS(); /* - * In case of a SIGHUP, just reload the configuration. + * background workers mustn't call usleep() or any direct equivalent: + * instead, they may wait on their process latch, which sleeps as + * necessary, but is awakened if postmaster dies. That way the + * background process goes away immediately in an emergency. */ + rc = WaitLatch(&MyProc->procLatch, + WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, + diskquota_naptime * 1000L); + ResetLatch(&MyProc->procLatch); + + /* Emergency bailout if postmaster has died */ + if (rc & WL_POSTMASTER_DEATH) + proc_exit(1); + + /* process extension ddl message */ + if (got_sigusr1) + { + got_sigusr1 = false; + process_extension_ddl_message(); + } + + /* in case of a SIGHUP, just reload the configuration. */ if (got_sighup) { got_sighup = false; @@ -359,83 +468,89 @@ disk_quota_worker_main(Datum main_arg) } } - diskquota_invalidate_db(MyDatabaseId); + /* terminate all the diskquota worker processes before launcher exit */ + terminate_all_workers(); proc_exit(0); } -/** - * create table to record the list of monitored databases + +/* + * Create table to record the list of monitored databases * we need a place to store the database with diskquota enabled * (via CREATE EXTENSION diskquota). Currently, we store them into * heap table in diskquota_namespace schema of diskquota database. - * When database restarted, diskquota laucher will start worker processes + * When database restarted, diskquota launcher will start worker processes * for these databases. */ static void -create_monitor_db_table() +create_monitor_db_table(void) { const char *sql; + bool connected = false; + bool pushed_active_snap = false; + bool ret = true; sql = "create schema if not exists diskquota_namespace;" "create table if not exists diskquota_namespace.database_list(dbid oid not null unique);"; - exec_simple_utility(sql); -} -static inline void -exec_simple_utility(const char *sql) -{ + /* debug_query_string need to be set for SPI_execute utility functions. */ debug_query_string = sql; + StartTransactionCommand(); - exec_simple_spi(sql, SPI_OK_UTILITY); - CommitTransactionCommand(); - debug_query_string = NULL; -} -/* - * SPI execute sql interface - */ -static void -exec_simple_spi(const char *sql, int expected_code) -{ - int ret; + /* + * Cache Errors during SPI functions, for example a segment may be down + * and current SPI execute will fail. diskquota launcher process should + * tolerate this kind of errors. 
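[Editor's aside: this "catch SPI errors and keep the background process alive" guard appears four times in the patch (here, in do_process_extension_ddl_message() below, and in the two quotamodel.c callers earlier). Distilled into a hypothetical helper, under the assumption that one SQL statement per transaction is enough, the pattern is:]

#include "postgres.h"
#include "access/xact.h"
#include "executor/spi.h"
#include "miscadmin.h"
#include "utils/snapmgr.h"

/*
 * Run one SQL statement in its own transaction; log, but survive, any
 * error (e.g. a downed segment). Returns true iff the statement committed.
 */
static bool
execute_sql_tolerant(const char *sql, int expected_code)
{
	bool		connected = false;
	bool		pushed_snap = false;
	bool		ok = true;

	StartTransactionCommand();
	PG_TRY();
	{
		if (SPI_connect() != SPI_OK_CONNECT)
			ereport(ERROR,
					(errcode(ERRCODE_INTERNAL_ERROR),
					 errmsg("unable to connect to execute internal query")));
		connected = true;

		PushActiveSnapshot(GetTransactionSnapshot());
		pushed_snap = true;

		if (SPI_execute(sql, false, 0) != expected_code)
			elog(ERROR, "SPI_execute failed, sql:'%s'", sql);
	}
	PG_CATCH();
	{
		/* Report the error, then clear it so the process keeps running. */
		HOLD_INTERRUPTS();
		EmitErrorReport();
		FlushErrorState();
		ok = false;
		RESUME_INTERRUPTS();
	}
	PG_END_TRY();

	/* Cleanup happens outside PG_TRY so it runs on both paths. */
	if (connected)
		SPI_finish();
	if (pushed_snap)
		PopActiveSnapshot();
	if (ok)
		CommitTransactionCommand();
	else
		AbortCurrentTransaction();
	return ok;
}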
+ */ + PG_TRY(); + { + if (SPI_OK_CONNECT != SPI_connect()) + { + ereport(ERROR, + (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("unable to connect to execute internal query"))); + } + connected = true; + PushActiveSnapshot(GetTransactionSnapshot()); + pushed_active_snap = true; - ret = SPI_connect(); - if (ret != SPI_OK_CONNECT) - elog(ERROR, "[diskquota] SPI connect error, code=%d", ret); - PushActiveSnapshot(GetTransactionSnapshot()); - ret = SPI_execute(sql, false, 0); - if (ret != expected_code) + if (SPI_execute(sql, false, 0) != SPI_OK_UTILITY) + { + elog(ERROR, "[diskquota launcher] SPI_execute error, sql:'%s', errno:%d", sql, errno); + } + } + PG_CATCH(); { - elog(ERROR, "[diskquota] SPI_execute sql:'%s', code %d", sql, ret); + /* Prevents interrupts while cleaning up */ + HOLD_INTERRUPTS(); + EmitErrorReport(); + FlushErrorState(); + ret = false; + /* Now we can allow interrupts again */ + RESUME_INTERRUPTS(); } - SPI_finish(); - PopActiveSnapshot(); -} - -static bool -is_valid_dbid(Oid dbid) -{ - HeapTuple tuple; + PG_END_TRY(); + if (connected) + SPI_finish(); + if (pushed_active_snap) + PopActiveSnapshot(); + if (ret) + CommitTransactionCommand(); + else + AbortCurrentTransaction(); - if (dbid == InvalidOid) - return false; - tuple = SearchSysCache1(DATABASEOID, ObjectIdGetDatum(dbid)); - if (!HeapTupleIsValid(tuple)) - return false; - ReleaseSysCache(tuple); - return true; + debug_query_string = NULL; } /* - * in early stage, start all worker processes of diskquota-enabled databases - * from diskquota_namespace.database_list + * When launcher started, it will start all worker processes of + * diskquota-enabled databases from diskquota_namespace.database_list */ static void -start_workers_from_dblist() +start_workers_from_dblist(void) { TupleDesc tupdesc; - Oid fake_dbid[128]; - int fake_count = 0; int num = 0; int ret; int i; @@ -449,7 +564,7 @@ start_workers_from_dblist() PushActiveSnapshot(GetTransactionSnapshot()); ret = SPI_connect(); if (ret != SPI_OK_CONNECT) - elog(ERROR, "[diskquota launcher] SPI connect error, code=%d", ret); + elog(ERROR, "[diskquota launcher] SPI connect error, errno:%d", errno); ret = SPI_execute("select dbid from diskquota_namespace.database_list;", true, 0); if (ret != SPI_OK_SELECT) elog(ERROR, "select diskquota_namespace.database_list"); @@ -471,12 +586,22 @@ start_workers_from_dblist() dbid = DatumGetObjectId(dat); if (!is_valid_dbid(dbid)) { - fake_dbid[fake_count++] = dbid; + elog(LOG, "[diskquota launcher] database(oid:%u) in table database_list is not a valid database", dbid); continue; } - if (start_worker_by_dboid(dbid) < 1) - elog(ERROR, "[diskquota launcher] start worker process of database(%u) failed", dbid); + if (!start_worker_by_dboid(dbid)) + elog(ERROR, "[diskquota launcher] start worker process of database(oid:%u) failed", dbid); num++; + + /* + * diskquota only supports to monitor at most MAX_NUM_MONITORED_DB + * databases + */ + if (num >= MAX_NUM_MONITORED_DB) + { + elog(LOG, "[diskquota launcher] diskquota monitored database limit is reached, database(oid:%u) will not enable diskquota", dbid); + break; + } } num_db = num; SPI_finish(); @@ -484,55 +609,116 @@ start_workers_from_dblist() CommitTransactionCommand(); /* TODO: clean invalid database */ - } -static bool -add_db_to_config(Oid dbid) +/* + * This function is called by launcher process to handle message from other backend + * processes which call CREATE/DROP EXTENSION diskquota; It must be able to catch errors, + * and return an error code back to the backend process. 
+ */ +static void +process_extension_ddl_message() { - StringInfoData str; + MessageResult code = ERR_UNKNOWN; + ExtensionDDLMessage local_extension_ddl_message; - initStringInfo(&str); - appendStringInfo(&str, "insert into diskquota_namespace.database_list values(%u);", dbid); - exec_simple_spi(str.data, SPI_OK_INSERT); - return true; -} + LWLockAcquire(diskquota_locks.extension_ddl_message_lock, LW_SHARED); + memcpy(&local_extension_ddl_message, extension_ddl_message, sizeof(ExtensionDDLMessage)); + LWLockRelease(diskquota_locks.extension_ddl_message_lock); -static void -del_db_from_config(Oid dbid) -{ - StringInfoData str; + /* create/drop extension message must be valid */ + if (local_extension_ddl_message.req_pid == 0 || local_extension_ddl_message.launcher_pid != MyProcPid) + return; - initStringInfo(&str); - appendStringInfo(&str, "delete from diskquota_namespace.database_list where dbid=%u;", dbid); - exec_simple_spi(str.data, SPI_OK_DELETE); + elog(LOG, "[diskquota launcher]: received create/drop extension diskquota message"); + + do_process_extension_ddl_message(&code, local_extension_ddl_message); + + /* Send createdrop extension diskquota result back to QD */ + LWLockAcquire(diskquota_locks.extension_ddl_message_lock, LW_EXCLUSIVE); + memset(extension_ddl_message, 0, sizeof(ExtensionDDLMessage)); + extension_ddl_message->launcher_pid = MyProcPid; + extension_ddl_message->result = (int) code; + LWLockRelease(diskquota_locks.extension_ddl_message_lock); } + /* - * When drop exention database, diskquota laucher will receive a message - * to kill the diskquota worker process which monitoring the target database. + * Process 'create extension' and 'drop extension' message. + * For 'create extension' message, store dbid into table + * 'database_list' and start the diskquota worker process. + * For 'drop extension' message, remove dbid from table + * 'database_list' and stop the diskquota worker process. */ static void -try_kill_db_worker(Oid dbid) +do_process_extension_ddl_message(MessageResult * code, ExtensionDDLMessage local_extension_ddl_message) { - DiskQuotaWorkerEntry *hash_entry; - bool found; + int old_num_db = num_db; + bool connected = false; + bool pushed_active_snap = false; + bool ret = true; - hash_entry = (DiskQuotaWorkerEntry *) hash_search(disk_quota_worker_map, - (void *) &dbid, - HASH_REMOVE, &found); - if (found) + StartTransactionCommand(); + + /* + * Cache Errors during SPI functions, for example a segment may be down + * and current SPI execute will fail. diskquota launcher process should + * tolerate this kind of errors. 
+ */ + PG_TRY(); { - BackgroundWorkerHandle *handle; + if (SPI_OK_CONNECT != SPI_connect()) + { + ereport(ERROR, + (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("unable to connect to execute internal query"))); + } + connected = true; + PushActiveSnapshot(GetTransactionSnapshot()); + pushed_active_snap = true; - handle = hash_entry->handle; - TerminateBackgroundWorker(handle); - pfree(handle); + switch (local_extension_ddl_message.cmd) + { + case CMD_CREATE_EXTENSION: + on_add_db(local_extension_ddl_message.dbid, code); + num_db++; + *code = ERR_OK; + break; + case CMD_DROP_EXTENSION: + on_del_db(local_extension_ddl_message.dbid, code); + num_db--; + *code = ERR_OK; + break; + default: + elog(LOG, "[diskquota launcher]:received unsupported message cmd=%d", local_extension_ddl_message.cmd); + *code = ERR_UNKNOWN; + break; + } + } + PG_CATCH(); + { + error_context_stack = NULL; + HOLD_INTERRUPTS(); + EmitErrorReport(); + FlushErrorState(); + ret = false; + num_db = old_num_db; + RESUME_INTERRUPTS(); } + PG_END_TRY(); + + if (connected) + SPI_finish(); + if (pushed_active_snap) + PopActiveSnapshot(); + if (ret) + CommitTransactionCommand(); + else + AbortCurrentTransaction(); } /* - * handle create extension diskquota + * Handle create extension diskquota * if we know the exact error which caused failure, * we set it, and error out */ @@ -542,12 +728,12 @@ on_add_db(Oid dbid, MessageResult * code) if (num_db >= MAX_NUM_MONITORED_DB) { *code = ERR_EXCEED; - elog(ERROR, "[diskquota] too database to monitor"); + elog(ERROR, "[diskquota launcher] too many databases to monitor"); } if (!is_valid_dbid(dbid)) { *code = ERR_INVALID_DBID; - elog(ERROR, "[diskquota] invalid database oid"); + elog(ERROR, "[diskquota launcher] invalid database oid"); } /* @@ -556,7 +742,7 @@ on_add_db(Oid dbid, MessageResult * code) */ PG_TRY(); { - add_db_to_config(dbid); + add_dbid_to_database_list(dbid); } PG_CATCH(); { @@ -565,114 +751,148 @@ on_add_db(Oid dbid, MessageResult * code) } PG_END_TRY(); - if (start_worker_by_dboid(dbid) < 1) + if (!start_worker_by_dboid(dbid)) { *code = ERR_START_WORKER; - elog(ERROR, "[diskquota] failed to start worker - dbid=%u", dbid); + elog(ERROR, "[diskquota launcher] failed to start worker - dbid=%u", dbid); } } /* - * handle message: drop extension diskquota + * Handle message: drop extension diskquota * do our best to: * 1. kill the associated worker process * 2. delete dbid from diskquota_namespace.database_list * 3. invalidate black-map entries from shared memory */ static void -on_del_db(Oid dbid) +on_del_db(Oid dbid, MessageResult * code) { - if (dbid == InvalidOid) - return; + if (!is_valid_dbid(dbid)) + { + *code = ERR_INVALID_DBID; + elog(ERROR, "[diskquota launcher] invalid database oid"); + } + + /* tell postmaster to stop this bgworker */ try_kill_db_worker(dbid); - del_db_from_config(dbid); + + /* + * delete dbid from diskquota_namespace.database_list set *code to + * ERR_DEL_FROM_DB if any error occurs + */ + PG_TRY(); + { + del_dbid_from_database_list(dbid); + } + PG_CATCH(); + { + *code = ERR_DEL_FROM_DB; + PG_RE_THROW(); + } + PG_END_TRY(); + } -/* ---- Functions for lancher process ---- */ /* - * Launcher process manages the worker processes based on - * GUC diskquota.monitor_databases in configuration file. + * Add the database id into table 'database_list' in + * database 'diskquota' to store the diskquota enabled + * database info. 
*/ -void -disk_quota_launcher_main(Datum main_arg) +static void +add_dbid_to_database_list(Oid dbid) { - HASHCTL hash_ctl; + StringInfoData str; + int ret; - /* Establish signal handlers before unblocking signals. */ - pqsignal(SIGHUP, disk_quota_sighup); - pqsignal(SIGTERM, disk_quota_sigterm); - pqsignal(SIGUSR1, disk_quota_sigusr1); + initStringInfo(&str); + appendStringInfo(&str, "insert into diskquota_namespace.database_list values(%u);", dbid); - /* We're now ready to receive signals */ - BackgroundWorkerUnblockSignals(); - - LWLockAcquire(diskquota_locks.message_box_lock, LW_EXCLUSIVE); - message_box->launcher_pid = MyProcPid; - LWLockRelease(diskquota_locks.message_box_lock); - /* Connect to our database */ - BackgroundWorkerInitializeConnection("diskquota", NULL); - - create_monitor_db_table(); - - memset(&hash_ctl, 0, sizeof(hash_ctl)); - hash_ctl.keysize = sizeof(Oid); - hash_ctl.entrysize = sizeof(DiskQuotaWorkerEntry); - hash_ctl.hash = oid_hash; + /* errors will be cached in outer function */ + ret = SPI_execute(str.data, false, 0); + if (ret != SPI_OK_INSERT) + { + elog(ERROR, "[diskquota launcher] SPI_execute sql:'%s', errno:%d", str.data, errno); + } + return; +} - disk_quota_worker_map = hash_create("disk quota worker map", - 1024, - &hash_ctl, - HASH_ELEM | HASH_FUNCTION); +/* + * Delete database id from table 'database_list' in + * database 'diskquota'. + */ +static void +del_dbid_from_database_list(Oid dbid) +{ + StringInfoData str; + int ret; - start_workers_from_dblist(); + initStringInfo(&str); + appendStringInfo(&str, "delete from diskquota_namespace.database_list where dbid=%u;", dbid); - /* - * Main loop: do this until the SIGTERM handler tells us to terminate - */ - while (!got_sigterm) + /* errors will be cached in outer function */ + ret = SPI_execute(str.data, false, 0); + if (ret != SPI_OK_DELETE) { - int rc; - - CHECK_FOR_INTERRUPTS(); + elog(ERROR, "[diskquota launcher] SPI_execute sql:'%s', errno:%d", str.data, errno); + } +} - /* - * Background workers mustn't call usleep() or any direct equivalent: - * instead, they may wait on their process latch, which sleeps as - * necessary, but is awakened if postmaster dies. That way the - * background process goes away immediately in an emergency. - */ - rc = WaitLatch(&MyProc->procLatch, - WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, - diskquota_naptime * 1000L); - ResetLatch(&MyProc->procLatch); +/* + * When drop exention database, diskquota laucher will receive a message + * to kill the diskquota worker process which monitoring the target database. + */ +static void +try_kill_db_worker(Oid dbid) +{ + DiskQuotaWorkerEntry *hash_entry; + bool found; - /* emergency bailout if postmaster has died */ - if (rc & WL_POSTMASTER_DEATH) - proc_exit(1); - /* process message box, now someone is holding message_box_lock */ - if (got_sigusr1) - { - got_sigusr1 = false; - process_message_box(); - } + hash_entry = (DiskQuotaWorkerEntry *) hash_search(disk_quota_worker_map, + (void *) &dbid, + HASH_REMOVE, &found); + if (found) + { + BackgroundWorkerHandle *handle; - /* - * In case of a SIGHUP, just reload the configuration. - */ - if (got_sighup) + handle = hash_entry->handle; + if (handle) { - got_sighup = false; - ProcessConfigFile(PGC_SIGHUP); + TerminateBackgroundWorker(handle); + pfree(handle); } } +} - proc_exit(1); +/* + * When launcher exits, it should also terminate all the workers. 
+ */ +static void +terminate_all_workers(void) +{ + DiskQuotaWorkerEntry *hash_entry; + HASH_SEQ_STATUS iter; + + + hash_seq_init(&iter, disk_quota_worker_map); + + /* + * terminate the worker processes. since launcher will exit immediately, + * we skip to clear the disk_quota_worker_map + */ + while ((hash_entry = hash_seq_search(&iter)) != NULL) + { + if (hash_entry->handle) + TerminateBackgroundWorker(hash_entry->handle); + } } /* * Dynamically launch an disk quota worker process. + * This function is called when laucher process receive + * a 'create extension diskquota' message. */ -static int +static bool start_worker_by_dboid(Oid dbid) { BackgroundWorker worker; @@ -682,13 +902,20 @@ start_worker_by_dboid(Oid dbid) char *dbname; pid_t pid; bool found; - bool ok; + bool ret; DiskQuotaWorkerEntry *workerentry; memset(&worker, 0, sizeof(BackgroundWorker)); worker.bgw_flags = BGWORKER_SHMEM_ACCESS | BGWORKER_BACKEND_DATABASE_CONNECTION; worker.bgw_start_time = BgWorkerStart_RecoveryFinished; + + /* + * diskquota worker should not restart by bgworker framework. If + * postmaster reset, all the bgworkers will be terminated and diskquota + * launcher is restarted by postmaster. All the diskquota workers should + * be started by launcher process again. + */ worker.bgw_restart_time = BGW_NEVER_RESTART; sprintf(worker.bgw_library_name, "diskquota"); sprintf(worker.bgw_function_name, "disk_quota_worker_main"); @@ -702,10 +929,10 @@ start_worker_by_dboid(Oid dbid) worker.bgw_main_arg = (Datum) 0; old_ctx = MemoryContextSwitchTo(TopMemoryContext); - ok = RegisterDynamicBackgroundWorker(&worker, &handle); + ret = RegisterDynamicBackgroundWorker(&worker, &handle); MemoryContextSwitchTo(old_ctx); - if (!ok) - return -1; + if (!ret) + return false; status = WaitForBackgroundWorkerStartup(handle, &pid); if (status == BGWH_STOPPED) ereport(ERROR, @@ -730,557 +957,22 @@ start_worker_by_dboid(Oid dbid) workerentry->pid = pid; } - return pid; -} - -/* ---- Help Functions to set quota limit. ---- */ -/* - * Set disk quota limit for role. - */ -Datum -set_role_quota(PG_FUNCTION_ARGS) -{ - Oid roleoid; - char *rolname; - char *sizestr; - int64 quota_limit_mb; - - if (!superuser()) - { - ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("must be superuser to set disk quota limit"))); - } - - rolname = text_to_cstring(PG_GETARG_TEXT_PP(0)); - rolname = str_tolower(rolname, strlen(rolname), DEFAULT_COLLATION_OID); - roleoid = get_role_oid(rolname, false); - - sizestr = text_to_cstring(PG_GETARG_TEXT_PP(1)); - sizestr = str_tolower(sizestr, strlen(sizestr), DEFAULT_COLLATION_OID); - quota_limit_mb = get_size_in_mb(sizestr); - - set_quota_internal(roleoid, quota_limit_mb, ROLE_QUOTA); - PG_RETURN_VOID(); -} - -/* - * init table diskquota.table_size. - * calculate table size by UDF pg_total_relation_size - */ -Datum -init_table_size_table(PG_FUNCTION_ARGS) -{ - int ret; - StringInfoData buf; - - RangeVar *rv; - Relation rel; - - /* - * If error happens in init_table_size_table, just return error messages - * to the client side. So there is no need to catch the error. - */ - - /* ensure table diskquota.state exists */ - rv = makeRangeVar("diskquota", "state", -1); - rel = heap_openrv_extended(rv, AccessShareLock, true); - if (!rel) - { - /* configuration table is missing. 
*/ - elog(ERROR, "table \"diskquota.state\" is missing in database \"%s\"," - " please recreate diskquota extension", - get_database_name(MyDatabaseId)); - } - heap_close(rel, NoLock); - - SPI_connect(); - - /* delete all the table size info in table_size if exist. */ - initStringInfo(&buf); - appendStringInfo(&buf, "delete from diskquota.table_size;"); - ret = SPI_execute(buf.data, false, 0); - if (ret != SPI_OK_DELETE) - elog(ERROR, "cannot delete table_size table: error code %d", ret); - - /* fill table_size table with table oid and size info. */ - resetStringInfo(&buf); - appendStringInfo(&buf, - "insert into diskquota.table_size " - "select oid, pg_total_relation_size(oid) from pg_class " - "where oid>= %u and (relkind='r' or relkind='m');", - FirstNormalObjectId); - ret = SPI_execute(buf.data, false, 0); - if (ret != SPI_OK_INSERT) - elog(ERROR, "cannot insert table_size table: error code %d", ret); - - /* set diskquota state to ready. */ - resetStringInfo(&buf); - appendStringInfo(&buf, - "update diskquota.state set state = %u;", - DISKQUOTA_READY_STATE); - ret = SPI_execute(buf.data, false, 0); - if (ret != SPI_OK_UPDATE) - elog(ERROR, "cannot update state table: error code %d", ret); - - SPI_finish(); - PG_RETURN_VOID(); -} - -/* - * Set disk quota limit for schema. - */ -Datum -set_schema_quota(PG_FUNCTION_ARGS) -{ - Oid namespaceoid; - char *nspname; - char *sizestr; - int64 quota_limit_mb; - - if (!superuser()) - { - ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("must be superuser to set disk quota limit"))); - } - - nspname = text_to_cstring(PG_GETARG_TEXT_PP(0)); - nspname = str_tolower(nspname, strlen(nspname), DEFAULT_COLLATION_OID); - namespaceoid = get_namespace_oid(nspname, false); - - sizestr = text_to_cstring(PG_GETARG_TEXT_PP(1)); - sizestr = str_tolower(sizestr, strlen(sizestr), DEFAULT_COLLATION_OID); - quota_limit_mb = get_size_in_mb(sizestr); - - set_quota_internal(namespaceoid, quota_limit_mb, NAMESPACE_QUOTA); - PG_RETURN_VOID(); -} - -/* - * Write the quota limit info into quota_config table under - * 'diskquota' schema of the current database. - */ -static void -set_quota_internal(Oid targetoid, int64 quota_limit_mb, QuotaType type) -{ - int ret; - StringInfoData buf; - - initStringInfo(&buf); - appendStringInfo(&buf, - "select true from diskquota.quota_config where targetoid = %u" - " and quotatype =%d", - targetoid, type); - - /* - * If error happens in set_quota_internal, just return error messages to - * the client side. So there is no need to catch the error. 
- */ - SPI_connect(); - - ret = SPI_execute(buf.data, true, 0); - if (ret != SPI_OK_SELECT) - elog(ERROR, "cannot select quota setting table: error code %d", ret); - - /* if the schema or role's quota has been set before */ - if (SPI_processed == 0 && quota_limit_mb > 0) - { - resetStringInfo(&buf); - appendStringInfo(&buf, - "insert into diskquota.quota_config values(%u,%d,%ld);", - targetoid, type, quota_limit_mb); - ret = SPI_execute(buf.data, false, 0); - if (ret != SPI_OK_INSERT) - elog(ERROR, "cannot insert into quota setting table, error code %d", ret); - } - else if (SPI_processed > 0 && quota_limit_mb <= 0) - { - resetStringInfo(&buf); - appendStringInfo(&buf, - "delete from diskquota.quota_config where targetoid=%u" - " and quotatype=%d;", - targetoid, type); - ret = SPI_execute(buf.data, false, 0); - if (ret != SPI_OK_DELETE) - elog(ERROR, "cannot delete item from quota setting table, error code %d", ret); - } - else if (SPI_processed > 0 && quota_limit_mb > 0) - { - resetStringInfo(&buf); - appendStringInfo(&buf, - "update diskquota.quota_config set quotalimitMB = %ld where targetoid=%u" - " and quotatype=%d;", - quota_limit_mb, targetoid, type); - ret = SPI_execute(buf.data, false, 0); - if (ret != SPI_OK_UPDATE) - elog(ERROR, "cannot update quota setting table, error code %d", ret); - } - - /* - * And finish our transaction. - */ - SPI_finish(); - return; -} - -/* - * Convert a human-readable size to a size in MB. - */ -static int64 -get_size_in_mb(char *str) -{ - char *strptr, - *endptr; - char saved_char; - Numeric num; - int64 result; - bool have_digits = false; - - /* Skip leading whitespace */ - strptr = str; - while (isspace((unsigned char) *strptr)) - strptr++; - - /* Check that we have a valid number and determine where it ends */ - endptr = strptr; - - /* Part (1): sign */ - if (*endptr == '-' || *endptr == '+') - endptr++; - - /* Part (2): main digit string */ - if (isdigit((unsigned char) *endptr)) - { - have_digits = true; - do - endptr++; - while (isdigit((unsigned char) *endptr)); - } - - /* Part (3): optional decimal point and fractional digits */ - if (*endptr == '.') - { - endptr++; - if (isdigit((unsigned char) *endptr)) - { - have_digits = true; - do - endptr++; - while (isdigit((unsigned char) *endptr)); - } - } - - /* Complain if we don't have a valid number at this point */ - if (!have_digits) - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("invalid size: \"%s\"", str))); - - /* Part (4): optional exponent */ - if (*endptr == 'e' || *endptr == 'E') - { - long exponent; - char *cp; - - /* - * Note we might one day support EB units, so if what follows 'E' - * isn't a number, just treat it all as a unit to be parsed. - */ - exponent = strtol(endptr + 1, &cp, 10); - (void) exponent; /* Silence -Wunused-result warnings */ - if (cp > endptr + 1) - endptr = cp; - } - - /* - * Parse the number, saving the next character, which may be the first - * character of the unit string. 
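[Editor's aside: before the unit handling continues below, here is a stand-alone model of what get_size_in_mb() computes. It assumes double precision suffices and ignores trailing whitespace; the in-tree version parses via numeric_in/numeric_mul so very large values stay exact.]

#include <stdio.h>
#include <stdlib.h>
#include <strings.h>

/* Parse "<number>[ <unit>]" and return megabytes, or -1 on a bad unit. */
static long long
size_in_mb(const char *str)
{
	char	   *end;
	double		value = strtod(str, &end);	/* sign, decimals, exponent */

	while (*end == ' ')
		end++;
	if (*end == '\0')
		return (long long) value;			/* a bare number means MB */
	if (strcasecmp(end, "mb") == 0)
		return (long long) value;
	if (strcasecmp(end, "gb") == 0)
		return (long long) (value * 1024);
	if (strcasecmp(end, "tb") == 0)
		return (long long) (value * 1024 * 1024);
	if (strcasecmp(end, "pb") == 0)
		return (long long) (value * 1024.0 * 1024 * 1024);
	return -1;
}

int
main(void)
{
	printf("%lld\n", size_in_mb("1.5 GB"));	/* prints 1536 */
	printf("%lld\n", size_in_mb("100"));	/* prints 100 */
	return 0;
}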
- */ - saved_char = *endptr; - *endptr = '\0'; - - num = DatumGetNumeric(DirectFunctionCall3(numeric_in, - CStringGetDatum(strptr), - ObjectIdGetDatum(InvalidOid), - Int32GetDatum(-1))); - - *endptr = saved_char; - - /* Skip whitespace between number and unit */ - strptr = endptr; - while (isspace((unsigned char) *strptr)) - strptr++; - - /* Handle possible unit */ - if (*strptr != '\0') - { - int64 multiplier = 0; - - /* Trim any trailing whitespace */ - endptr = str + strlen(str) - 1; - - while (isspace((unsigned char) *endptr)) - endptr--; - - endptr++; - *endptr = '\0'; - - /* Parse the unit case-insensitively */ - if (pg_strcasecmp(strptr, "mb") == 0) - multiplier = ((int64) 1); - - else if (pg_strcasecmp(strptr, "gb") == 0) - multiplier = ((int64) 1024); - - else if (pg_strcasecmp(strptr, "tb") == 0) - multiplier = ((int64) 1024) * 1024; - else if (pg_strcasecmp(strptr, "pb") == 0) - multiplier = ((int64) 1024) * 1024 * 1024; - else - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("invalid size: \"%s\"", str), - errdetail("Invalid size unit: \"%s\".", strptr), - errhint("Valid units are \"MB\", \"GB\", \"TB\", and \"PB\"."))); - - if (multiplier > 1) - { - Numeric mul_num; - - mul_num = DatumGetNumeric(DirectFunctionCall1(int8_numeric, - Int64GetDatum(multiplier))); - - num = DatumGetNumeric(DirectFunctionCall2(numeric_mul, - NumericGetDatum(mul_num), - NumericGetDatum(num))); - } - } - - result = DatumGetInt64(DirectFunctionCall1(numeric_int8, - NumericGetDatum(num))); - - return result; -} - -/* - * trigger start diskquota worker when create extension diskquota - * This function is called at backend side, and will send message to - * diskquota launcher. Luacher process is responsible for starting the real - * diskquota worker process. - */ -Datum -diskquota_start_worker(PG_FUNCTION_ARGS) -{ - int rc; - - /* - * Lock on extension_lock to avoid multiple backend create diskquota - * extension at the same time. 
- */ - LWLockAcquire(diskquota_locks.extension_lock, LW_EXCLUSIVE); - LWLockAcquire(diskquota_locks.message_box_lock, LW_EXCLUSIVE); - message_box->req_pid = MyProcPid; - message_box->cmd = CMD_CREATE_EXTENSION; - message_box->result = ERR_PENDING; - message_box->dbid = MyDatabaseId; - /* setup sig handler to diskquota launcher process */ - rc = kill(message_box->launcher_pid, SIGUSR1); - LWLockRelease(diskquota_locks.message_box_lock); - if (rc == 0) - { - int count = WAIT_TIME_COUNT; - - while (count-- > 0) - { - CHECK_FOR_INTERRUPTS(); - rc = WaitLatch(&MyProc->procLatch, - WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, - 100L); - if (rc & WL_POSTMASTER_DEATH) - break; - ResetLatch(&MyProc->procLatch); - LWLockAcquire(diskquota_locks.message_box_lock, LW_SHARED); - if (message_box->result != ERR_PENDING) - { - LWLockRelease(diskquota_locks.message_box_lock); - break; - } - LWLockRelease(diskquota_locks.message_box_lock); - } - } - LWLockAcquire(diskquota_locks.message_box_lock, LW_SHARED); - if (message_box->result != ERR_OK) - { - LWLockRelease(diskquota_locks.message_box_lock); - LWLockRelease(diskquota_locks.extension_lock); - elog(ERROR, "[diskquota] failed to create diskquota extension: %s", err_code_to_err_message((MessageResult) message_box->result)); - } - LWLockRelease(diskquota_locks.message_box_lock); - LWLockRelease(diskquota_locks.extension_lock); - PG_RETURN_VOID(); -} - -static void -process_message_box_internal(MessageResult * code, MessageBox local_message_box) -{ - int old_num_db = num_db; - - PG_TRY(); - { - switch (local_message_box.cmd) - { - case CMD_CREATE_EXTENSION: - on_add_db(local_message_box.dbid, code); - num_db++; - *code = ERR_OK; - break; - case CMD_DROP_EXTENSION: - on_del_db(local_message_box.dbid); - num_db--; - *code = ERR_OK; - break; - default: - elog(LOG, "[diskquota]:received unsupported message cmd=%d", local_message_box.cmd); - *code = ERR_UNKNOWN; - break; - } - } - PG_CATCH(); - { - error_context_stack = NULL; - HOLD_INTERRUPTS(); - EmitErrorReport(); - FlushErrorState(); - RESUME_INTERRUPTS(); - num_db = old_num_db; - } - PG_END_TRY(); -} - -/* - * this function is called by launcher process to handle message from other backend - * processes which call CREATE/DROP EXTENSION diskquota; It must be able to catch errors, - * and return an error code back to the backend process. - */ -static void -process_message_box() -{ - MessageResult code = ERR_UNKNOWN; - MessageBox local_message_box; - - LWLockAcquire(diskquota_locks.message_box_lock, LW_SHARED); - memcpy(&local_message_box, message_box, sizeof(MessageBox)); - LWLockRelease(diskquota_locks.message_box_lock); - - /* create/drop extension message must be valid */ - if (local_message_box.req_pid == 0 || local_message_box.launcher_pid != MyProcPid) - { - return; - } - - elog(LOG, "[diskquota]: received create/drop extension diskquota message"); - StartTransactionCommand(); - process_message_box_internal(&code, local_message_box); - if (code == ERR_OK) - CommitTransactionCommand(); - else - AbortCurrentTransaction(); - - /* Send createdrop extension diskquota result back to QD */ - LWLockAcquire(diskquota_locks.message_box_lock, LW_EXCLUSIVE); - memset(message_box, 0, sizeof(MessageBox)); - message_box->launcher_pid = MyProcPid; - message_box->result = (int) code; - LWLockRelease(diskquota_locks.message_box_lock); + return true; } /* - * This hook is used to handle drop extension diskquota event - * It will send CMD_DROP_EXTENSION message to diskquota laucher. 
- * Laucher will terminate the corresponding worker process and - * remove the dbOid from the database_list table. + * Check whether db oid is valid. */ -static void -dq_object_access_hook(ObjectAccessType access, Oid classId, - Oid objectId, int subId, void *arg) +static bool +is_valid_dbid(Oid dbid) { - Oid oid; - int rc; - - if (access != OAT_DROP || classId != ExtensionRelationId) - goto out; - oid = get_extension_oid("diskquota", true); - if (oid != objectId) - goto out; - - /* - * Lock on extension_lock to avoid multiple backend create diskquota - * extension at the same time. - */ - LWLockAcquire(diskquota_locks.extension_lock, LW_EXCLUSIVE); - LWLockAcquire(diskquota_locks.message_box_lock, LW_EXCLUSIVE); - message_box->req_pid = MyProcPid; - message_box->cmd = CMD_DROP_EXTENSION; - message_box->result = ERR_PENDING; - message_box->dbid = MyDatabaseId; - rc = kill(message_box->launcher_pid, SIGUSR1); - LWLockRelease(diskquota_locks.message_box_lock); - if (rc == 0) - { - int count = WAIT_TIME_COUNT; - - while (count-- > 0) - { - CHECK_FOR_INTERRUPTS(); - rc = WaitLatch(&MyProc->procLatch, - WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, - 100L); - if (rc & WL_POSTMASTER_DEATH) - break; - ResetLatch(&MyProc->procLatch); - LWLockAcquire(diskquota_locks.message_box_lock, LW_SHARED); - if (message_box->result != ERR_PENDING) - { - LWLockRelease(diskquota_locks.message_box_lock); - break; - } - LWLockRelease(diskquota_locks.message_box_lock); - } - } - LWLockAcquire(diskquota_locks.message_box_lock, LW_SHARED); - if (message_box->result != ERR_OK) - { - LWLockRelease(diskquota_locks.message_box_lock); - LWLockRelease(diskquota_locks.extension_lock); - elog(ERROR, "[diskquota] failed to create diskquota extension: %s", err_code_to_err_message((MessageResult) message_box->result)); - } - LWLockRelease(diskquota_locks.message_box_lock); - LWLockRelease(diskquota_locks.extension_lock); -out: - if (next_object_access_hook) - (*next_object_access_hook) (access, classId, objectId, - subId, arg); -} + HeapTuple tuple; -static const char * -err_code_to_err_message(MessageResult code) -{ - switch (code) - { - case ERR_PENDING: - return "no response from launcher, or timeout"; - case ERR_OK: - return "NO ERROR"; - case ERR_EXCEED: - return "too many database to monitor"; - case ERR_ADD_TO_DB: - return "add dbid to database_list failed"; - case ERR_START_WORKER: - return "start worker failed"; - case ERR_INVALID_DBID: - return "invalid dbid"; - default: - return "unknown error"; - } + if (dbid == InvalidOid) + return false; + tuple = SearchSysCache1(DATABASEOID, ObjectIdGetDatum(dbid)); + if (!HeapTupleIsValid(tuple)) + return false; + ReleaseSysCache(tuple); + return true; } diff --git a/diskquota.h b/diskquota.h index 300f82b738c..ae543d65ff2 100644 --- a/diskquota.h +++ b/diskquota.h @@ -26,7 +26,7 @@ struct DiskQuotaLocks { LWLock *active_table_lock; LWLock *black_map_lock; - LWLock *message_box_lock; + LWLock *extension_ddl_message_lock; LWLock *extension_lock; /* ensure create diskquota extension serially */ }; typedef struct DiskQuotaLocks DiskQuotaLocks; @@ -41,7 +41,7 @@ typedef struct DiskQuotaLocks DiskQuotaLocks; * to stop the diskquota worker process and remove the dbOid from diskquota * database_list table as well. 
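[Editor's aside: the struct renamed in the hunk below mediates a small request/response protocol between backends and the launcher. A condensed view; the field names follow the diff, but the types of cmd, result and dbid are partly assumed from usage (the real header uses the MessageCommand/MessageResult enums), and the step comments are an editorial summary, not code from the patch.]

typedef struct ExtensionDDLMessage
{
	int			launcher_pid;	/* owned by launcher: set once at startup */
	int			req_pid;		/* owned by backend: its PID marks a request */
	int			cmd;			/* CMD_CREATE_EXTENSION or CMD_DROP_EXTENSION */
	int			result;			/* ERR_PENDING while in flight, then a result */
	Oid			dbid;			/* database the DDL ran in */
} ExtensionDDLMessage;

/*
 * Backend:  take the lock exclusively, fill req_pid/cmd/dbid and set
 *           result = ERR_PENDING, kill(launcher_pid, SIGUSR1), release,
 *           then poll result under a shared lock every 100 ms.
 * Launcher: the SIGUSR1 handler sets a flag; the main loop copies the
 *           message out under a shared lock, performs the add/del, and
 *           writes result back under an exclusive lock.
 */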
*/ -struct MessageBox +struct ExtensionDDLMessage { int launcher_pid; /* diskquota launcher pid */ int req_pid; /* pid of the QD process which create/drop @@ -67,6 +67,8 @@ enum MessageResult ERR_EXCEED, /* add the dbid to diskquota_namespace.database_list failed */ ERR_ADD_TO_DB, + /* delete dbid from diskquota_namespace.database_list failed */ + ERR_DEL_FROM_DB, /* cann't start worker process */ ERR_START_WORKER, /* invalid dbid */ @@ -74,16 +76,19 @@ enum MessageResult ERR_UNKNOWN, }; -typedef struct MessageBox MessageBox; +typedef struct ExtensionDDLMessage ExtensionDDLMessage; typedef enum MessageCommand MessageCommand; typedef enum MessageResult MessageResult; extern DiskQuotaLocks diskquota_locks; -extern MessageBox *message_box; +extern ExtensionDDLMessage *extension_ddl_message; + +/* drop extension hook */ +extern void register_diskquota_object_access_hook(void); /* enforcement interface*/ extern void init_disk_quota_enforcement(void); -extern void diskquota_invalidate_db(Oid dbid); +extern void invalidate_database_blackmap(Oid dbid); /* quota model interface*/ extern void init_disk_quota_shmem(void); diff --git a/diskquota_utility.c b/diskquota_utility.c new file mode 100644 index 00000000000..fada97215ca --- /dev/null +++ b/diskquota_utility.c @@ -0,0 +1,558 @@ +/* ------------------------------------------------------------------------- + * + * diskquota_utility.c + * + * Diskquota utility contains some help functions for diskquota. + * set_schema_quota and set_role_quota is used by user to set quota limit. + * init_table_size_table is used to initialize table 'diskquota.table_size' + * diskquota_start_worker is used when 'create extension' DDL. It will start + * the corresponding worker process immediately. + * + * Copyright (c) 2018-Present Pivotal Software, Inc. + * + * IDENTIFICATION + * diskquota/diskquota_utility.c + * + * ------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include + +#include "access/xact.h" +#include "catalog/namespace.h" +#include "catalog/objectaccess.h" +#include "catalog/pg_collation.h" +#include "catalog/pg_database.h" +#include "catalog/pg_extension.h" +#include "catalog/pg_type.h" +#include "commands/dbcommands.h" +#include "commands/extension.h" +#include "executor/spi.h" +#include "nodes/makefuncs.h" +#include "storage/proc.h" +#include "tcop/utility.h" +#include "utils/acl.h" +#include "utils/builtins.h" +#include "utils/fmgroids.h" +#include "utils/formatting.h" +#include "utils/memutils.h" +#include "utils/numeric.h" + +#include "diskquota.h" + +/* disk quota helper function */ + +PG_FUNCTION_INFO_V1(init_table_size_table); +PG_FUNCTION_INFO_V1(diskquota_start_worker); +PG_FUNCTION_INFO_V1(set_schema_quota); +PG_FUNCTION_INFO_V1(set_role_quota); + +/* timeout count to wait response from launcher process, in 1/10 sec */ +#define WAIT_TIME_COUNT 1200 + +static object_access_hook_type next_object_access_hook; + +static void dq_object_access_hook(ObjectAccessType access, Oid classId, + Oid objectId, int subId, void *arg); +static const char *ddl_err_code_to_err_message(MessageResult code); +static int64 get_size_in_mb(char *str); +static void set_quota_internal(Oid targetoid, int64 quota_limit_mb, QuotaType type); + +/* ---- Help Functions to set quota limit. ---- */ +/* + * Initialize table diskquota.table_size. 
+ * Calculate table sizes using the UDF pg_total_relation_size.
+ * This function is called by the user; errors should not
+ * be caught here, but sent back to the user.
+ */
+Datum
+init_table_size_table(PG_FUNCTION_ARGS)
+{
+	int			ret;
+	StringInfoData buf;
+
+	RangeVar   *rv;
+	Relation	rel;
+
+	/*
+	 * If error happens in init_table_size_table, just return error messages
+	 * to the client side. So there is no need to catch the error.
+	 */
+
+	/* ensure table diskquota.state exists */
+	rv = makeRangeVar("diskquota", "state", -1);
+	rel = heap_openrv_extended(rv, AccessShareLock, true);
+	if (!rel)
+	{
+		/* configuration table is missing. */
+		elog(ERROR, "table \"diskquota.state\" is missing in database \"%s\","
+			 " please recreate diskquota extension",
+			 get_database_name(MyDatabaseId));
+	}
+	heap_close(rel, NoLock);
+
+	SPI_connect();
+
+	/* delete all existing table size info from table_size. */
+	initStringInfo(&buf);
+	appendStringInfo(&buf, "delete from diskquota.table_size;");
+	ret = SPI_execute(buf.data, false, 0);
+	if (ret != SPI_OK_DELETE)
+		elog(ERROR, "cannot delete table_size table: error code %d", ret);
+
+	/* fill table_size table with table oid and size info. */
+	resetStringInfo(&buf);
+	appendStringInfo(&buf,
+					 "insert into diskquota.table_size "
+					 "select oid, pg_total_relation_size(oid) from pg_class "
+					 "where oid>= %u and (relkind='r' or relkind='m');",
+					 FirstNormalObjectId);
+	ret = SPI_execute(buf.data, false, 0);
+	if (ret != SPI_OK_INSERT)
+		elog(ERROR, "cannot insert table_size table: error code %d", ret);
+
+	/* set diskquota state to ready. */
+	resetStringInfo(&buf);
+	appendStringInfo(&buf,
+					 "update diskquota.state set state = %u;",
+					 DISKQUOTA_READY_STATE);
+	ret = SPI_execute(buf.data, false, 0);
+	if (ret != SPI_OK_UPDATE)
+		elog(ERROR, "cannot update state table: error code %d", ret);
+
+	SPI_finish();
+	PG_RETURN_VOID();
+}
+
+/*
+ * Trigger to start the diskquota worker when the diskquota extension is created.
+ * This function is called on the backend side, and sends a message to the
+ * diskquota launcher. The launcher process is responsible for starting the real
+ * diskquota worker process.
+ */
+Datum
+diskquota_start_worker(PG_FUNCTION_ARGS)
+{
+	int			rc;
+
+	/*
+	 * Lock on extension_lock to avoid multiple backend create diskquota
+	 * extension at the same time.
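+	 *
+	 * A sketch of the handshake implemented below: the backend fills in
+	 * extension_ddl_message while holding extension_ddl_message_lock,
+	 * signals the launcher with SIGUSR1, and then polls the result on its
+	 * own latch, sleeping 100ms per round for at most WAIT_TIME_COUNT
+	 * rounds (about two minutes) before treating the request as failed.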
+	 */
+	LWLockAcquire(diskquota_locks.extension_lock, LW_EXCLUSIVE);
+	LWLockAcquire(diskquota_locks.extension_ddl_message_lock, LW_EXCLUSIVE);
+	extension_ddl_message->req_pid = MyProcPid;
+	extension_ddl_message->cmd = CMD_CREATE_EXTENSION;
+	extension_ddl_message->result = ERR_PENDING;
+	extension_ddl_message->dbid = MyDatabaseId;
+	/* signal the diskquota launcher process */
+	rc = kill(extension_ddl_message->launcher_pid, SIGUSR1);
+	LWLockRelease(diskquota_locks.extension_ddl_message_lock);
+	if (rc == 0)
+	{
+		int			count = WAIT_TIME_COUNT;
+
+		while (count-- > 0)
+		{
+			CHECK_FOR_INTERRUPTS();
+			rc = WaitLatch(&MyProc->procLatch,
+						   WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH,
+						   100L);
+			if (rc & WL_POSTMASTER_DEATH)
+				break;
+			ResetLatch(&MyProc->procLatch);
+			LWLockAcquire(diskquota_locks.extension_ddl_message_lock, LW_SHARED);
+			if (extension_ddl_message->result != ERR_PENDING)
+			{
+				LWLockRelease(diskquota_locks.extension_ddl_message_lock);
+				break;
+			}
+			LWLockRelease(diskquota_locks.extension_ddl_message_lock);
+		}
+	}
+	LWLockAcquire(diskquota_locks.extension_ddl_message_lock, LW_SHARED);
+	if (extension_ddl_message->result != ERR_OK)
+	{
+		LWLockRelease(diskquota_locks.extension_ddl_message_lock);
+		LWLockRelease(diskquota_locks.extension_lock);
+		elog(ERROR, "[diskquota] failed to create diskquota extension: %s", ddl_err_code_to_err_message((MessageResult) extension_ddl_message->result));
+	}
+	LWLockRelease(diskquota_locks.extension_ddl_message_lock);
+	LWLockRelease(diskquota_locks.extension_lock);
+	PG_RETURN_VOID();
+}
+
+/*
+ * Add dq_object_access_hook to handle drop extension event.
+ */
+void
+register_diskquota_object_access_hook(void)
+{
+	next_object_access_hook = object_access_hook;
+	object_access_hook = dq_object_access_hook;
+}
+
+/*
+ * This hook is used to handle the drop extension diskquota event.
+ * It will send a CMD_DROP_EXTENSION message to the diskquota launcher.
+ * The launcher will terminate the corresponding worker process and
+ * remove the dbOid from the database_list table.
+ */
+static void
+dq_object_access_hook(ObjectAccessType access, Oid classId,
+					  Oid objectId, int subId, void *arg)
+{
+	Oid			oid;
+	int			rc;
+
+	if (access != OAT_DROP || classId != ExtensionRelationId)
+		goto out;
+	oid = get_extension_oid("diskquota", true);
+	if (oid != objectId)
+		goto out;
+
+	/*
+	 * Lock on extension_lock to avoid multiple backend create diskquota
+	 * extension at the same time.
+ */ + LWLockAcquire(diskquota_locks.extension_lock, LW_EXCLUSIVE); + LWLockAcquire(diskquota_locks.extension_ddl_message_lock, LW_EXCLUSIVE); + extension_ddl_message->req_pid = MyProcPid; + extension_ddl_message->cmd = CMD_DROP_EXTENSION; + extension_ddl_message->result = ERR_PENDING; + extension_ddl_message->dbid = MyDatabaseId; + rc = kill(extension_ddl_message->launcher_pid, SIGUSR1); + LWLockRelease(diskquota_locks.extension_ddl_message_lock); + if (rc == 0) + { + int count = WAIT_TIME_COUNT; + + while (count-- > 0) + { + CHECK_FOR_INTERRUPTS(); + rc = WaitLatch(&MyProc->procLatch, + WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, + 100L); + if (rc & WL_POSTMASTER_DEATH) + break; + ResetLatch(&MyProc->procLatch); + LWLockAcquire(diskquota_locks.extension_ddl_message_lock, LW_SHARED); + if (extension_ddl_message->result != ERR_PENDING) + { + LWLockRelease(diskquota_locks.extension_ddl_message_lock); + break; + } + LWLockRelease(diskquota_locks.extension_ddl_message_lock); + } + } + LWLockAcquire(diskquota_locks.extension_ddl_message_lock, LW_SHARED); + if (extension_ddl_message->result != ERR_OK) + { + LWLockRelease(diskquota_locks.extension_ddl_message_lock); + LWLockRelease(diskquota_locks.extension_lock); + elog(ERROR, "[diskquota launcher] failed to drop diskquota extension: %s", ddl_err_code_to_err_message((MessageResult) extension_ddl_message->result)); + } + LWLockRelease(diskquota_locks.extension_ddl_message_lock); + LWLockRelease(diskquota_locks.extension_lock); +out: + if (next_object_access_hook) + (*next_object_access_hook) (access, classId, objectId, + subId, arg); +} + +/* + * For extension DDL('create extension/drop extension') + * Using this function to convert error code from diskquota + * launcher to error message and return it to client. + */ +static const char * +ddl_err_code_to_err_message(MessageResult code) +{ + switch (code) + { + case ERR_PENDING: + return "no response from diskquota launcher, check whether launcher process exists"; + case ERR_OK: + return "succeeded"; + case ERR_EXCEED: + return "too many databases to monitor"; + case ERR_ADD_TO_DB: + return "add dbid to database_list failed"; + case ERR_DEL_FROM_DB: + return "delete dbid from database_list failed"; + case ERR_START_WORKER: + return "start diskquota worker failed"; + case ERR_INVALID_DBID: + return "invalid dbid"; + default: + return "unknown error"; + } +} + + +/* + * Set disk quota limit for role. + */ +Datum +set_role_quota(PG_FUNCTION_ARGS) +{ + Oid roleoid; + char *rolname; + char *sizestr; + int64 quota_limit_mb; + + if (!superuser()) + { + ereport(ERROR, + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + errmsg("must be superuser to set disk quota limit"))); + } + + rolname = text_to_cstring(PG_GETARG_TEXT_PP(0)); + rolname = str_tolower(rolname, strlen(rolname), DEFAULT_COLLATION_OID); + roleoid = get_role_oid(rolname, false); + + sizestr = text_to_cstring(PG_GETARG_TEXT_PP(1)); + sizestr = str_tolower(sizestr, strlen(sizestr), DEFAULT_COLLATION_OID); + quota_limit_mb = get_size_in_mb(sizestr); + + set_quota_internal(roleoid, quota_limit_mb, ROLE_QUOTA); + PG_RETURN_VOID(); +} + +/* + * Set disk quota limit for schema. 
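+ *
+ * For example (the schema name is illustrative), a superuser runs:
+ *     SELECT diskquota.set_schema_quota('s1', '1 MB');
+ * and, per set_quota_internal() below, passing a non-positive size such
+ * as '-1' removes an existing quota entry from diskquota.quota_config.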
+ */ +Datum +set_schema_quota(PG_FUNCTION_ARGS) +{ + Oid namespaceoid; + char *nspname; + char *sizestr; + int64 quota_limit_mb; + + if (!superuser()) + { + ereport(ERROR, + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + errmsg("must be superuser to set disk quota limit"))); + } + + nspname = text_to_cstring(PG_GETARG_TEXT_PP(0)); + nspname = str_tolower(nspname, strlen(nspname), DEFAULT_COLLATION_OID); + namespaceoid = get_namespace_oid(nspname, false); + + sizestr = text_to_cstring(PG_GETARG_TEXT_PP(1)); + sizestr = str_tolower(sizestr, strlen(sizestr), DEFAULT_COLLATION_OID); + quota_limit_mb = get_size_in_mb(sizestr); + + set_quota_internal(namespaceoid, quota_limit_mb, NAMESPACE_QUOTA); + PG_RETURN_VOID(); +} + +/* + * Write the quota limit info into quota_config table under + * 'diskquota' schema of the current database. + */ +static void +set_quota_internal(Oid targetoid, int64 quota_limit_mb, QuotaType type) +{ + int ret; + StringInfoData buf; + + initStringInfo(&buf); + appendStringInfo(&buf, + "select true from diskquota.quota_config where targetoid = %u" + " and quotatype =%d", + targetoid, type); + + /* + * If error happens in set_quota_internal, just return error messages to + * the client side. So there is no need to catch the error. + */ + SPI_connect(); + + ret = SPI_execute(buf.data, true, 0); + if (ret != SPI_OK_SELECT) + elog(ERROR, "cannot select quota setting table: error code %d", ret); + + /* if the schema or role's quota has been set before */ + if (SPI_processed == 0 && quota_limit_mb > 0) + { + resetStringInfo(&buf); + appendStringInfo(&buf, + "insert into diskquota.quota_config values(%u,%d,%ld);", + targetoid, type, quota_limit_mb); + ret = SPI_execute(buf.data, false, 0); + if (ret != SPI_OK_INSERT) + elog(ERROR, "cannot insert into quota setting table, error code %d", ret); + } + else if (SPI_processed > 0 && quota_limit_mb <= 0) + { + resetStringInfo(&buf); + appendStringInfo(&buf, + "delete from diskquota.quota_config where targetoid=%u" + " and quotatype=%d;", + targetoid, type); + ret = SPI_execute(buf.data, false, 0); + if (ret != SPI_OK_DELETE) + elog(ERROR, "cannot delete item from quota setting table, error code %d", ret); + } + else if (SPI_processed > 0 && quota_limit_mb > 0) + { + resetStringInfo(&buf); + appendStringInfo(&buf, + "update diskquota.quota_config set quotalimitMB = %ld where targetoid=%u" + " and quotatype=%d;", + quota_limit_mb, targetoid, type); + ret = SPI_execute(buf.data, false, 0); + if (ret != SPI_OK_UPDATE) + elog(ERROR, "cannot update quota setting table, error code %d", ret); + } + + /* + * And finish our transaction. + */ + SPI_finish(); + return; +} + +/* + * Convert a human-readable size to a size in MB. 
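+ *
+ * Accepted inputs, per the parser below: an optional sign, digits with an
+ * optional fraction and exponent, then an optional unit of MB, GB, TB or
+ * PB (matched case-insensitively; callers lowercase the string first).
+ * A bare number such as '100' is taken as MB, and '2.5 GB' yields 2560.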
+ */ +static int64 +get_size_in_mb(char *str) +{ + char *strptr, + *endptr; + char saved_char; + Numeric num; + int64 result; + bool have_digits = false; + + /* Skip leading whitespace */ + strptr = str; + while (isspace((unsigned char) *strptr)) + strptr++; + + /* Check that we have a valid number and determine where it ends */ + endptr = strptr; + + /* Part (1): sign */ + if (*endptr == '-' || *endptr == '+') + endptr++; + + /* Part (2): main digit string */ + if (isdigit((unsigned char) *endptr)) + { + have_digits = true; + do + endptr++; + while (isdigit((unsigned char) *endptr)); + } + + /* Part (3): optional decimal point and fractional digits */ + if (*endptr == '.') + { + endptr++; + if (isdigit((unsigned char) *endptr)) + { + have_digits = true; + do + endptr++; + while (isdigit((unsigned char) *endptr)); + } + } + + /* Complain if we don't have a valid number at this point */ + if (!have_digits) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("invalid size: \"%s\"", str))); + + /* Part (4): optional exponent */ + if (*endptr == 'e' || *endptr == 'E') + { + long exponent; + char *cp; + + /* + * Note we might one day support EB units, so if what follows 'E' + * isn't a number, just treat it all as a unit to be parsed. + */ + exponent = strtol(endptr + 1, &cp, 10); + (void) exponent; /* Silence -Wunused-result warnings */ + if (cp > endptr + 1) + endptr = cp; + } + + /* + * Parse the number, saving the next character, which may be the first + * character of the unit string. + */ + saved_char = *endptr; + *endptr = '\0'; + + num = DatumGetNumeric(DirectFunctionCall3(numeric_in, + CStringGetDatum(strptr), + ObjectIdGetDatum(InvalidOid), + Int32GetDatum(-1))); + + *endptr = saved_char; + + /* Skip whitespace between number and unit */ + strptr = endptr; + while (isspace((unsigned char) *strptr)) + strptr++; + + /* Handle possible unit */ + if (*strptr != '\0') + { + int64 multiplier = 0; + + /* Trim any trailing whitespace */ + endptr = str + strlen(str) - 1; + + while (isspace((unsigned char) *endptr)) + endptr--; + + endptr++; + *endptr = '\0'; + + /* Parse the unit case-insensitively */ + if (pg_strcasecmp(strptr, "mb") == 0) + multiplier = ((int64) 1); + + else if (pg_strcasecmp(strptr, "gb") == 0) + multiplier = ((int64) 1024); + + else if (pg_strcasecmp(strptr, "tb") == 0) + multiplier = ((int64) 1024) * 1024; + else if (pg_strcasecmp(strptr, "pb") == 0) + multiplier = ((int64) 1024) * 1024 * 1024; + else + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("invalid size: \"%s\"", str), + errdetail("Invalid size unit: \"%s\".", strptr), + errhint("Valid units are \"MB\", \"GB\", \"TB\", and \"PB\"."))); + + if (multiplier > 1) + { + Numeric mul_num; + + mul_num = DatumGetNumeric(DirectFunctionCall1(int8_numeric, + Int64GetDatum(multiplier))); + + num = DatumGetNumeric(DirectFunctionCall2(numeric_mul, + NumericGetDatum(mul_num), + NumericGetDatum(num))); + } + } + + result = DatumGetInt64(DirectFunctionCall1(numeric_int8, + NumericGetDatum(num))); + + return result; +} diff --git a/enforcement.c b/enforcement.c index 41d3c2a90b1..687653507dc 100644 --- a/enforcement.c +++ b/enforcement.c @@ -8,7 +8,7 @@ * Copyright (c) 2018-Present Pivotal Software, Inc. 
* * IDENTIFICATION - * gpcontrib/gp_diskquota/enforcement.c + * diskquota/enforcement.c * * ------------------------------------------------------------------------- */ diff --git a/expected/test_extension.out b/expected/test_extension.out index 791de0c5f2b..4f3d71bd207 100644 --- a/expected/test_extension.out +++ b/expected/test_extension.out @@ -248,13 +248,13 @@ ERROR: schema's disk space quota exceeded with name:sx DROP TABLE SX.a; \c dbx9 CREATE EXTENSION diskquota; -ERROR: [diskquota] failed to create diskquota extension: too many database to monitor (diskquota.c:1102) +ERROR: [diskquota] failed to create diskquota extension: too many databases to monitor (diskquota_utility.c:175) \! sleep 2 \! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 11 \c dbx10 CREATE EXTENSION diskquota; -ERROR: [diskquota] failed to create diskquota extension: too many database to monitor (diskquota.c:1102) +ERROR: [diskquota] failed to create diskquota extension: too many databases to monitor (diskquota_utility.c:175) \! sleep 2 \! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 11 diff --git a/gp_activetable.c b/gp_activetable.c index e53de53af81..170c88452cc 100644 --- a/gp_activetable.c +++ b/gp_activetable.c @@ -7,7 +7,7 @@ * Copyright (c) 2018-Present Pivotal Software, Inc. * * IDENTIFICATION - * gpcontrib/gp_diskquota/gp_activetable.c + * diskquota/gp_activetable.c * * ------------------------------------------------------------------------- */ diff --git a/init_file b/init_file index 34a7542c0d5..5261e4efb5d 100644 --- a/init_file +++ b/init_file @@ -7,4 +7,6 @@ -- start_matchsubs m/diskquota.c:\d+\)/ s/diskquota.c:\d+\)/diskquota.c:xxx/ +m/diskquota_utility.c:\d+\)/ +s/diskquota_utility.c:\d+\)/diskquota_utility.c:xxx/ -- end_matchsubs diff --git a/quotamodel.c b/quotamodel.c index 5d17e844f06..080d15b3482 100644 --- a/quotamodel.c +++ b/quotamodel.c @@ -8,7 +8,7 @@ * Copyright (c) 2018-Present Pivotal Software, Inc. 
* * IDENTIFICATION - * gpcontrib/gp_diskquota/quotamodel.c + * diskquota/quotamodel.c * * ------------------------------------------------------------------------- */ @@ -152,7 +152,7 @@ DiskQuotaShmemSize(void) { Size size; - size = sizeof(MessageBox); + size = sizeof(ExtensionDDLMessage); size = add_size(size, hash_estimate_size(MAX_DISK_QUOTA_BLACK_ENTRIES, sizeof(BlackMapEntry))); size = add_size(size, hash_estimate_size(diskquota_max_active_tables, sizeof(DiskQuotaActiveTableEntry))); return size; @@ -163,7 +163,7 @@ init_lwlocks(void) { diskquota_locks.active_table_lock = LWLockAssign(); diskquota_locks.black_map_lock = LWLockAssign(); - diskquota_locks.message_box_lock = LWLockAssign(); + diskquota_locks.extension_ddl_message_lock = LWLockAssign(); diskquota_locks.extension_lock = LWLockAssign(); } @@ -185,11 +185,11 @@ disk_quota_shmem_startup(void) LWLockAcquire(AddinShmemInitLock, LW_EXCLUSIVE); init_lwlocks(); - message_box = ShmemInitStruct("disk_quota_message_box", - sizeof(MessageBox), - &found); + extension_ddl_message = ShmemInitStruct("disk_quota_extension_ddl_message", + sizeof(ExtensionDDLMessage), + &found); if (!found) - memset((void *) message_box, 0, sizeof(MessageBox)); + memset((void *) extension_ddl_message, 0, sizeof(ExtensionDDLMessage)); memset(&hash_ctl, 0, sizeof(hash_ctl)); hash_ctl.keysize = sizeof(BlackMapEntry); @@ -363,7 +363,7 @@ check_diskquota_state_is_ready(void) bool is_ready = false; bool connected = false; bool pushed_active_snap = false; - bool error_happens = false; + bool ret = true; StartTransactionCommand(); @@ -391,7 +391,7 @@ check_diskquota_state_is_ready(void) HOLD_INTERRUPTS(); EmitErrorReport(); FlushErrorState(); - error_happens = true; + ret = false; /* Now we can allow interrupts again */ RESUME_INTERRUPTS(); } @@ -400,7 +400,7 @@ check_diskquota_state_is_ready(void) SPI_finish(); if (pushed_active_snap) PopActiveSnapshot(); - if (error_happens) + if (ret) CommitTransactionCommand(); else AbortCurrentTransaction(); @@ -1020,20 +1020,6 @@ do_load_quotas(void) QuotaLimitEntry *quota_entry; HASH_SEQ_STATUS iter; - RangeVar *rv; - Relation rel; - - rv = makeRangeVar("diskquota", "quota_config", -1); - rel = heap_openrv_extended(rv, AccessShareLock, true); - if (!rel) - { - /* configuration table is missing. */ - elog(ERROR, "[diskquota] configuration table \"quota_config\" is missing in database \"%s\"," - " please recreate diskquota extension", - get_database_name(MyDatabaseId)); - } - heap_close(rel, AccessShareLock); - /* * TODO: we should skip to reload quota config when there is no change in * quota.config. A flag in shared memory could be used to detect the quota @@ -1196,7 +1182,7 @@ quota_check_common(Oid reloid) * invalidate all black entry with a specific dbid in SHM */ void -diskquota_invalidate_db(Oid dbid) +invalidate_database_blackmap(Oid dbid) { BlackMapEntry *entry; HASH_SEQ_STATUS iter; From 5ed62f05196e6e6029b010fa67742d56b210caaf Mon Sep 17 00:00:00 2001 From: Hubert Zhang Date: Wed, 10 Apr 2019 22:03:44 +0800 Subject: [PATCH 024/330] Update concourse pipeline to use gcs. 
(#21) --- concourse/pipeline/pipeline.yml | 61 ++++++++++++++++----------------- 1 file changed, 29 insertions(+), 32 deletions(-) diff --git a/concourse/pipeline/pipeline.yml b/concourse/pipeline/pipeline.yml index 8484259ffe4..5d17dd9dc30 100644 --- a/concourse/pipeline/pipeline.yml +++ b/concourse/pipeline/pipeline.yml @@ -10,6 +10,12 @@ groups: - diskquota_centos6_test - diskquota_centos7_test +resource_types: +- name: gcs + type: docker-image + source: + repository: frodenas/gcs-resource + resources: # Image Resources @@ -37,47 +43,38 @@ resources: - name: diskquota_src type: git source: - branch: gpdbpipeline - uri: https://github.com/zhangh43/diskquota.git + branch: gpdb + uri: https://github.com/greenplum-db/diskquota.git -# centos 7 - -- name: bin_diskquota_centos7 - type: s3 +# gpdb binary on gcs is located as different folder for different version +# use gcs_gpdb_binary_folder to specify them. +- name: bin_gpdb_centos6 + type: gcs source: - bucket: {{pl-bucket-name}} - region_name: {{aws-region}} - access_key_id: {{bucket-access-key-id}} - secret_access_key: {{bucket-secret-access-key}} - versioned_file: build/gpdb6/diskquota/component_diskquota.tar.gz - + bucket: {{gcs-bucket-intermediates}} + json_key: {{concourse-gcs-resources-service-account-key}} + versioned_file: ((gcs_gpdb_binary_folder))/bin_gpdb_centos6/bin_gpdb.tar.gz - name: bin_gpdb_centos7 - type: s3 + type: gcs source: - bucket: {{bucket-name}} - region_name: {{aws-region}} - access_key_id: {{bucket-access-key-id}} - secret_access_key: {{bucket-secret-access-key}} - versioned_file: bin_gpdb_centos7/bin_gpdb.tar.gz + bucket: {{gcs-bucket-intermediates}} + json_key: {{concourse-gcs-resources-service-account-key}} + versioned_file: ((gcs_gpdb_binary_folder))/bin_gpdb_centos7/bin_gpdb.tar.gz -- name: bin_diskquota_centos6 - type: s3 +- name: bin_diskquota_centos7 + type: gcs source: - bucket: {{pl-bucket-name}} - region_name: {{aws-region}} - access_key_id: {{bucket-access-key-id}} - secret_access_key: {{bucket-secret-access-key}} - versioned_file: build/gpdb6/diskquota/component_diskquota.tar.gz + bucket: {{gcs-bucket-intermediates}} + json_key: {{concourse-gcs-resources-service-account-key}} + versioned_file: diskquota/published/((gcs_diskquota_binary_folder))/rhel7/component_diskquota.tar.gz -- name: bin_gpdb_centos6 - type: s3 +- name: bin_diskquota_centos6 + type: gcs source: - bucket: {{bucket-name}} - region_name: {{aws-region}} - access_key_id: {{bucket-access-key-id}} - secret_access_key: {{bucket-secret-access-key}} - versioned_file: bin_gpdb_centos/bin_gpdb.tar.gz + bucket: {{gcs-bucket-intermediates}} + json_key: {{concourse-gcs-resources-service-account-key}} + versioned_file: diskquota/published/((gcs_diskquota_binary_folder))/rhel6/component_diskquota.tar.gz ## jobs ## ====================================================================== From 82e8ad26c2e28c9865b2d81e16cb60ca2383e577 Mon Sep 17 00:00:00 2001 From: Hubert Zhang Date: Tue, 9 Apr 2019 17:18:58 +0800 Subject: [PATCH 025/330] Add message when create extension and db is not empty Warning user to run select diskquota.init_table_size_table() to build the table size information. 
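As a usage sketch of the behavior this commit adds, the intended sequence on a database that already holds user tables is:

```
CREATE EXTENSION diskquota;
-- WARNING: database is not empty, please run `select diskquota.init_table_size_table()` ...
SELECT diskquota.init_table_size_table();
```

init_table_size_table() rebuilds diskquota.table_size from pg_total_relation_size() and then sets diskquota.state to the ready state.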
---
 diskquota--1.0.sql | 4 +--
 diskquota_utility.c | 55 ++++++++++++++++++++++++++++++++++++-
 expected/test_extension.out | 5 ++--
 3 files changed, 59 insertions(+), 5 deletions(-)

diff --git a/diskquota--1.0.sql b/diskquota--1.0.sql
index ffb89600b2a..e326f0bb96c 100644
--- a/diskquota--1.0.sql
+++ b/diskquota--1.0.sql
@@ -6,7 +6,7 @@ CREATE SCHEMA diskquota;

 -- Configuration table
-create table diskquota.quota_config (targetOid oid, quotatype int, quotalimitMB int8, PRIMARY KEY(targetOid, quotatype));
+CREATE TABLE diskquota.quota_config (targetOid oid, quotatype int, quotalimitMB int8, PRIMARY KEY(targetOid, quotatype));

 SELECT pg_catalog.pg_extension_config_dump('diskquota.quota_config', '');

@@ -20,7 +20,7 @@ RETURNS void STRICT
 AS 'MODULE_PATHNAME'
 LANGUAGE C;

-CREATE TABLE diskquota.table_size (tableid oid, size int8, PRIMARY KEY(tableid));
+CREATE TABLE diskquota.table_size (tableid oid, size bigint, PRIMARY KEY(tableid));

 CREATE TABLE diskquota.state (state int, PRIMARY KEY(state));

diff --git a/diskquota_utility.c b/diskquota_utility.c
index fada97215ca..f87e48f2bb4 100644
--- a/diskquota_utility.c
+++ b/diskquota_utility.c
@@ -52,7 +52,7 @@ PG_FUNCTION_INFO_V1(set_role_quota);
 #define WAIT_TIME_COUNT 1200

 static object_access_hook_type next_object_access_hook;
-
+static bool is_database_empty(void);
 static void dq_object_access_hook(ObjectAccessType access, Oid classId,
 					  Oid objectId, int subId, void *arg);
 static const char *ddl_err_code_to_err_message(MessageResult code);
@@ -180,9 +180,62 @@ diskquota_start_worker(PG_FUNCTION_ARGS)
 	}
 	LWLockRelease(diskquota_locks.extension_ddl_message_lock);
 	LWLockRelease(diskquota_locks.extension_lock);
+
+	/* notify DBA to run init_table_size_table() when db is not empty */
+	if (!is_database_empty())
+	{
+		ereport(WARNING, (errmsg("database is not empty, please run `select diskquota.init_table_size_table()` to initialize table_size information for diskquota extension. Note that for large database, this function may take a long time.")));
+	}
 	PG_RETURN_VOID();
 }

+/*
+ * Check whether database is empty (no user tables created)
+ */
+static bool
+is_database_empty(void)
+{
+	int			ret;
+	StringInfoData buf;
+	TupleDesc	tupdesc;
+	bool		is_empty = false;

+	initStringInfo(&buf);
+	appendStringInfo(&buf,
+					 "SELECT (count(relname) = 0) FROM pg_class AS c, pg_namespace AS n WHERE c.oid > 16384 and relnamespace = n.oid and nspname != 'diskquota'");
+
+	/*
+	 * If error happens in is_database_empty, just return error messages to
+	 * the client side. So there is no need to catch the error.
+	 */
+	SPI_connect();
+
+	ret = SPI_execute(buf.data, true, 0);
+	if (ret != SPI_OK_SELECT)
+		elog(ERROR, "cannot select pg_class and pg_namespace table: error code %d", ret);
+	tupdesc = SPI_tuptable->tupdesc;
+	/* check the SQL result to see whether the database is empty */
+	if (SPI_processed > 0)
+	{
+		HeapTuple	tup = SPI_tuptable->vals[0];
+		Datum		dat;
+		bool		isnull;
+
+		dat = SPI_getbinval(tup, tupdesc, 1, &isnull);
+		if (!isnull)
+		{
+			/* check whether condition `count(relname) = 0` is true */
+			is_empty = DatumGetBool(dat);
+		}
+	}
+
+	/*
+	 * And finish our transaction.
+	 */
+	SPI_finish();
+	return is_empty;
+}
+
 /*
  * Add dq_object_access_hook to handle drop extension event.
 */
diff --git a/expected/test_extension.out b/expected/test_extension.out
index 4f3d71bd207..ff831e27fa1 100644
--- a/expected/test_extension.out
+++ b/expected/test_extension.out
@@ -53,6 +53,7 @@ NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as
 HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
 INSERT INTO SX.a values(generate_series(0, 100000));
 CREATE EXTENSION diskquota;
+WARNING: database is not empty, please run `select diskquota.init_table_size_table()` to initialize table_size information for diskquota extension. Note that for large database, this function may take a long time.
 SELECT diskquota.init_table_size_table();
 init_table_size_table
-----------------------
@@ -248,13 +249,13 @@ ERROR: schema's disk space quota exceeded with name:sx
 DROP TABLE SX.a;
 \c dbx9
 CREATE EXTENSION diskquota;
-ERROR: [diskquota] failed to create diskquota extension: too many databases to monitor (diskquota_utility.c:175)
+ERROR: [diskquota] failed to create diskquota extension: too many databases to monitor (diskquota_utility.c:179)
 \! sleep 2
 \! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l
 11
 \c dbx10
 CREATE EXTENSION diskquota;
-ERROR: [diskquota] failed to create diskquota extension: too many databases to monitor (diskquota_utility.c:175)
+ERROR: [diskquota] failed to create diskquota extension: too many databases to monitor (diskquota_utility.c:179)
 \! sleep 2
 \! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l
 11

From 69555e5a23ba9e4dcad26b6a458612d626b9a97a Mon Sep 17 00:00:00 2001
From: Weinan WANG
Date: Thu, 11 Apr 2019 14:21:59 +0800
Subject: [PATCH 026/330] Add schema & role quota quick search view (#24)

Add views "diskquota.show_fast_schema_quota_view" and
"diskquota.show_fast_role_quota_view", which use diskquota.table_size info
instead of physically stat()ing each data file, to accelerate schema/role
size calculation.

Delete diskquota.show_schema_quota_view and diskquota.show_role_quota_view,
since their query plans are not correct.
---
 README.md | 6 +++---
 diskquota--1.0.sql | 29 ++++++++++++++++-----------
 expected/test_primary_failure.out | 10 ++++++++--
 expected/test_schema.out | 8 +++++++-
 sql/test_primary_failure.sql | 3 ++-
 sql/test_schema.sql | 3 ++-
 6 files changed, 40 insertions(+), 19 deletions(-)

diff --git a/README.md b/README.md
index 261cae3b7c1..d980fc2a94b 100644
--- a/README.md
+++ b/README.md
@@ -166,7 +166,7 @@ reset search_path;
 3. Show schema quota limit and current usage
 ```
-select * from diskquota.show_schema_quota_view;
+select * from diskquota.show_fast_schema_quota_view;
 ```
@@ -235,10 +235,10 @@ AccessShareLock to relation (And worker process don't even know this reloid exis
 we need to skip it, and call stat() directly with tolerant to file unlink.
 Skip lock is dangerous and we plan to leave it as known issue at current stage.
-2. Missing empty schema or role in show_fast_schema_quota_view and show_fast_role_quota_view
 Currently, if there is no table in a specific schema or no table's owner is a specific role, these schemas or roles will not be listed in
-show_schema_quota_view and show_role_quota_view.
+show_fast_schema_quota_view and show_fast_role_quota_view.
 3.
Out of shared memory diff --git a/diskquota--1.0.sql b/diskquota--1.0.sql index e326f0bb96c..f57e59773a7 100644 --- a/diskquota--1.0.sql +++ b/diskquota--1.0.sql @@ -36,17 +36,24 @@ RETURNS void STRICT AS 'MODULE_PATHNAME' LANGUAGE C; -CREATE VIEW diskquota.show_schema_quota_view AS -SELECT pg_namespace.nspname as schema_name, pg_class.relnamespace as schema_oid, quota.quotalimitMB as quota_in_mb, sum(pg_total_relation_size(pg_class.oid)) as nspsize_in_bytes -FROM pg_namespace, pg_class, diskquota.quota_config as quota -WHERE pg_class.relnamespace = quota.targetoid and pg_class.relnamespace = pg_namespace.oid and quota.quotatype=0 -GROUP BY pg_class.relnamespace, pg_namespace.nspname, quota.quotalimitMB; - -CREATE VIEW diskquota.show_role_quota_view AS -SELECT pg_roles.rolname as role_name, pg_class.relowner as role_oid, quota.quotalimitMB as quota_in_mb, sum(pg_total_relation_size(pg_class.oid)) as rolsize_in_bytes -FROM pg_roles, pg_class, diskquota.quota_config as quota -WHERE pg_class.relowner = quota.targetoid and pg_class.relowner = pg_roles.oid and quota.quotatype=1 -GROUP BY pg_class.relowner, pg_roles.rolname, quota.quotalimitMB; +CREATE VIEW diskquota.show_fast_schema_quota_view AS +select pgns.nspname as schema_name, pgc.relnamespace as schema_oid, qc.quotalimitMB as quota_in_mb, sum(ts.size) as nspsize_in_bytes +from diskquota.table_size as ts, + pg_class as pgc, + diskquota.quota_config as qc, + pg_namespace as pgns +where ts.tableid = pgc.oid and qc.targetoid = pgc.relnamespace and pgns.oid = pgc.relnamespace +group by relnamespace, qc.quotalimitMB, pgns.nspname +order by pgns.nspname; + +CREATE VIEW diskquota.show_fast_role_quota_view AS +select pgr.rolname as role_name, pgc.relowner as role_oid, qc.quotalimitMB as quota_in_mb, sum(ts.size) as rolsize_in_bytes +from diskquota.table_size as ts, + pg_class as pgc, + diskquota.quota_config as qc, + pg_roles as pgr +WHERE pgc.relowner = qc.targetoid and pgc.relowner = pgr.oid and ts.tableid = pgc.oid +GROUP BY pgc.relowner, pgr.rolname, qc.quotalimitMB; CREATE VIEW diskquota.database_size_view AS SELECT ((SELECT SUM(pg_relation_size(oid)) FROM pg_class WHERE oid <= 16384)+ (SELECT SUM(size) FROM diskquota.table_size)) AS dbsize; diff --git a/expected/test_primary_failure.out b/expected/test_primary_failure.out index 7e8962098be..f6c3669cc09 100644 --- a/expected/test_primary_failure.out +++ b/expected/test_primary_failure.out @@ -91,10 +91,16 @@ select content, preferred_role, role, status, mode from gp_segment_configuration (2 rows) -- no sleep, it will take effect immediately -SELECT quota_in_mb, nspsize_in_bytes from diskquota.show_schema_quota_view where schema_name='ftsr'; +SELECT pg_sleep(10); + pg_sleep +---------- + +(1 row) + +SELECT quota_in_mb, nspsize_in_bytes from diskquota.show_fast_schema_quota_view where schema_name='ftsr'; quota_in_mb | nspsize_in_bytes -------------+------------------ - 200 | 1310720 + 200 | 3833856 (1 row) INSERT INTO a SELECT generate_series(1,100); diff --git a/expected/test_schema.out b/expected/test_schema.out index 635a83a3986..5440f445e5d 100644 --- a/expected/test_schema.out +++ b/expected/test_schema.out @@ -45,7 +45,13 @@ ALTER TABLE s2.a SET SCHEMA badquota; -- expect failed INSERT INTO badquota.a SELECT generate_series(0, 100); ERROR: schema's disk space quota exceeded with name:badquota -SELECT schema_name, quota_in_mb FROM diskquota.show_schema_quota_view WHERE schema_name = 's1'; +SELECT pg_sleep(10); + pg_sleep +---------- + +(1 row) + +SELECT schema_name, quota_in_mb FROM 
diskquota.show_fast_schema_quota_view WHERE schema_name = 's1'; schema_name | quota_in_mb -------------+------------- s1 | 1 diff --git a/sql/test_primary_failure.sql b/sql/test_primary_failure.sql index 513417f8a23..ed1e358827e 100644 --- a/sql/test_primary_failure.sql +++ b/sql/test_primary_failure.sql @@ -63,7 +63,8 @@ select pg_recoverseg((select datadir from gp_segment_configuration c where c.rol -- check GPDB status select content, preferred_role, role, status, mode from gp_segment_configuration where content = 0; -- no sleep, it will take effect immediately -SELECT quota_in_mb, nspsize_in_bytes from diskquota.show_schema_quota_view where schema_name='ftsr'; +SELECT pg_sleep(10); +SELECT quota_in_mb, nspsize_in_bytes from diskquota.show_fast_schema_quota_view where schema_name='ftsr'; INSERT INTO a SELECT generate_series(1,100); DROP TABLE a; diff --git a/sql/test_schema.sql b/sql/test_schema.sql index 3bce5a08fad..3e825db617d 100644 --- a/sql/test_schema.sql +++ b/sql/test_schema.sql @@ -27,7 +27,8 @@ ALTER TABLE s2.a SET SCHEMA badquota; -- expect failed INSERT INTO badquota.a SELECT generate_series(0, 100); -SELECT schema_name, quota_in_mb FROM diskquota.show_schema_quota_view WHERE schema_name = 's1'; +SELECT pg_sleep(10); +SELECT schema_name, quota_in_mb FROM diskquota.show_fast_schema_quota_view WHERE schema_name = 's1'; RESET search_path; DROP TABLE s1.a2, badquota.a; From 298f4512bd2973e272e6bc43725d6eb552d14dd8 Mon Sep 17 00:00:00 2001 From: Hubert Zhang Date: Thu, 11 Apr 2019 07:38:46 +0000 Subject: [PATCH 027/330] Fix answer file. --- expected/test_primary_failure.out | 171 +++++++++++++++++++++++++++++- 1 file changed, 168 insertions(+), 3 deletions(-) diff --git a/expected/test_primary_failure.out b/expected/test_primary_failure.out index f6c3669cc09..7dfdfd0469b 100644 --- a/expected/test_primary_failure.out +++ b/expected/test_primary_failure.out @@ -80,7 +80,172 @@ SELECT diskquota.set_schema_quota('ftsr', '200 MB'); -- pull up failed primary -- start_ignore select pg_recoverseg((select datadir from gp_segment_configuration c where c.role='p' and c.content=-1), 'a'); + pg_recoverseg +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + 20190411:07:15:52:032290 gprecoverseg:instance-1:huanzhang-[INFO]:-Starting gprecoverseg with args: -a -d /home/huanzhang/workspace/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 + + 20190411:07:15:52:032290 gprecoverseg:instance-1:huanzhang-[INFO]:-local Greenplum Version: 'postgres (Greenplum Database) 600-alpha0+dev16010gc767124 build dev-oss' + + 20190411:07:15:52:032290 gprecoverseg:instance-1:huanzhang-[INFO]:-master Greenplum Version: 'PostgreSQL 9420 (Greenplum Database 600-alpha0+dev16010gc767124 build dev-oss) on x86_64-unknown-linux-gnu, compiled by gcc (GCC) 485 20150623 (Red Hat 485-36), 64-bit compiled on Apr 11 2019 02:51:20'+ + 20190411:07:15:52:032290 gprecoverseg:instance-1:huanzhang-[INFO]:-Obtaining Segment details from master + + 20190411:07:15:53:032290 gprecoverseg:instance-1:huanzhang-[INFO]:-Heap checksum setting is consistent between master and the segments that are candidates for recoverseg + + 20190411:07:15:53:032290 gprecoverseg:instance-1:huanzhang-[INFO]:-Greenplum instance recovery parameters + + 20190411:07:15:53:032290 
gprecoverseg:instance-1:huanzhang-[INFO]:---------------------------------------------------------- + + 20190411:07:15:53:032290 gprecoverseg:instance-1:huanzhang-[INFO]:-Recovery type = Standard + + 20190411:07:15:53:032290 gprecoverseg:instance-1:huanzhang-[INFO]:---------------------------------------------------------- + + 20190411:07:15:53:032290 gprecoverseg:instance-1:huanzhang-[INFO]:-Recovery 1 of 1 + + 20190411:07:15:53:032290 gprecoverseg:instance-1:huanzhang-[INFO]:---------------------------------------------------------- + + 20190411:07:15:53:032290 gprecoverseg:instance-1:huanzhang-[INFO]:- Synchronization mode = Incremental + + 20190411:07:15:53:032290 gprecoverseg:instance-1:huanzhang-[INFO]:- Failed instance host = instance-1 + + 20190411:07:15:53:032290 gprecoverseg:instance-1:huanzhang-[INFO]:- Failed instance address = instance-1 + + 20190411:07:15:53:032290 gprecoverseg:instance-1:huanzhang-[INFO]:- Failed instance directory = /home/huanzhang/workspace/gpdb/gpAux/gpdemo/datadirs/dbfast1/demoDataDir0 + + 20190411:07:15:53:032290 gprecoverseg:instance-1:huanzhang-[INFO]:- Failed instance port = 25432 + + 20190411:07:15:53:032290 gprecoverseg:instance-1:huanzhang-[INFO]:- Recovery Source instance host = instance-1 + + 20190411:07:15:53:032290 gprecoverseg:instance-1:huanzhang-[INFO]:- Recovery Source instance address = instance-1 + + 20190411:07:15:53:032290 gprecoverseg:instance-1:huanzhang-[INFO]:- Recovery Source instance directory = /home/huanzhang/workspace/gpdb/gpAux/gpdemo/datadirs/dbfast_mirror1/demoDataDir0 + + 20190411:07:15:53:032290 gprecoverseg:instance-1:huanzhang-[INFO]:- Recovery Source instance port = 25435 + + 20190411:07:15:53:032290 gprecoverseg:instance-1:huanzhang-[INFO]:- Recovery Target = in-place + + 20190411:07:15:53:032290 gprecoverseg:instance-1:huanzhang-[INFO]:---------------------------------------------------------- + + 20190411:07:15:53:032290 gprecoverseg:instance-1:huanzhang-[INFO]:-1 segment(s) to recover + + 20190411:07:15:53:032290 gprecoverseg:instance-1:huanzhang-[INFO]:-Ensuring 1 failed segment(s) are stopped + + 20190411:07:15:54:032290 gprecoverseg:instance-1:huanzhang-[INFO]:-Ensuring that shared memory is cleaned up for stopped segments + + 20190411:07:15:54:032290 gprecoverseg:instance-1:huanzhang-[INFO]:-Updating configuration with new mirrors + + 20190411:07:15:54:032290 gprecoverseg:instance-1:huanzhang-[INFO]:-Updating mirrors + + 20190411:07:15:54:032290 gprecoverseg:instance-1:huanzhang-[INFO]:-Running pg_rewind on required mirrors + + 20190411:07:16:03:032290 gprecoverseg:instance-1:huanzhang-[INFO]:-Starting mirrors + + 20190411:07:16:03:032290 gprecoverseg:instance-1:huanzhang-[INFO]:-era is 3840cc8b7490e1d1_190411071143 + + 20190411:07:16:03:032290 gprecoverseg:instance-1:huanzhang-[INFO]:-Commencing parallel segment instance startup, please wait + + + + 20190411:07:16:06:032290 gprecoverseg:instance-1:huanzhang-[INFO]:-Process results + + 20190411:07:16:06:032290 gprecoverseg:instance-1:huanzhang-[INFO]:-Triggering FTS probe + + 20190411:07:16:06:032290 gprecoverseg:instance-1:huanzhang-[INFO]:-****************************************************************** + + 20190411:07:16:06:032290 gprecoverseg:instance-1:huanzhang-[INFO]:-Updating segments for streaming is completed + + 20190411:07:16:06:032290 gprecoverseg:instance-1:huanzhang-[INFO]:-For segments updated successfully, streaming will continue in the background + + 20190411:07:16:06:032290 gprecoverseg:instance-1:huanzhang-[INFO]:-Use gpstate -s to check 
the streaming progress + + 20190411:07:16:06:032290 gprecoverseg:instance-1:huanzhang-[INFO]:-****************************************************************** + + +(1 row) + +select pg_sleep(10); + pg_sleep +---------- + +(1 row) + +select pg_recoverseg((select datadir from gp_segment_configuration c where c.role='p' and c.content=-1), 'ar'); + pg_recoverseg +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + 20190411:07:16:16:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-Starting gprecoverseg with args: -ar -d /home/huanzhang/workspace/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 + + 20190411:07:16:16:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-local Greenplum Version: 'postgres (Greenplum Database) 600-alpha0+dev16010gc767124 build dev-oss' + + 20190411:07:16:16:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-master Greenplum Version: 'PostgreSQL 9420 (Greenplum Database 600-alpha0+dev16010gc767124 build dev-oss) on x86_64-unknown-linux-gnu, compiled by gcc (GCC) 485 20150623 (Red Hat 485-36), 64-bit compiled on Apr 11 2019 02:51:20'+ + 20190411:07:16:16:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-Obtaining Segment details from master + + 20190411:07:16:16:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-Greenplum instance recovery parameters + + 20190411:07:16:16:032561 gprecoverseg:instance-1:huanzhang-[INFO]:---------------------------------------------------------- + + 20190411:07:16:16:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-Recovery type = Rebalance + + 20190411:07:16:16:032561 gprecoverseg:instance-1:huanzhang-[INFO]:---------------------------------------------------------- + + 20190411:07:16:16:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-Unbalanced segment 1 of 2 + + 20190411:07:16:16:032561 gprecoverseg:instance-1:huanzhang-[INFO]:---------------------------------------------------------- + + 20190411:07:16:16:032561 gprecoverseg:instance-1:huanzhang-[INFO]:- Unbalanced instance host = instance-1 + + 20190411:07:16:16:032561 gprecoverseg:instance-1:huanzhang-[INFO]:- Unbalanced instance address = instance-1 + + 20190411:07:16:16:032561 gprecoverseg:instance-1:huanzhang-[INFO]:- Unbalanced instance directory = /home/huanzhang/workspace/gpdb/gpAux/gpdemo/datadirs/dbfast_mirror1/demoDataDir0 + + 20190411:07:16:16:032561 gprecoverseg:instance-1:huanzhang-[INFO]:- Unbalanced instance port = 25435 + + 20190411:07:16:16:032561 gprecoverseg:instance-1:huanzhang-[INFO]:- Balanced role = Mirror + + 20190411:07:16:16:032561 gprecoverseg:instance-1:huanzhang-[INFO]:- Current role = Primary + + 20190411:07:16:16:032561 gprecoverseg:instance-1:huanzhang-[INFO]:---------------------------------------------------------- + + 20190411:07:16:16:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-Unbalanced segment 2 of 2 + + 20190411:07:16:16:032561 gprecoverseg:instance-1:huanzhang-[INFO]:---------------------------------------------------------- + + 20190411:07:16:16:032561 gprecoverseg:instance-1:huanzhang-[INFO]:- Unbalanced instance host = instance-1 + + 20190411:07:16:16:032561 gprecoverseg:instance-1:huanzhang-[INFO]:- Unbalanced instance address = instance-1 + + 20190411:07:16:16:032561 gprecoverseg:instance-1:huanzhang-[INFO]:- Unbalanced instance directory = 
/home/huanzhang/workspace/gpdb/gpAux/gpdemo/datadirs/dbfast1/demoDataDir0 + + 20190411:07:16:16:032561 gprecoverseg:instance-1:huanzhang-[INFO]:- Unbalanced instance port = 25432 + + 20190411:07:16:16:032561 gprecoverseg:instance-1:huanzhang-[INFO]:- Balanced role = Primary + + 20190411:07:16:16:032561 gprecoverseg:instance-1:huanzhang-[INFO]:- Current role = Mirror + + 20190411:07:16:16:032561 gprecoverseg:instance-1:huanzhang-[INFO]:---------------------------------------------------------- + + 20190411:07:16:16:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-Getting unbalanced segments + + 20190411:07:16:16:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-Stopping unbalanced primary segments + + 20190411:07:16:17:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-Triggering segment reconfiguration + + 20190411:07:16:24:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-Starting segment synchronization + + 20190411:07:16:24:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-=============================START ANOTHER RECOVER========================================= + + 20190411:07:16:24:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-local Greenplum Version: 'postgres (Greenplum Database) 600-alpha0+dev16010gc767124 build dev-oss' + + 20190411:07:16:24:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-master Greenplum Version: 'PostgreSQL 9420 (Greenplum Database 600-alpha0+dev16010gc767124 build dev-oss) on x86_64-unknown-linux-gnu, compiled by gcc (GCC) 485 20150623 (Red Hat 485-36), 64-bit compiled on Apr 11 2019 02:51:20'+ + 20190411:07:16:24:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-Obtaining Segment details from master + + 20190411:07:16:24:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-Heap checksum setting is consistent between master and the segments that are candidates for recoverseg + + 20190411:07:16:24:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-Greenplum instance recovery parameters + + 20190411:07:16:24:032561 gprecoverseg:instance-1:huanzhang-[INFO]:---------------------------------------------------------- + + 20190411:07:16:24:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-Recovery type = Standard + + 20190411:07:16:24:032561 gprecoverseg:instance-1:huanzhang-[INFO]:---------------------------------------------------------- + + 20190411:07:16:24:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-Recovery 1 of 1 + + 20190411:07:16:24:032561 gprecoverseg:instance-1:huanzhang-[INFO]:---------------------------------------------------------- + + 20190411:07:16:24:032561 gprecoverseg:instance-1:huanzhang-[INFO]:- Synchronization mode = Incremental + + 20190411:07:16:24:032561 gprecoverseg:instance-1:huanzhang-[INFO]:- Failed instance host = instance-1 + + 20190411:07:16:24:032561 gprecoverseg:instance-1:huanzhang-[INFO]:- Failed instance address = instance-1 + + 20190411:07:16:24:032561 gprecoverseg:instance-1:huanzhang-[INFO]:- Failed instance directory = /home/huanzhang/workspace/gpdb/gpAux/gpdemo/datadirs/dbfast_mirror1/demoDataDir0 + + 20190411:07:16:24:032561 gprecoverseg:instance-1:huanzhang-[INFO]:- Failed instance port = 25435 + + 20190411:07:16:24:032561 gprecoverseg:instance-1:huanzhang-[INFO]:- Recovery Source instance host = instance-1 + + 20190411:07:16:24:032561 gprecoverseg:instance-1:huanzhang-[INFO]:- Recovery Source instance address = instance-1 + + 20190411:07:16:24:032561 gprecoverseg:instance-1:huanzhang-[INFO]:- Recovery Source instance directory = /home/huanzhang/workspace/gpdb/gpAux/gpdemo/datadirs/dbfast1/demoDataDir0 + + 
20190411:07:16:24:032561 gprecoverseg:instance-1:huanzhang-[INFO]:- Recovery Source instance port = 25432 + + 20190411:07:16:24:032561 gprecoverseg:instance-1:huanzhang-[INFO]:- Recovery Target = in-place + + 20190411:07:16:24:032561 gprecoverseg:instance-1:huanzhang-[INFO]:---------------------------------------------------------- + + 20190411:07:16:24:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-1 segment(s) to recover + + 20190411:07:16:24:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-Ensuring 1 failed segment(s) are stopped + + 20190411:07:16:25:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-Ensuring that shared memory is cleaned up for stopped segments + + 20190411:07:16:26:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-Updating configuration with new mirrors + + 20190411:07:16:26:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-Updating mirrors + + 20190411:07:16:26:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-Running pg_rewind on required mirrors + + 20190411:07:16:27:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-Starting mirrors + + 20190411:07:16:27:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-era is 3840cc8b7490e1d1_190411071143 + + 20190411:07:16:27:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-Commencing parallel segment instance startup, please wait + + + + 20190411:07:16:28:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-Process results + + 20190411:07:16:28:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-Triggering FTS probe + + 20190411:07:16:28:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-****************************************************************** + + 20190411:07:16:28:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-Updating segments for streaming is completed + + 20190411:07:16:28:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-For segments updated successfully, streaming will continue in the background + + 20190411:07:16:28:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-Use gpstate -s to check the streaming progress + + 20190411:07:16:28:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-****************************************************************** + + 20190411:07:16:28:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-==============================END ANOTHER RECOVER========================================== + + 20190411:07:16:28:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-****************************************************************** + + 20190411:07:16:28:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-The rebalance operation has completed successfully + + 20190411:07:16:28:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-There is a resynchronization running in the background to bring all + + 20190411:07:16:28:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-segments in sync + + 20190411:07:16:28:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-Use gpstate -e to check the resynchronization progress + + 20190411:07:16:28:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-****************************************************************** + + +(1 row) + +select pg_sleep(15); + pg_sleep +---------- + +(1 row) + +select pg_recoverseg((select datadir from gp_segment_configuration c where c.role='p' and c.content=-1), 'a'); + pg_recoverseg 
+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + 20190411:07:16:43:000456 gprecoverseg:instance-1:huanzhang-[INFO]:-Starting gprecoverseg with args: -a -d /home/huanzhang/workspace/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 + + 20190411:07:16:43:000456 gprecoverseg:instance-1:huanzhang-[INFO]:-local Greenplum Version: 'postgres (Greenplum Database) 600-alpha0+dev16010gc767124 build dev-oss' + + 20190411:07:16:43:000456 gprecoverseg:instance-1:huanzhang-[INFO]:-master Greenplum Version: 'PostgreSQL 9420 (Greenplum Database 600-alpha0+dev16010gc767124 build dev-oss) on x86_64-unknown-linux-gnu, compiled by gcc (GCC) 485 20150623 (Red Hat 485-36), 64-bit compiled on Apr 11 2019 02:51:20'+ + 20190411:07:16:43:000456 gprecoverseg:instance-1:huanzhang-[INFO]:-Obtaining Segment details from master + + 20190411:07:16:43:000456 gprecoverseg:instance-1:huanzhang-[INFO]:-No segments to recover + + +(1 row) + +select pg_sleep(10); + pg_sleep +---------- + +(1 row) + select pg_recoverseg((select datadir from gp_segment_configuration c where c.role='p' and c.content=-1), 'ar'); + pg_recoverseg +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + 20190411:07:16:53:000492 gprecoverseg:instance-1:huanzhang-[INFO]:-Starting gprecoverseg with args: -ar -d /home/huanzhang/workspace/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 + + 20190411:07:16:53:000492 gprecoverseg:instance-1:huanzhang-[INFO]:-local Greenplum Version: 'postgres (Greenplum Database) 600-alpha0+dev16010gc767124 build dev-oss' + + 20190411:07:16:53:000492 gprecoverseg:instance-1:huanzhang-[INFO]:-master Greenplum Version: 'PostgreSQL 9420 (Greenplum Database 600-alpha0+dev16010gc767124 build dev-oss) on x86_64-unknown-linux-gnu, compiled by gcc (GCC) 485 20150623 (Red Hat 485-36), 64-bit compiled on Apr 11 2019 02:51:20'+ + 20190411:07:16:53:000492 gprecoverseg:instance-1:huanzhang-[INFO]:-Obtaining Segment details from master + + 20190411:07:16:54:000492 gprecoverseg:instance-1:huanzhang-[INFO]:-No segments are running in their non-preferred role and need to be rebalanced + + +(1 row) + -- end_ignore -- check GPDB status select content, preferred_role, role, status, mode from gp_segment_configuration where content = 0; @@ -92,15 +257,15 @@ select content, preferred_role, role, status, mode from gp_segment_configuration -- no sleep, it will take effect immediately SELECT pg_sleep(10); - pg_sleep + pg_sleep ---------- - + (1 row) SELECT quota_in_mb, nspsize_in_bytes from diskquota.show_fast_schema_quota_view where schema_name='ftsr'; quota_in_mb | nspsize_in_bytes -------------+------------------ - 200 | 3833856 + 200 | 3932160 (1 row) INSERT INTO a SELECT generate_series(1,100); From cbb14c8b120da2bebf473c39e7434d99386c61ab Mon Sep 17 00:00:00 2001 From: Weinan WANG Date: Thu, 11 Apr 2019 16:09:01 +0800 Subject: [PATCH 028/330] Import idle worker terminate mechanism (#27) * Import idle worker terminate mechanism As normal DB connection, QD terminates QE processes if the connection is idle for a while. 
Import this mechanism in diskquota_launcher_process
---
 diskquota.c | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/diskquota.c b/diskquota.c
index c2f8307c6bc..d97c49996e9 100644
--- a/diskquota.c
+++ b/diskquota.c
@@ -17,6 +17,7 @@
 #include "postgres.h"

 #include
+#include

 #include "access/tupdesc.h"
 #include "access/xact.h"
@@ -37,6 +38,7 @@
 #include "postmaster/bgworker.h"
 #include "storage/ipc.h"
 #include "storage/proc.h"
+#include "tcop/idle_resource_cleaner.h"
 #include "tcop/utility.h"
 #include "utils/acl.h"
 #include "utils/builtins.h"
@@ -432,6 +434,8 @@ disk_quota_launcher_main(Datum main_arg)
 	start_workers_from_dblist();

 	/* main loop: do this until the SIGTERM handler tells us to terminate. */
+	EnableClientWaitTimeoutInterrupt();
+	StartIdleResourceCleanupTimers();
 	while (!got_sigterm)
 	{
 		int			rc;
@@ -457,14 +461,18 @@ disk_quota_launcher_main(Datum main_arg)
 		if (got_sigusr1)
 		{
 			got_sigusr1 = false;
+			CancelIdleResourceCleanupTimers();
 			process_extension_ddl_message();
+			StartIdleResourceCleanupTimers();
 		}

 		/* in case of a SIGHUP, just reload the configuration. */
 		if (got_sighup)
 		{
 			got_sighup = false;
+			CancelIdleResourceCleanupTimers();
 			ProcessConfigFile(PGC_SIGHUP);
+			StartIdleResourceCleanupTimers();
 		}
 	}

From f6021de24bad21301e27f1814c5154c1a225e14e Mon Sep 17 00:00:00 2001
From: Hubert Zhang
Date: Thu, 11 Apr 2019 17:12:48 +0800
Subject: [PATCH 029/330] Refactor quota model. (#23)

1. Clear up the logic in calculate_table_disk_usage.
2. Rename the lock to extension_ddl_lock.
3. Add comments for quotamodel.
4. Release locks when elog reports an error.
5. Use ereport to replace elog.
---
 diskquota.h | 2 +-
 diskquota_utility.c | 16 +-
 quotamodel.c | 793 +++++++++++++++++++++++---------------------
 3 files changed, 417 insertions(+), 394 deletions(-)

diff --git a/diskquota.h b/diskquota.h
index ae543d65ff2..ad2c373288b 100644
--- a/diskquota.h
+++ b/diskquota.h
@@ -27,7 +27,7 @@ struct DiskQuotaLocks
 	LWLock	   *active_table_lock;
 	LWLock	   *black_map_lock;
 	LWLock	   *extension_ddl_message_lock;
-	LWLock	   *extension_lock; /* ensure create diskquota extension serially */
+	LWLock	   *extension_ddl_lock; /* ensure create diskquota extension serially */
 };
 typedef struct DiskQuotaLocks DiskQuotaLocks;

diff --git a/diskquota_utility.c b/diskquota_utility.c
index f87e48f2bb4..15e4b2f73f0 100644
--- a/diskquota_utility.c
+++ b/diskquota_utility.c
@@ -137,10 +137,10 @@ diskquota_start_worker(PG_FUNCTION_ARGS)
 	int			rc;

 	/*
-	 * Lock on extension_lock to avoid multiple backend create diskquota
+	 * Lock on extension_ddl_lock to avoid multiple backend create diskquota
 	 * extension at the same time.
*/ - LWLockAcquire(diskquota_locks.extension_lock, LW_EXCLUSIVE); + LWLockAcquire(diskquota_locks.extension_ddl_lock, LW_EXCLUSIVE); LWLockAcquire(diskquota_locks.extension_ddl_message_lock, LW_EXCLUSIVE); extension_ddl_message->req_pid = MyProcPid; extension_ddl_message->cmd = CMD_CREATE_EXTENSION; @@ -175,11 +175,11 @@ diskquota_start_worker(PG_FUNCTION_ARGS) if (extension_ddl_message->result != ERR_OK) { LWLockRelease(diskquota_locks.extension_ddl_message_lock); - LWLockRelease(diskquota_locks.extension_lock); + LWLockRelease(diskquota_locks.extension_ddl_lock); elog(ERROR, "[diskquota] failed to create diskquota extension: %s", ddl_err_code_to_err_message((MessageResult) extension_ddl_message->result)); } LWLockRelease(diskquota_locks.extension_ddl_message_lock); - LWLockRelease(diskquota_locks.extension_lock); + LWLockRelease(diskquota_locks.extension_ddl_lock); /* notify DBA to run init_table_size_table() when db is not empty */ if (!is_database_empty()) @@ -266,10 +266,10 @@ dq_object_access_hook(ObjectAccessType access, Oid classId, goto out; /* - * Lock on extension_lock to avoid multiple backend create diskquota + * Lock on extension_ddl_lock to avoid multiple backend create diskquota * extension at the same time. */ - LWLockAcquire(diskquota_locks.extension_lock, LW_EXCLUSIVE); + LWLockAcquire(diskquota_locks.extension_ddl_lock, LW_EXCLUSIVE); LWLockAcquire(diskquota_locks.extension_ddl_message_lock, LW_EXCLUSIVE); extension_ddl_message->req_pid = MyProcPid; extension_ddl_message->cmd = CMD_DROP_EXTENSION; @@ -303,11 +303,11 @@ dq_object_access_hook(ObjectAccessType access, Oid classId, if (extension_ddl_message->result != ERR_OK) { LWLockRelease(diskquota_locks.extension_ddl_message_lock); - LWLockRelease(diskquota_locks.extension_lock); + LWLockRelease(diskquota_locks.extension_ddl_lock); elog(ERROR, "[diskquota launcher] failed to drop diskquota extension: %s", ddl_err_code_to_err_message((MessageResult) extension_ddl_message->result)); } LWLockRelease(diskquota_locks.extension_ddl_message_lock); - LWLockRelease(diskquota_locks.extension_lock); + LWLockRelease(diskquota_locks.extension_ddl_lock); out: if (next_object_access_hook) (*next_object_access_hook) (access, classId, objectId, diff --git a/quotamodel.c b/quotamodel.c index 080d15b3482..f8d9f297cbc 100644 --- a/quotamodel.c +++ b/quotamodel.c @@ -3,7 +3,8 @@ * quotamodel.c * * This code is responsible for init disk quota model and refresh disk quota - * model. + * model. Disk quota related Shared memory initialization is also implemented + * in this file. * * Copyright (c) 2018-Present Pivotal Software, Inc. * @@ -67,7 +68,8 @@ struct TableSizeEntry Oid reloid; Oid namespaceoid; Oid owneroid; - int64 totalsize; + int64 totalsize; /* table size including fsm, visibility map + * etc. */ bool is_exist; /* flag used to check whether table is already * dropped */ bool need_flush; /* whether need to flush to table table_size */ @@ -140,38 +142,38 @@ static bool do_check_diskquota_state_is_ready(void); static Size DiskQuotaShmemSize(void); static void disk_quota_shmem_startup(void); +static void init_lwlocks(void); static void truncateStringInfo(StringInfo str, int nchars); +/* ---- Functions for disk quota shared memory ---- */ /* - * DiskQuotaShmemSize - * Compute space needed for diskquota-related shared memory + * DiskQuotaShmemInit + * Allocate and initialize diskquota-related shared memory + * This function is called in _PG_init(). 
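+ *
+ * Note: with the shared_preload_libraries pattern used here, _PG_init()
+ * may only request resources via RequestAddinShmemSpace() and
+ * RequestAddinLWLocks(); the actual allocation and initialization happen
+ * later, in the disk_quota_shmem_startup() hook.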
*/ -Size -DiskQuotaShmemSize(void) +void +init_disk_quota_shmem(void) { - Size size; - - size = sizeof(ExtensionDDLMessage); - size = add_size(size, hash_estimate_size(MAX_DISK_QUOTA_BLACK_ENTRIES, sizeof(BlackMapEntry))); - size = add_size(size, hash_estimate_size(diskquota_max_active_tables, sizeof(DiskQuotaActiveTableEntry))); - return size; -} + /* + * Request additional shared resources. (These are no-ops if we're not in + * the postmaster process.) We'll allocate or attach to the shared + * resources in disk_quota_shmem_startup(). + */ + RequestAddinShmemSpace(DiskQuotaShmemSize()); + /* 4 locks for diskquota refer to init_lwlocks() for details */ + RequestAddinLWLocks(4); -static void -init_lwlocks(void) -{ - diskquota_locks.active_table_lock = LWLockAssign(); - diskquota_locks.black_map_lock = LWLockAssign(); - diskquota_locks.extension_ddl_message_lock = LWLockAssign(); - diskquota_locks.extension_lock = LWLockAssign(); + /* Install startup hook to initialize our shared memory. */ + prev_shmem_startup_hook = shmem_startup_hook; + shmem_startup_hook = disk_quota_shmem_startup; } /* - * DiskQuotaShmemInit - * Allocate and initialize diskquota-related shared memory + * disk_quota_shmem_startup hook. + * Initialize shared memory data and locks. */ -void +static void disk_quota_shmem_startup(void) { bool found; @@ -180,11 +182,16 @@ disk_quota_shmem_startup(void) if (prev_shmem_startup_hook) (*prev_shmem_startup_hook) (); - disk_quota_black_map = NULL; - LWLockAcquire(AddinShmemInitLock, LW_EXCLUSIVE); init_lwlocks(); + + /* + * Three shared memory structures. extension_ddl_message is used to handle + * diskquota extension create/drop command. disk_quota_black_map is used + * to store out-of-quota blacklist. active_tables_map is used to store + * active tables whose disk usage is changed. + */ extension_ddl_message = ShmemInitStruct("disk_quota_extension_ddl_message", sizeof(ExtensionDDLMessage), &found); @@ -207,24 +214,41 @@ disk_quota_shmem_startup(void) LWLockRelease(AddinShmemInitLock); } -void -init_disk_quota_shmem(void) +/* + * Initialize four shared memory locks. + * active_table_lock is used to access active table map. + * black_map_lock is used to access out-of-quota blacklist. + * extension_ddl_message_lock is used to access content of + * extension_ddl_message. + * extension_ddl_lock is used to avoid concurrent diskquota + * extension ddl(create/drop) command. + */ +static void +init_lwlocks(void) { - /* - * Request additional shared resources. (These are no-ops if we're not in - * the postmaster process.) We'll allocate or attach to the shared - * resources in pgss_shmem_startup(). - */ - RequestAddinShmemSpace(DiskQuotaShmemSize()); - RequestAddinLWLocks(4); + diskquota_locks.active_table_lock = LWLockAssign(); + diskquota_locks.black_map_lock = LWLockAssign(); + diskquota_locks.extension_ddl_message_lock = LWLockAssign(); + diskquota_locks.extension_ddl_lock = LWLockAssign(); +} - /* - * Install startup hook to initialize our shared memory.
- */ - prev_shmem_startup_hook = shmem_startup_hook; - shmem_startup_hook = disk_quota_shmem_startup; +/* + * DiskQuotaShmemSize + * Compute space needed for diskquota-related shared memory + */ +static Size +DiskQuotaShmemSize(void) +{ + Size size; + + size = sizeof(ExtensionDDLMessage); + size = add_size(size, hash_estimate_size(MAX_DISK_QUOTA_BLACK_ENTRIES, sizeof(BlackMapEntry))); + size = add_size(size, hash_estimate_size(diskquota_max_active_tables, sizeof(DiskQuotaActiveTableEntry))); + return size; } + +/* ---- Functions for disk quota model ---- */ /* * Init disk quota model when the worker process firstly started. */ @@ -233,7 +257,7 @@ init_disk_quota_model(void) { HASHCTL hash_ctl; - /* init hash table for table/schema/role etc. */ + /* initialize hash table for table/schema/role etc. */ memset(&hash_ctl, 0, sizeof(hash_ctl)); hash_ctl.keysize = sizeof(Oid); hash_ctl.entrysize = sizeof(TableSizeEntry); @@ -267,6 +291,7 @@ init_disk_quota_model(void) &hash_ctl, HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); + /* initialize hash table for quota limit */ memset(&hash_ctl, 0, sizeof(hash_ctl)); hash_ctl.keysize = sizeof(Oid); hash_ctl.entrysize = sizeof(QuotaLimitEntry); @@ -283,6 +308,10 @@ init_disk_quota_model(void) &hash_ctl, HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); + /* + * local diskquota black map is used to reduce the lock hold time of + * blackmap in shared memory + */ memset(&hash_ctl, 0, sizeof(hash_ctl)); hash_ctl.keysize = sizeof(BlackMapEntry); hash_ctl.entrysize = sizeof(LocalBlackMapEntry); @@ -295,65 +324,6 @@ init_disk_quota_model(void) HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); } -/* - * Check whether the diskquota state is ready - */ -static bool -do_check_diskquota_state_is_ready(void) -{ - int ret; - TupleDesc tupdesc; - int i; - - RangeVar *rv; - Relation rel; - - /* check table diskquota.state exists */ - rv = makeRangeVar("diskquota", "state", -1); - rel = heap_openrv_extended(rv, AccessShareLock, true); - if (!rel) - { - return false; - } - heap_close(rel, AccessShareLock); - - /* check diskquota state from table diskquota.state */ - ret = SPI_execute("select state from diskquota.state", true, 0); - if (ret != SPI_OK_SELECT) - elog(ERROR, "[diskquota] check diskquota state SPI_execute failed: error code %d", ret); - - tupdesc = SPI_tuptable->tupdesc; - if (tupdesc->natts != 1 || - ((tupdesc)->attrs[0])->atttypid != INT4OID) - { - elog(ERROR, "[diskquota] table \"state\" is corrupted in database \"%s\"," - " please recreate diskquota extension", - get_database_name(MyDatabaseId)); - return false; - } - - for (i = 0; i < SPI_processed; i++) - { - HeapTuple tup = SPI_tuptable->vals[i]; - Datum dat; - int state; - bool isnull; - - dat = SPI_getbinval(tup, tupdesc, 1, &isnull); - if (isnull) - continue; - state = DatumGetInt64(dat); - - if (state == DISKQUOTA_READY_STATE) - { - return true; - } - } - ereport(LOG, (errmsg("Diskquota is not in ready state. 
" - "please run UDF init_table_size_table()"))); - return false; -} - /* * Check whether the diskquota state is ready */ @@ -378,7 +348,7 @@ check_diskquota_state_is_ready(void) { ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("unable to connect to execute internal query"))); + errmsg("[diskquota] unable to connect to execute SPI query"))); } connected = true; PushActiveSnapshot(GetTransactionSnapshot()); @@ -409,27 +379,89 @@ check_diskquota_state_is_ready(void) } /* - * diskquota worker will refresh disk quota model + * Check whether the diskquota state is ready + * For empty database, the diskquota state would + * be ready after 'create extension diskquota' and + * it's ready to use. But for non-empty database, + * user need to run UDF diskquota.init_table_size_table() + * manually to get all the table size information and + * store them into table diskquota.table_size + */ +static bool +do_check_diskquota_state_is_ready(void) +{ + int ret; + TupleDesc tupdesc; + int i; + + /* + * check diskquota state from table diskquota.state errors will be catch + * at upper level function. + */ + ret = SPI_execute("select state from diskquota.state", true, 0); + if (ret != SPI_OK_SELECT) + ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("[diskquota] check diskquota state SPI_execute failed: error code %d", ret))); + + tupdesc = SPI_tuptable->tupdesc; + if (tupdesc->natts != 1 || + ((tupdesc)->attrs[0])->atttypid != INT4OID) + { + ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("[diskquota] table \"state\" is corrupted in database \"%s\"," + " please recreate diskquota extension", + get_database_name(MyDatabaseId)))); + } + + for (i = 0; i < SPI_processed; i++) + { + HeapTuple tup = SPI_tuptable->vals[i]; + Datum dat; + int state; + bool isnull; + + dat = SPI_getbinval(tup, tupdesc, 1, &isnull); + if (isnull) + continue; + state = DatumGetInt64(dat); + + if (state == DISKQUOTA_READY_STATE) + { + return true; + } + } + ereport(WARNING, (errmsg("Diskquota is not in ready state. " + "please run UDF init_table_size_table()"))); + return false; +} + +/* + * Diskquota worker will refresh disk quota model * periodically. It will reload quota setting and * recalculate the changed disk usage. */ void refresh_disk_quota_model(bool is_init) { - elog(LOG, "[diskquota] start refresh_disk_quota_model"); + if (is_init) + ereport(LOG, (errmsg("[diskquota] initialize quota model started"))); /* skip refresh model when load_quotas failed */ if (load_quotas()) { refresh_disk_quota_usage(is_init); } + if (is_init) + ereport(LOG, (errmsg("[diskquota] initialize quota model finished"))); } /* * Update the disk usage of namespace and role. * Put the exceeded namespace and role into shared black map. + * Parameter 'is_init' is true when it's the first time that worker + * process is constructing quota model. 
*/ static void -refresh_disk_quota_usage(bool force) +refresh_disk_quota_usage(bool is_init) { bool connected = false; bool pushed_active_snap = false; @@ -448,13 +480,13 @@ refresh_disk_quota_usage(bool force) { ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("unable to connect to execute internal query"))); + errmsg("[diskquota] unable to connect to execute SPI query"))); } connected = true; PushActiveSnapshot(GetTransactionSnapshot()); pushed_active_snap = true; /* recalculate the disk usage of table, schema and role */ - calculate_table_disk_usage(force); + calculate_table_disk_usage(is_init); calculate_schema_disk_usage(); calculate_role_disk_usage(); /* flush local table_size_map to user table table_size */ @@ -486,237 +518,61 @@ } /* - * Generate the new shared blacklist from the local_black_list which - * exceed the quota limit. - * local_black_list is used to reduce the lock race. + * Incremental way to update the disk quota of every database object. + * Recalculate the table's disk usage when it's a new table or active table. + * Detect the removed table if it's no longer in pg_class. + * If change happens, no matter size change or owner change, + * update namespace_size_map and role_size_map correspondingly. + * Parameter 'is_init' is set to true at initialization stage to fetch table + * sizes from table table_size */ static void -flush_local_black_map(void) +calculate_table_disk_usage(bool is_init) { + bool table_size_map_found; + bool active_tbl_found; + int64 updated_total_size; + Relation classRel; + HeapTuple tuple; + HeapScanDesc relScan; + TableSizeEntry *tsentry = NULL; + Oid relOid; HASH_SEQ_STATUS iter; - LocalBlackMapEntry *localblackentry; - BlackMapEntry *blackentry; - bool found; + HTAB *local_active_table_stat_map; + DiskQuotaActiveTableEntry *active_table_entry; - LWLockAcquire(diskquota_locks.black_map_lock, LW_EXCLUSIVE); + classRel = heap_open(RelationRelationId, AccessShareLock); + relScan = heap_beginscan_catalog(classRel, 0, NULL); - hash_seq_init(&iter, local_disk_quota_black_map); - while ((localblackentry = hash_seq_search(&iter)) != NULL) + /* + * At initialization stage all the tables are active. In later loops, only + * the tables whose disk size changed will be treated as active + */ + local_active_table_stat_map = gp_fetch_active_tables(is_init); + + /* + * unset is_exist flag for tsentry in table_size_map; this is used to + * detect tables which have been dropped. + */ + hash_seq_init(&iter, table_size_map); + while ((tsentry = hash_seq_search(&iter)) != NULL) { - if (localblackentry->isexceeded) - { - blackentry = (BlackMapEntry *) hash_search(disk_quota_black_map, - (void *) &localblackentry->keyitem, - HASH_ENTER_NULL, &found); - if (blackentry == NULL) - { - elog(WARNING, "Shared disk quota black map size limit reached."
- "Some out-of-limit schemas or roles will be lost" - "in blacklist."); - } - else - { - /* new db objects which exceed quota limit */ - if (!found) - { - blackentry->targetoid = localblackentry->keyitem.targetoid; - blackentry->databaseoid = MyDatabaseId; - blackentry->targettype = localblackentry->keyitem.targettype; - } - } - localblackentry->isexceeded = false; - } - else - { - /* db objects are removed or under quota limit in the new loop */ - (void) hash_search(disk_quota_black_map, - (void *) &localblackentry->keyitem, - HASH_REMOVE, NULL); - (void) hash_search(local_disk_quota_black_map, - (void *) &localblackentry->keyitem, - HASH_REMOVE, NULL); - } + tsentry->is_exist = false; } - LWLockRelease(diskquota_locks.black_map_lock); -} -/* - * Compare the disk quota limit and current usage of a database object. - * Put them into local blacklist if quota limit is exceeded. - */ -static void -check_disk_quota_by_oid(Oid targetOid, int64 current_usage, QuotaType type) -{ - bool found; - int32 quota_limit_mb; - int32 current_usage_mb; - LocalBlackMapEntry *localblackentry; - BlackMapEntry keyitem; + /* + * scan pg_class to detect table event: drop, reset schema, reset owenr. + * calculate the file size for active table and update namespace_size_map + * and role_size_map + */ + while ((tuple = heap_getnext(relScan, ForwardScanDirection)) != NULL) + { + Form_pg_class classForm = (Form_pg_class) GETSTRUCT(tuple); - QuotaLimitEntry *quota_entry; - - if (type == NAMESPACE_QUOTA) - { - quota_entry = (QuotaLimitEntry *) hash_search(namespace_quota_limit_map, - &targetOid, - HASH_FIND, &found); - } - else if (type == ROLE_QUOTA) - { - quota_entry = (QuotaLimitEntry *) hash_search(role_quota_limit_map, - &targetOid, - HASH_FIND, &found); - } - else - { - /* skip check if not namespace or role quota */ - return; - } - - if (!found) - { - /* default no limit */ - return; - } - - quota_limit_mb = quota_entry->limitsize; - current_usage_mb = current_usage / (1024 * 1024); - if (current_usage_mb >= quota_limit_mb) - { - memset(&keyitem, 0, sizeof(BlackMapEntry)); - keyitem.targetoid = targetOid; - keyitem.databaseoid = MyDatabaseId; - keyitem.targettype = (uint32) type; - elog(DEBUG1, "Put object %u to blacklist with quota limit:%d, current usage:%d", - targetOid, quota_limit_mb, current_usage_mb); - localblackentry = (LocalBlackMapEntry *) hash_search(local_disk_quota_black_map, - &keyitem, - HASH_ENTER, &found); - localblackentry->isexceeded = true; - } - -} - -/* - * Remove a namespace from local namespace_size_map - */ -static void -remove_namespace_map(Oid namespaceoid) -{ - hash_search(namespace_size_map, - &namespaceoid, - HASH_REMOVE, NULL); -} - -/* - * Update the current disk usage of a namespace in namespace_size_map. - */ -static void -update_namespace_map(Oid namespaceoid, int64 updatesize) -{ - bool found; - NamespaceSizeEntry *nsentry; - - nsentry = (NamespaceSizeEntry *) hash_search(namespace_size_map, - &namespaceoid, - HASH_ENTER, &found); - if (!found) - { - nsentry->namespaceoid = namespaceoid; - nsentry->totalsize = updatesize; - } - else - { - nsentry->totalsize += updatesize; - } - -} - -/* - * Remove a namespace from local role_size_map - */ -static void -remove_role_map(Oid owneroid) -{ - hash_search(role_size_map, - &owneroid, - HASH_REMOVE, NULL); -} - -/* - * Update the current disk usage of a namespace in role_size_map. 
- */ -static void -update_role_map(Oid owneroid, int64 updatesize) -{ - bool found; - RoleSizeEntry *rolentry; - - rolentry = (RoleSizeEntry *) hash_search(role_size_map, - &owneroid, - HASH_ENTER, &found); - if (!found) - { - rolentry->owneroid = owneroid; - rolentry->totalsize = updatesize; - } - else - { - rolentry->totalsize += updatesize; - } - -} - -/* - * Incremental way to update the disk quota of every database objects - * Recalculate the table's disk usage when it's a new table or active table. - * Detect the removed table if it's no longer in pg_class. - * If change happens, no matter size change or owner change, - * update namespace_size_map and role_size_map correspondingly. - * Parameter 'force' set to true at initialization stage to recalculate - * the file size of all the tables. - * - */ -static void -calculate_table_disk_usage(bool is_init) -{ - bool found; - bool active_tbl_found = false; - Relation classRel; - HeapTuple tuple; - HeapScanDesc relScan; - TableSizeEntry *tsentry = NULL; - Oid relOid; - HASH_SEQ_STATUS iter; - HTAB *local_active_table_stat_map; - DiskQuotaActiveTableEntry *active_table_entry; - - classRel = heap_open(RelationRelationId, AccessShareLock); - relScan = heap_beginscan_catalog(classRel, 0, NULL); - - local_active_table_stat_map = gp_fetch_active_tables(is_init); - - /* unset is_exist flag for tsentry in table_size_map */ - hash_seq_init(&iter, table_size_map); - while ((tsentry = hash_seq_search(&iter)) != NULL) - { - tsentry->is_exist = false; - } - - /* - * scan pg_class to detect table event: drop, reset schema, reset owenr. - * calculate the file size for active table and update namespace_size_map - * and role_size_map - */ - while ((tuple = heap_getnext(relScan, ForwardScanDirection)) != NULL) - { - Form_pg_class classForm = (Form_pg_class) GETSTRUCT(tuple); - - found = false; - if (classForm->relkind != RELKIND_RELATION && - classForm->relkind != RELKIND_MATVIEW) - continue; - relOid = HeapTupleGetOid(tuple); + if (classForm->relkind != RELKIND_RELATION && + classForm->relkind != RELKIND_MATVIEW) + continue; + relOid = HeapTupleGetOid(tuple); /* ignore system table */ if (relOid < FirstNormalObjectId) @@ -724,10 +580,11 @@ calculate_table_disk_usage(bool is_init) tsentry = (TableSizeEntry *) hash_search(table_size_map, &relOid, - HASH_ENTER, &found); + HASH_ENTER, &table_size_map_found); - if (!found) + if (!table_size_map_found) { + tsentry->reloid = relOid; tsentry->totalsize = 0; tsentry->owneroid = 0; tsentry->namespaceoid = 0; @@ -740,38 +597,19 @@ calculate_table_disk_usage(bool is_init) active_table_entry = (DiskQuotaActiveTableEntry *) hash_search(local_active_table_stat_map, &relOid, HASH_FIND, &active_tbl_found); - /* - * skip to recalculate the tables which are not in active list and not - * at initializatio stage - */ + /* skip to recalculate the tables which are not in active list */ if (active_tbl_found) { + /* firstly calculate the updated total size of a table */ + updated_total_size = active_table_entry->tablesize - tsentry->totalsize; - /* namespace and owner may be changed since last check */ - if (!found) - { - /* if it's a new table */ - tsentry->reloid = relOid; - tsentry->namespaceoid = classForm->relnamespace; - tsentry->owneroid = classForm->relowner; - tsentry->totalsize = (int64) active_table_entry->tablesize; - tsentry->need_flush = true; - update_namespace_map(tsentry->namespaceoid, tsentry->totalsize); - update_role_map(tsentry->owneroid, tsentry->totalsize); - } - else - { - /* - * if not new table in 
table_size_map, it must be in active - * table list - */ - int64 oldtotalsize = tsentry->totalsize; - - tsentry->totalsize = (int64) active_table_entry->tablesize; - tsentry->need_flush = true; - update_namespace_map(tsentry->namespaceoid, tsentry->totalsize - oldtotalsize); - update_role_map(tsentry->owneroid, tsentry->totalsize - oldtotalsize); - } + /* update the table_size entry */ + tsentry->totalsize = (int64) active_table_entry->tablesize; + tsentry->need_flush = true; + + /* update the disk usage of namespace and owner */ + update_namespace_map(tsentry->namespaceoid, updated_total_size); + update_role_map(tsentry->owneroid, updated_total_size); } /* table size info doesn't need to flush at init quota model stage */ @@ -869,30 +707,13 @@ calculate_role_disk_usage(void) } } -/* - * Make sure a StringInfo's string is no longer than 'nchars' characters. - */ -static void -truncateStringInfo(StringInfo str, int nchars) -{ - if (str && - str->len > nchars) - { - Assert(str->data != NULL && - str->len <= str->maxlen); - str->len = nchars; - str->data[nchars] = '\0'; - } -} - /* * Flush the table_size_map to user table diskquota.table_size * To improve update performance, we first delete all the need_to_flush * entries in table table_size. And then insert new table size entries into * table table_size. */ -static -void +static void flush_to_table_size(void) { HASH_SEQ_STATUS iter; @@ -940,17 +761,210 @@ flush_to_table_size(void) if (delete_statement_flag) { - elog(DEBUG1, "[diskquota] table_size delete_statement: %s", delete_statement.data); ret = SPI_execute(delete_statement.data, false, 0); if (ret != SPI_OK_DELETE) - elog(ERROR, "[diskquota] flush_to_table_size SPI_execute failed: error code %d", ret); + ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("[diskquota] flush_to_table_size SPI_execute failed: error code %d", ret))); } if (insert_statement_flag) { - elog(DEBUG1, "[diskquota] table_size insert_statement: %s", insert_statement.data); ret = SPI_execute(insert_statement.data, false, 0); if (ret != SPI_OK_INSERT) - elog(ERROR, "[diskquota] flush_to_table_size SPI_execute failed: error code %d", ret); + ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("[diskquota] flush_to_table_size SPI_execute failed: error code %d", ret))); + } +} + +/* + * Generate the new shared blacklist from the local_black_list which + * exceed the quota limit. + * local_black_list is used to reduce the lock race. + */ +static void +flush_local_black_map(void) +{ + HASH_SEQ_STATUS iter; + LocalBlackMapEntry *localblackentry; + BlackMapEntry *blackentry; + bool found; + + LWLockAcquire(diskquota_locks.black_map_lock, LW_EXCLUSIVE); + + hash_seq_init(&iter, local_disk_quota_black_map); + while ((localblackentry = hash_seq_search(&iter)) != NULL) + { + if (localblackentry->isexceeded) + { + blackentry = (BlackMapEntry *) hash_search(disk_quota_black_map, + (void *) &localblackentry->keyitem, + HASH_ENTER_NULL, &found); + if (blackentry == NULL) + { + ereport(WARNING, (errmsg("[diskquota] Shared disk quota black map size limit reached." 
+ "Some out-of-limit schemas or roles will be lost" + "in blacklist."))); + } + else + { + /* new db objects which exceed quota limit */ + if (!found) + { + blackentry->targetoid = localblackentry->keyitem.targetoid; + blackentry->databaseoid = MyDatabaseId; + blackentry->targettype = localblackentry->keyitem.targettype; + } + } + localblackentry->isexceeded = false; + } + else + { + /* db objects are removed or under quota limit in the new loop */ + (void) hash_search(disk_quota_black_map, + (void *) &localblackentry->keyitem, + HASH_REMOVE, NULL); + (void) hash_search(local_disk_quota_black_map, + (void *) &localblackentry->keyitem, + HASH_REMOVE, NULL); + } + } + LWLockRelease(diskquota_locks.black_map_lock); +} + +/* + * Compare the disk quota limit and current usage of a database object. + * Put them into local blacklist if quota limit is exceeded. + */ +static void +check_disk_quota_by_oid(Oid targetOid, int64 current_usage, QuotaType type) +{ + bool found; + int32 quota_limit_mb; + int32 current_usage_mb; + LocalBlackMapEntry *localblackentry; + BlackMapEntry keyitem; + + QuotaLimitEntry *quota_entry; + + if (type == NAMESPACE_QUOTA) + { + quota_entry = (QuotaLimitEntry *) hash_search(namespace_quota_limit_map, + &targetOid, + HASH_FIND, &found); + } + else if (type == ROLE_QUOTA) + { + quota_entry = (QuotaLimitEntry *) hash_search(role_quota_limit_map, + &targetOid, + HASH_FIND, &found); + } + else + return; /* skip check if not namespace or role quota */ + + /* default no limit */ + if (!found) + return; + + quota_limit_mb = quota_entry->limitsize; + current_usage_mb = current_usage / (1024 * 1024); + if (current_usage_mb >= quota_limit_mb) + { + memset(&keyitem, 0, sizeof(BlackMapEntry)); + keyitem.targetoid = targetOid; + keyitem.databaseoid = MyDatabaseId; + keyitem.targettype = (uint32) type; + ereport(DEBUG1, (errmsg("[diskquota] Put object %u to blacklist with quota limit:%d, current usage:%d", + targetOid, quota_limit_mb, current_usage_mb))); + localblackentry = (LocalBlackMapEntry *) hash_search(local_disk_quota_black_map, + &keyitem, + HASH_ENTER, &found); + localblackentry->isexceeded = true; + } + +} + +/* + * Remove a namespace from local namespace_size_map + */ +static void +remove_namespace_map(Oid namespaceoid) +{ + hash_search(namespace_size_map, + &namespaceoid, + HASH_REMOVE, NULL); +} + +/* + * Update the current disk usage of a namespace in namespace_size_map. + */ +static void +update_namespace_map(Oid namespaceoid, int64 updatesize) +{ + bool found; + NamespaceSizeEntry *nsentry; + + nsentry = (NamespaceSizeEntry *) hash_search(namespace_size_map, + &namespaceoid, + HASH_ENTER, &found); + if (!found) + { + nsentry->namespaceoid = namespaceoid; + nsentry->totalsize = updatesize; + } + else + { + nsentry->totalsize += updatesize; + } + +} + +/* + * Remove a namespace from local role_size_map + */ +static void +remove_role_map(Oid owneroid) +{ + hash_search(role_size_map, + &owneroid, + HASH_REMOVE, NULL); +} + +/* + * Update the current disk usage of a namespace in role_size_map. + */ +static void +update_role_map(Oid owneroid, int64 updatesize) +{ + bool found; + RoleSizeEntry *rolentry; + + rolentry = (RoleSizeEntry *) hash_search(role_size_map, + &owneroid, + HASH_ENTER, &found); + if (!found) + { + rolentry->owneroid = owneroid; + rolentry->totalsize = updatesize; + } + else + { + rolentry->totalsize += updatesize; + } + +} + +/* + * Make sure a StringInfo's string is no longer than 'nchars' characters. 
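+ * Truncation happens in place; the underlying buffer keeps its original
+ * allocation.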
+ */ +static void +truncateStringInfo(StringInfo str, int nchars) +{ + if (str && + str->len > nchars) + { + Assert(str->data != NULL && + str->len <= str->maxlen); + str->len = nchars; + str->data[nchars] = '\0'; } } @@ -977,7 +991,7 @@ load_quotas(void) { ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("unable to connect to execute internal query"))); + errmsg("[diskquota] unable to connect to execute SPI query"))); } connected = true; PushActiveSnapshot(GetTransactionSnapshot()); @@ -1042,9 +1056,13 @@ do_load_quotas(void) HASH_REMOVE, NULL); } + /* + * read quotas from diskquota.quota_config + */ ret = SPI_execute("select targetoid, quotatype, quotalimitMB from diskquota.quota_config", true, 0); if (ret != SPI_OK_SELECT) - elog(ERROR, "[diskquota] load_quotas SPI_execute failed: error code %d", ret); + ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("[diskquota] load_quotas SPI_execute failed: error code %d", ret))); tupdesc = SPI_tuptable->tupdesc; if (tupdesc->natts != 3 || @@ -1052,9 +1070,10 @@ do_load_quotas(void) ((tupdesc)->attrs[1])->atttypid != INT4OID || ((tupdesc)->attrs[2])->atttypid != INT8OID) { - elog(ERROR, "[diskquota] configuration table \"quota_config\" is corrupted in database \"%s\"," - " please recreate diskquota extension", - get_database_name(MyDatabaseId)); + ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("[diskquota] configuration table \"quota_config\" is corrupted in database \"%s\"," + " please recreate diskquota extension", + get_database_name(MyDatabaseId)))); } for (i = 0; i < SPI_processed; i++) @@ -1122,7 +1141,7 @@ get_rel_owner_schema(Oid relid, Oid *ownerOid, Oid *nsOid) /* * Given table oid, check whether quota limit * of table's schema or table's owner are reached. - * Do enforcemet if quota exceeds. + * Do enforcement if quota exceeds. */ bool quota_check_common(Oid reloid) @@ -1140,6 +1159,7 @@ quota_check_common(Oid reloid) get_rel_owner_schema(reloid, &ownerOid, &nsOid); LWLockAcquire(diskquota_locks.black_map_lock, LW_SHARED); + /* check schema quota */ if (nsOid != InvalidOid) { keyitem.targetoid = nsOid; @@ -1150,6 +1170,7 @@ quota_check_common(Oid reloid) HASH_FIND, &found); if (found) { + LWLockRelease(diskquota_locks.black_map_lock); ereport(ERROR, (errcode(ERRCODE_DISK_FULL), errmsg("schema's disk space quota exceeded with name:%s", get_namespace_name(nsOid)))); @@ -1158,6 +1179,7 @@ quota_check_common(Oid reloid) } + /* check role quota */ if (ownerOid != InvalidOid) { keyitem.targetoid = ownerOid; @@ -1168,6 +1190,7 @@ quota_check_common(Oid reloid) HASH_FIND, &found); if (found) { + LWLockRelease(diskquota_locks.black_map_lock); ereport(ERROR, (errcode(ERRCODE_DISK_FULL), errmsg("role's disk space quota exceeded with name:%s", GetUserNameFromId(ownerOid)))); From b1678b8e93aa194b6fe883c935c4482c82cf9cda Mon Sep 17 00:00:00 2001 From: Hubert Zhang Date: Thu, 11 Apr 2019 14:18:54 +0000 Subject: [PATCH 030/330] Using gp_dist_random to fetch table size from all segments. 
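
With gp_dist_random('pg_class'), every segment scans its own copy of
pg_class, so pg_total_relation_size(oid) is evaluated locally on each
segment; SUM(...) GROUP BY oid then folds the per-segment results into
one cluster-wide size per table. A minimal sketch of the same pattern
(t1 is a hypothetical table name):

    select oid, sum(pg_total_relation_size(oid))
    from gp_dist_random('pg_class')
    where relname = 't1'
    group by oid;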
--- diskquota_utility.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/diskquota_utility.c b/diskquota_utility.c index 15e4b2f73f0..d9b5447da46 100644 --- a/diskquota_utility.c +++ b/diskquota_utility.c @@ -105,8 +105,8 @@ init_table_size_table(PG_FUNCTION_ARGS) resetStringInfo(&buf); appendStringInfo(&buf, "insert into diskquota.table_size " - "select oid, pg_total_relation_size(oid) from pg_class " - "where oid>= %u and (relkind='r' or relkind='m');", + "select oid, sum(pg_total_relation_size(oid)) from gp_dist_random('pg_class') " + "where oid>= %u and (relkind='r' or relkind='m') group by oid;", FirstNormalObjectId); ret = SPI_execute(buf.data, false, 0); if (ret != SPI_OK_INSERT) From 208c0584b0a37e9d01aa99d93d5d28afdf6e2d14 Mon Sep 17 00:00:00 2001 From: Hubert Zhang Date: Fri, 12 Apr 2019 10:11:51 +0800 Subject: [PATCH 031/330] Refactor gp_activetable (#26) remove unused function get_all_tables_size. we now get table size info from table_size table when rebooting. add new function pull_active_table_size_from_seg to make gp_fetch_active_tables interface clear. add some comments Use ereport to replace elog. --- diskquota.c | 34 ++-- diskquota.h | 1 - gp_activetable.c | 452 ++++++++++++++++++++++------------------------- 3 files changed, 228 insertions(+), 259 deletions(-) diff --git a/diskquota.c b/diskquota.c index d97c49996e9..42c27987d6e 100644 --- a/diskquota.c +++ b/diskquota.c @@ -125,7 +125,7 @@ _PG_init(void) /* diskquota.so must be in shared_preload_libraries to init SHM. */ if (!process_shared_preload_libraries_in_progress) - elog(ERROR, "diskquota.so not in shared_preload_libraries."); + ereport(ERROR, (errmsg("diskquota.so not in shared_preload_libraries."))); init_disk_quota_shmem(); init_disk_quota_enforcement(); @@ -525,7 +525,7 @@ create_monitor_db_table(void) if (SPI_execute(sql, false, 0) != SPI_OK_UTILITY) { - elog(ERROR, "[diskquota launcher] SPI_execute error, sql:'%s', errno:%d", sql, errno); + ereport(ERROR, (errmsg("[diskquota launcher] SPI_execute error, sql:'%s', errno:%d", sql, errno))); } } PG_CATCH(); @@ -572,13 +572,13 @@ start_workers_from_dblist(void) PushActiveSnapshot(GetTransactionSnapshot()); ret = SPI_connect(); if (ret != SPI_OK_CONNECT) - elog(ERROR, "[diskquota launcher] SPI connect error, errno:%d", errno); + ereport(ERROR, (errmsg("[diskquota launcher] SPI connect error, errno:%d", errno))); ret = SPI_execute("select dbid from diskquota_namespace.database_list;", true, 0); if (ret != SPI_OK_SELECT) - elog(ERROR, "select diskquota_namespace.database_list"); + ereport(ERROR, (errmsg("select diskquota_namespace.database_list"))); tupdesc = SPI_tuptable->tupdesc; if (tupdesc->natts != 1 || tupdesc->attrs[0]->atttypid != OIDOID) - elog(ERROR, "[diskquota launcher] table database_list corrupt, laucher will exit"); + ereport(ERROR, (errmsg("[diskquota launcher] table database_list corrupt, laucher will exit"))); for (i = 0; num < SPI_processed; i++) { @@ -590,15 +590,15 @@ start_workers_from_dblist(void) tup = SPI_tuptable->vals[i]; dat = SPI_getbinval(tup, tupdesc, 1, &isnull); if (isnull) - elog(ERROR, "[diskquota launcher] dbid cann't be null in table database_list"); + ereport(ERROR, (errmsg("[diskquota launcher] dbid cann't be null in table database_list"))); dbid = DatumGetObjectId(dat); if (!is_valid_dbid(dbid)) { - elog(LOG, "[diskquota launcher] database(oid:%u) in table database_list is not a valid database", dbid); + ereport(LOG, (errmsg("[diskquota launcher] database(oid:%u) in table database_list is not a valid 
database", dbid))); continue; } if (!start_worker_by_dboid(dbid)) - elog(ERROR, "[diskquota launcher] start worker process of database(oid:%u) failed", dbid); + ereport(ERROR, (errmsg("[diskquota launcher] start worker process of database(oid:%u) failed", dbid))); num++; /* @@ -607,7 +607,7 @@ start_workers_from_dblist(void) */ if (num >= MAX_NUM_MONITORED_DB) { - elog(LOG, "[diskquota launcher] diskquota monitored database limit is reached, database(oid:%u) will not enable diskquota", dbid); + ereport(LOG, (errmsg("[diskquota launcher] diskquota monitored database limit is reached, database(oid:%u) will not enable diskquota", dbid))); break; } } @@ -638,7 +638,7 @@ process_extension_ddl_message() if (local_extension_ddl_message.req_pid == 0 || local_extension_ddl_message.launcher_pid != MyProcPid) return; - elog(LOG, "[diskquota launcher]: received create/drop extension diskquota message"); + ereport(LOG, (errmsg("[diskquota launcher]: received create/drop extension diskquota message"))); do_process_extension_ddl_message(&code, local_extension_ddl_message); @@ -698,7 +698,7 @@ do_process_extension_ddl_message(MessageResult * code, ExtensionDDLMessage local *code = ERR_OK; break; default: - elog(LOG, "[diskquota launcher]:received unsupported message cmd=%d", local_extension_ddl_message.cmd); + ereport(LOG, (errmsg("[diskquota launcher]:received unsupported message cmd=%d", local_extension_ddl_message.cmd))); *code = ERR_UNKNOWN; break; } @@ -736,12 +736,12 @@ on_add_db(Oid dbid, MessageResult * code) if (num_db >= MAX_NUM_MONITORED_DB) { *code = ERR_EXCEED; - elog(ERROR, "[diskquota launcher] too many databases to monitor"); + ereport(ERROR, (errmsg("[diskquota launcher] too many databases to monitor"))); } if (!is_valid_dbid(dbid)) { *code = ERR_INVALID_DBID; - elog(ERROR, "[diskquota launcher] invalid database oid"); + ereport(ERROR, (errmsg("[diskquota launcher] invalid database oid"))); } /* @@ -762,7 +762,7 @@ on_add_db(Oid dbid, MessageResult * code) if (!start_worker_by_dboid(dbid)) { *code = ERR_START_WORKER; - elog(ERROR, "[diskquota launcher] failed to start worker - dbid=%u", dbid); + ereport(ERROR, (errmsg("[diskquota launcher] failed to start worker - dbid=%u", dbid))); } } @@ -779,7 +779,7 @@ on_del_db(Oid dbid, MessageResult * code) if (!is_valid_dbid(dbid)) { *code = ERR_INVALID_DBID; - elog(ERROR, "[diskquota launcher] invalid database oid"); + ereport(ERROR, (errmsg("[diskquota launcher] invalid database oid"))); } /* tell postmaster to stop this bgworker */ @@ -820,7 +820,7 @@ add_dbid_to_database_list(Oid dbid) ret = SPI_execute(str.data, false, 0); if (ret != SPI_OK_INSERT) { - elog(ERROR, "[diskquota launcher] SPI_execute sql:'%s', errno:%d", str.data, errno); + ereport(ERROR, (errmsg("[diskquota launcher] SPI_execute sql:'%s', errno:%d", str.data, errno))); } return; } @@ -842,7 +842,7 @@ del_dbid_from_database_list(Oid dbid) ret = SPI_execute(str.data, false, 0); if (ret != SPI_OK_DELETE) { - elog(ERROR, "[diskquota launcher] SPI_execute sql:'%s', errno:%d", str.data, errno); + ereport(ERROR, (errmsg("[diskquota launcher] SPI_execute sql:'%s', errno:%d", str.data, errno))); } } diff --git a/diskquota.h b/diskquota.h index ad2c373288b..4957131d53e 100644 --- a/diskquota.h +++ b/diskquota.h @@ -11,7 +11,6 @@ typedef enum typedef enum { - FETCH_ALL_SIZE, /* fetch size for all the tables */ FETCH_ACTIVE_OID, /* fetch active table list */ FETCH_ACTIVE_SIZE /* fetch size for active tables */ } FetchTableStatType; diff --git a/gp_activetable.c b/gp_activetable.c index 
170c88452cc..573e8329909 100644 --- a/gp_activetable.c +++ b/gp_activetable.c @@ -1,8 +1,10 @@ /* ------------------------------------------------------------------------- * - * activetable.c + * gp_activetable.c * * This code is responsible for detecting active table for databases + * quotamodel will call gp_fetch_active_tables() to fetch the active tables + * and their size information in each loop. * * Copyright (c) 2018-Present Pivotal Software, Inc. * @@ -39,6 +41,7 @@ #include "gp_activetable.h" #include "diskquota.h" +PG_FUNCTION_INFO_V1(diskquota_fetch_table_stat); /* The results set cache for SRF call*/ typedef struct DiskQuotaSetOFCache @@ -49,7 +52,7 @@ typedef struct DiskQuotaSetOFCache HTAB *active_tables_map = NULL; -/* active table hooks*/ +/* active table hooks which detect the disk file size change. */ static file_create_hook_type prev_file_create_hook = NULL; static file_extend_hook_type prev_file_extend_hook = NULL; static file_truncate_hook_type prev_file_truncate_hook = NULL; @@ -58,13 +61,11 @@ static void active_table_hook_smgrcreate(RelFileNodeBackend rnode); static void active_table_hook_smgrextend(RelFileNodeBackend rnode); static void active_table_hook_smgrtruncate(RelFileNodeBackend rnode); -PG_FUNCTION_INFO_V1(diskquota_fetch_table_stat); - static HTAB *get_active_tables_stats(ArrayType *array); -static HTAB *get_all_tables_size(void); -static HTAB *get_active_tables(void); -static StringInfoData convert_map_to_string(HTAB *active_list); +static HTAB *get_active_tables_oid(void); static HTAB *pull_active_list_from_seg(void); +static void pull_active_table_size_from_seg(HTAB *local_table_stats_map, char *active_oid_array); +static StringInfoData convert_map_to_string(HTAB *active_list); static void load_table_size(HTAB *local_table_stats_map); static void report_active_table_helper(const RelFileNodeBackend *relFileNode); @@ -74,7 +75,28 @@ void init_lock_active_tables(void); HTAB *gp_fetch_active_tables(bool is_init); /* - * Register smgr hook to detect active table. + * Init active_tables_map shared memory + */ +void +init_shm_worker_active_tables(void) +{ + HASHCTL ctl; + + memset(&ctl, 0, sizeof(ctl)); + + ctl.keysize = sizeof(DiskQuotaActiveTableFileEntry); + ctl.entrysize = sizeof(DiskQuotaActiveTableFileEntry); + ctl.hash = tag_hash; + + active_tables_map = ShmemInitHash("active_tables", + diskquota_max_active_tables, + diskquota_max_active_tables, + &ctl, + HASH_ELEM | HASH_FUNCTION); +} + +/* + * Register disk file size change hook to detect active table. */ void init_active_table_hook(void) @@ -89,6 +111,9 @@ init_active_table_hook(void) file_truncate_hook = active_table_hook_smgrtruncate; } +/* + * File create hook is used to monitor a new file create event + */ static void active_table_hook_smgrcreate(RelFileNodeBackend rnode) { @@ -98,6 +123,11 @@ active_table_hook_smgrcreate(RelFileNodeBackend rnode) report_active_table_helper(&rnode); } +/* + * File extend hook is used to monitor file size extend event + * it could be extending a page for heap table or just monitoring + * file write for an append-optimize table. 
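+ * Note that only the relfilenode is recorded at this point; the actual
+ * table size is computed later by the diskquota worker process.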
+ */ static void active_table_hook_smgrextend(RelFileNodeBackend rnode) { @@ -107,6 +137,9 @@ active_table_hook_smgrextend(RelFileNodeBackend rnode) report_active_table_helper(&rnode); } +/* + * File truncate hook is used to monitor a new file truncate event + */ static void active_table_hook_smgrtruncate(RelFileNodeBackend rnode) { @@ -117,30 +150,11 @@ active_table_hook_smgrtruncate(RelFileNodeBackend rnode) } /* - * Init active_tables_map shared memory - */ -void -init_shm_worker_active_tables(void) -{ - HASHCTL ctl; - - memset(&ctl, 0, sizeof(ctl)); - - ctl.keysize = sizeof(DiskQuotaActiveTableFileEntry); - ctl.entrysize = sizeof(DiskQuotaActiveTableFileEntry); - ctl.hash = tag_hash; - - active_tables_map = ShmemInitHash("active_tables", - diskquota_max_active_tables, - diskquota_max_active_tables, - &ctl, - HASH_ELEM | HASH_FUNCTION); -} - -/* - * Common function for reporting active tables, used by smgr and ao + * Common function for reporting active tables + * Currently, any file events(create, extend, truncate) are + * treated the same and report_active_table_helper just puts + * the corresponding relFileNode into the active_tables_map */ - static void report_active_table_helper(const RelFileNodeBackend *relFileNode) { @@ -157,7 +171,6 @@ report_active_table_helper(const RelFileNodeBackend *relFileNode) entry = hash_search(active_tables_map, &item, HASH_ENTER_NULL, &found); if (entry && !found) *entry = item; - LWLockRelease(diskquota_locks.active_table_lock); if (!found && entry == NULL) { @@ -167,20 +180,70 @@ */ ereport(WARNING, (errmsg("Share memory is not enough for active tables."))); } + LWLockRelease(diskquota_locks.active_table_lock); +} + +/* + * Interface of activetable module + * This function is called by quotamodel module. + * Disk quota worker process needs to collect + * active table disk usage from all the segments. + * And aggregate the table size on each segment + * to get the real table size at cluster level. + */ +HTAB * +gp_fetch_active_tables(bool is_init) +{ + HTAB *local_table_stats_map = NULL; + HASHCTL ctl; + HTAB *local_active_table_oid_maps; + StringInfoData active_oid_list; + + Assert(Gp_role == GP_ROLE_DISPATCH); + + memset(&ctl, 0, sizeof(ctl)); + ctl.keysize = sizeof(Oid); + ctl.entrysize = sizeof(DiskQuotaActiveTableEntry); + ctl.hcxt = CurrentMemoryContext; + ctl.hash = oid_hash; + + local_table_stats_map = hash_create("local active table map with relfilenode info", + 1024, + &ctl, + HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); + + if (is_init) + { + load_table_size(local_table_stats_map); + } + else + { + /* step 1: fetch active oids from all the segments */ + local_active_table_oid_maps = pull_active_list_from_seg(); + active_oid_list = convert_map_to_string(local_active_table_oid_maps); + + /* step 2: fetch active table sizes based on active oids */ + pull_active_table_size_from_seg(local_table_stats_map, active_oid_list.data); + + hash_destroy(local_active_table_oid_maps); + pfree(active_oid_list.data); + } + return local_table_stats_map; } /* * Function to get the table size from each segments - * There are two mode: 1. calcualte disk usage for all - * the tables, which is called when init the disk quota model. - * 2. calculate the active table size when refreshing the - * disk quota model. + * There are two modes: + * 1.
gather active table oid from all the segments, since table may only + * be modified on a subset of the segments, we need to firstly gather the + * active table oid list from all the segments. + * 2. calculate the active table size based on the active table oid list. */ Datum diskquota_fetch_table_stat(PG_FUNCTION_ARGS) { FuncCallContext *funcctx; - int32 model = PG_GETARG_INT32(0); + int32 mode = PG_GETARG_INT32(0); AttInMetadata *attinmeta; bool isFirstCall = true; @@ -205,19 +268,16 @@ diskquota_fetch_table_stat(PG_FUNCTION_ARGS) ereport(ERROR, (errmsg("This function must not be called on master or by user"))); } - switch (model) + switch (mode) { - case FETCH_ALL_SIZE: - localCacheTable = get_all_tables_size(); - break; case FETCH_ACTIVE_OID: - localCacheTable = get_active_tables(); + localCacheTable = get_active_tables_oid(); break; case FETCH_ACTIVE_SIZE: localCacheTable = get_active_tables_stats(PG_GETARG_ARRAYTYPE_P(1)); break; default: - ereport(ERROR, (errmsg("Unused model number, transaction will be aborted"))); + ereport(ERROR, (errmsg("Unused mode number, transaction will be aborted"))); break; } @@ -338,6 +398,10 @@ get_active_tables_stats(ArrayType *array) for (i = 0; i < nitems; i++) { + /* + * handle array containing NULL case for general inupt, but the active + * table oid array would not contain NULL in fact + */ if (bitmap && (*bitmap & bitmask) == 0) { continue; @@ -355,6 +419,7 @@ get_active_tables_stats(ArrayType *array) */ PG_TRY(); { + /* call pg_total_relation_size to get the active table size */ entry->tablesize = (Size) DatumGetInt64(DirectFunctionCall1(pg_total_relation_size, ObjectIdGetDatum(relOid))); } @@ -385,73 +450,15 @@ get_active_tables_stats(ArrayType *array) return local_table; } - -HTAB * -get_all_tables_size(void) -{ - HTAB *local_table_stats_map = NULL; - HASHCTL ctl; - HeapTuple tuple; - Relation classRel; - HeapScanDesc relScan; - - - memset(&ctl, 0, sizeof(ctl)); - ctl.keysize = sizeof(Oid); - ctl.entrysize = sizeof(DiskQuotaActiveTableEntry); - ctl.hcxt = CurrentMemoryContext; - ctl.hash = oid_hash; - - local_table_stats_map = hash_create("local active table map with relfilenode info", - 1024, - &ctl, - HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); - - - classRel = heap_open(RelationRelationId, AccessShareLock); - relScan = heap_beginscan_catalog(classRel, 0, NULL); - - - while ((tuple = heap_getnext(relScan, ForwardScanDirection)) != NULL) - { - Oid relOid; - DiskQuotaActiveTableEntry *entry; - - Form_pg_class classForm = (Form_pg_class) GETSTRUCT(tuple); - - if (classForm->relkind != RELKIND_RELATION && - classForm->relkind != RELKIND_MATVIEW) - continue; - relOid = HeapTupleGetOid(tuple); - - /* ignore system table */ - if (relOid < FirstNormalObjectId) - continue; - - entry = (DiskQuotaActiveTableEntry *) hash_search(local_table_stats_map, &relOid, HASH_ENTER, NULL); - - entry->tableoid = relOid; - entry->tablesize = (Size) DatumGetInt64(DirectFunctionCall1(pg_total_relation_size, - ObjectIdGetDatum(relOid))); - - } - - heap_endscan(relScan); - heap_close(classRel, AccessShareLock); - - return local_table_stats_map; -} - - /* * Get local active table with table oid and table size info. * This function first copies active table map from shared memory * to local active table map with refilenode info. Then traverses * the local map and find corresponding table oid and table file - * size. Finnaly stores them into local active table map and return. + * size. Finally stores them into local active table map and return. 
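+ * Relfilenodes that cannot be resolved to a table oid yet are pushed back
+ * into shared memory and retried in the next round.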
*/ -HTAB * -get_active_tables(void) +static HTAB * +get_active_tables_oid(void) { HASHCTL ctl; HTAB *local_active_table_file_map = NULL; @@ -478,6 +485,7 @@ hash_seq_init(&iter, active_tables_map); + /* copy active table from shared memory into local memory */ while ((active_table_file_entry = (DiskQuotaActiveTableFileEntry *) hash_seq_search(&iter)) != NULL) { bool found; @@ -507,13 +515,12 @@ &ctl, HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); - /* traverse local active table map and calculate their file size. */ - hash_seq_init(&iter, local_active_table_file_map); - /* * scan whole local map, get the oid of each table and calculate the size * of them */ + hash_seq_init(&iter, local_active_table_file_map); + while ((active_table_file_entry = (DiskQuotaActiveTableFileEntry *) hash_seq_search(&iter)) != NULL) { bool found; @@ -532,8 +539,8 @@ } /* - * If cannot convert relfilenode to relOid, put them back and wait for the - * next check. + * If cannot convert relfilenode to relOid, put them back to shared memory + * and wait for the next check. */ if (hash_get_num_entries(local_active_table_file_map) > 0) { @@ -556,6 +563,8 @@ /* * Load table size info from diskquota.table_size table. + * This is called at system startup; the disk quota black list + * and other shared memory will be warmed up from the table_size table. */ static void load_table_size(HTAB *local_table_stats_map) @@ -566,34 +575,21 @@ bool found; DiskQuotaActiveTableEntry *quota_entry; - RangeVar *rv; - Relation rel; - - rv = makeRangeVar("diskquota", "table_size", -1); - rel = heap_openrv_extended(rv, AccessShareLock, true); - if (!rel) - { - /* configuration table is missing. */ - elog(ERROR, "[diskquota] table \"table_size\" is missing in database \"%s\"," - " please recreate diskquota extension", - get_database_name(MyDatabaseId)); - } - heap_close(rel, AccessShareLock); - ret = SPI_execute("select tableid, size from diskquota.table_size", true, 0); if (ret != SPI_OK_SELECT) - elog(ERROR, "[diskquota] load_table_size SPI_execute failed: error code %d", ret); + ereport(ERROR, (errmsg("[diskquota] load_table_size SPI_execute failed: error code %d", ret))); tupdesc = SPI_tuptable->tupdesc; if (tupdesc->natts != 2 || ((tupdesc)->attrs[0])->atttypid != OIDOID || ((tupdesc)->attrs[1])->atttypid != INT8OID) { - elog(ERROR, "[diskquota] table \"table_size\" is corrupted in database \"%s\"," - " please recreate diskquota extension", - get_database_name(MyDatabaseId)); + ereport(ERROR, (errmsg("[diskquota] table \"table_size\" is corrupted in database \"%s\"," + " please recreate diskquota extension", + get_database_name(MyDatabaseId)))); } + /* push the table oid and size into local_table_stats_map */ for (i = 0; i < SPI_processed; i++) { HeapTuple tup = SPI_tuptable->vals[i]; @@ -623,116 +619,24 @@ return; } -/* - * Worker process at master need to collect - * active table disk usage from all the segments. - * And aggregate the table size on each segment - * to obtainer the real table size at cluster level.
- */ -HTAB * -gp_fetch_active_tables(bool is_init) -{ - CdbPgResults cdb_pgresults = {NULL, 0}; - int i, - j; - char *sql; - HTAB *local_table_stats_map = NULL; - HASHCTL ctl; - HTAB *local_active_table_maps; - StringInfoData buffer; - StringInfoData map_string; - - Assert(Gp_role == GP_ROLE_DISPATCH); - - memset(&ctl, 0, sizeof(ctl)); - ctl.keysize = sizeof(Oid); - ctl.entrysize = sizeof(DiskQuotaActiveTableEntry); - ctl.hcxt = CurrentMemoryContext; - ctl.hash = oid_hash; - - local_table_stats_map = hash_create("local active table map with relfilenode info", - 1024, - &ctl, - HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); - - if (is_init) - { - load_table_size(local_table_stats_map); - } - else - { - local_active_table_maps = pull_active_list_from_seg(); - map_string = convert_map_to_string(local_active_table_maps); - initStringInfo(&buffer); - appendStringInfo(&buffer, "select * from diskquota.diskquota_fetch_table_stat(2, '%s'::oid[])", - map_string.data); - sql = buffer.data; - - CdbDispatchCommand(sql, DF_NONE, &cdb_pgresults); - - /* collect data from each segment */ - for (i = 0; i < cdb_pgresults.numResults; i++) - { - - Size tableSize; - bool found; - Oid tableOid; - DiskQuotaActiveTableEntry *entry; - - struct pg_result *pgresult = cdb_pgresults.pg_results[i]; - - if (PQresultStatus(pgresult) != PGRES_TUPLES_OK) - { - cdbdisp_clearCdbPgResults(&cdb_pgresults); - ereport(ERROR, - (errmsg("unexpected result from segment: %d", - PQresultStatus(pgresult)))); - } - - for (j = 0; j < PQntuples(pgresult); j++) - { - tableOid = atooid(PQgetvalue(pgresult, j, 0)); - tableSize = (Size) atoll(PQgetvalue(pgresult, j, 1)); - - entry = (DiskQuotaActiveTableEntry *) hash_search( - local_table_stats_map, &tableOid, HASH_ENTER, &found); - - if (!found) - { - entry->tableoid = tableOid; - entry->tablesize = tableSize; - } - else - { - entry->tablesize = entry->tablesize + tableSize; - } - - } - } - cdbdisp_clearCdbPgResults(&cdb_pgresults); - } - return local_table_stats_map; -} - - /* * Convert a hash map with oids into a string array * This function is used to prepare the second array parameter * of function diskquota_fetch_table_stat. */ static StringInfoData -convert_map_to_string(HTAB *active_list) +convert_map_to_string(HTAB *local_active_table_oid_maps) { HASH_SEQ_STATUS iter; StringInfoData buffer; DiskQuotaActiveTableEntry *entry; uint32 count = 0; - uint32 nitems = hash_get_num_entries(active_list); + uint32 nitems = hash_get_num_entries(local_active_table_oid_maps); initStringInfo(&buffer); appendStringInfo(&buffer, "{"); - hash_seq_init(&iter, active_list); + hash_seq_init(&iter, local_active_table_oid_maps); while ((entry = (DiskQuotaActiveTableEntry *) hash_seq_search(&iter)) != NULL) { @@ -753,13 +657,10 @@ convert_map_to_string(HTAB *active_list) /* - * Get active table list from all the segments. - * Since when loading data, there is case where only subset for - * segment doing the real loading. As a result, the same table - * maybe active on some segemnts while not active on others. We - * haven't store the table size for each segment on master(to save - * memory), so when re-calcualte the table size, we need to sum the - * table size on all of the segments. + * Get active table size from all the segments based on + * active table oid list. + * Function diskquota_fetch_table_stat is called to calculate + * the table size on the fly. 
*/ static HTAB * pull_active_list_from_seg(void) @@ -767,8 +668,8 @@ pull_active_list_from_seg(void) CdbPgResults cdb_pgresults = {NULL, 0}; int i, j; - char *sql; - HTAB *local_table_stats_map = NULL; + char *sql = NULL; + HTAB *local_active_table_oid_map = NULL; HASHCTL ctl; DiskQuotaActiveTableEntry *entry; @@ -778,19 +679,20 @@ pull_active_list_from_seg(void) ctl.hcxt = CurrentMemoryContext; ctl.hash = oid_hash; - local_table_stats_map = hash_create("local active table map with relfilenode info", - 1024, - &ctl, - HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); + local_active_table_oid_map = hash_create("local active table map with relfilenode info", + 1024, + &ctl, + HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); - sql = "select * from diskquota.diskquota_fetch_table_stat(1, '{}'::oid[])"; + /* first get all oid of tables which are active table on any segment */ + sql = "select * from diskquota.diskquota_fetch_table_stat(0, '{}'::oid[])"; + /* any errors will be catch in upper level */ CdbDispatchCommand(sql, DF_NONE, &cdb_pgresults); for (i = 0; i < cdb_pgresults.numResults; i++) { - Oid tableOid; bool found; @@ -800,25 +702,93 @@ pull_active_list_from_seg(void) { cdbdisp_clearCdbPgResults(&cdb_pgresults); ereport(ERROR, - (errmsg("unexpected result from segment: %d", + (errmsg("[diskquota] fetching active tables, encounter unexpected result from segment: %d", PQresultStatus(pgresult)))); } + /* push the active table oid into local_active_table_oid_map */ for (j = 0; j < PQntuples(pgresult); j++) { tableOid = atooid(PQgetvalue(pgresult, j, 0)); - entry = (DiskQuotaActiveTableEntry *) hash_search(local_table_stats_map, &tableOid, HASH_ENTER, &found); + entry = (DiskQuotaActiveTableEntry *) hash_search(local_active_table_oid_map, &tableOid, HASH_ENTER, &found); if (!found) { entry->tableoid = tableOid; entry->tablesize = 0; } - } } cdbdisp_clearCdbPgResults(&cdb_pgresults); - return local_table_stats_map; + return local_active_table_oid_map; +} + +/* + * Get active table list from all the segments. + * Since when loading data, there is case where only subset for + * segment doing the real loading. As a result, the same table + * maybe active on some segments while not active on others. We + * haven't store the table size for each segment on master(to save + * memory), so when re-calculate the table size, we need to sum the + * table size on all of the segments. 
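+ * The rows returned by each segment are summed into
+ * local_table_stats_map, keyed by table oid.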
+ */ +static void +pull_active_table_size_from_seg(HTAB *local_table_stats_map, char *active_oid_array) +{ + CdbPgResults cdb_pgresults = {NULL, 0}; + StringInfoData sql_command; + int i; + int j; + + initStringInfo(&sql_command); + appendStringInfo(&sql_command, "select * from diskquota.diskquota_fetch_table_stat(1, '%s'::oid[])", + active_oid_array); + CdbDispatchCommand(sql_command.data, DF_NONE, &cdb_pgresults); + pfree(sql_command.data); + + /* sum table size from each segment into local_table_stats_map */ + for (i = 0; i < cdb_pgresults.numResults; i++) + { + + Size tableSize; + bool found; + Oid tableOid; + DiskQuotaActiveTableEntry *entry; + + struct pg_result *pgresult = cdb_pgresults.pg_results[i]; + + if (PQresultStatus(pgresult) != PGRES_TUPLES_OK) + { + cdbdisp_clearCdbPgResults(&cdb_pgresults); + ereport(ERROR, + (errmsg("[diskquota] fetching active tables, encounter unexpected result from segment: %d", + PQresultStatus(pgresult)))); + } + + for (j = 0; j < PQntuples(pgresult); j++) + { + tableOid = atooid(PQgetvalue(pgresult, j, 0)); + tableSize = (Size) atoll(PQgetvalue(pgresult, j, 1)); + + entry = (DiskQuotaActiveTableEntry *) hash_search( + local_table_stats_map, &tableOid, HASH_ENTER, &found); + + if (!found) + { + /* receive table size info from the first segment */ + entry->tableoid = tableOid; + entry->tablesize = tableSize; + } + else + { + /* sum table size from all the segments */ + entry->tablesize = entry->tablesize + tableSize; + } + + } + } + cdbdisp_clearCdbPgResults(&cdb_pgresults); + return; } From 99f73308cf441c5aaf028e0e3dbdb52b1ac5aaef Mon Sep 17 00:00:00 2001 From: Hubert Zhang Date: Mon, 15 Apr 2019 09:51:59 +0000 Subject: [PATCH 032/330] Rename database_size_view to make it compatible with schema. --- diskquota--1.0.sql | 2 +- expected/test_fast_disk_check.out | 2 +- sql/test_fast_disk_check.sql | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/diskquota--1.0.sql b/diskquota--1.0.sql index f57e59773a7..692979620de 100644 --- a/diskquota--1.0.sql +++ b/diskquota--1.0.sql @@ -55,7 +55,7 @@ from diskquota.table_size as ts, WHERE pgc.relowner = qc.targetoid and pgc.relowner = pgr.oid and ts.tableid = pgc.oid GROUP BY pgc.relowner, pgr.rolname, qc.quotalimitMB; -CREATE VIEW diskquota.database_size_view AS +CREATE VIEW diskquota.show_fast_database_size_view AS SELECT ((SELECT SUM(pg_relation_size(oid)) FROM pg_class WHERE oid <= 16384)+ (SELECT SUM(size) FROM diskquota.table_size)) AS dbsize; CREATE TYPE diskquota.diskquota_active_table_type AS ("TABLE_OID" oid, "TABLE_SIZE" int8); diff --git a/expected/test_fast_disk_check.out b/expected/test_fast_disk_check.out index c2560e9036c..430cf48d0fe 100644 --- a/expected/test_fast_disk_check.out +++ b/expected/test_fast_disk_check.out @@ -11,7 +11,7 @@ SELECT pg_sleep(10); (1 row) -SELECT (pg_database_size(oid)-dbsize)/dbsize < 0.1 FROM pg_database, diskquota.database_size_view WHERE datname='contrib_regression'; +SELECT (pg_database_size(oid)-dbsize)/dbsize < 0.1 FROM pg_database, diskquota.show_fast_database_size_view WHERE datname='contrib_regression'; ?column? 
---------- f diff --git a/sql/test_fast_disk_check.sql b/sql/test_fast_disk_check.sql index 12c0704cab5..1e973ada00f 100644 --- a/sql/test_fast_disk_check.sql +++ b/sql/test_fast_disk_check.sql @@ -5,7 +5,7 @@ SET search_path to s1; CREATE TABLE a(i int); INSERT INTO a SELECT generate_series(1,200000); SELECT pg_sleep(10); -SELECT (pg_database_size(oid)-dbsize)/dbsize < 0.1 FROM pg_database, diskquota.database_size_view WHERE datname='contrib_regression'; +SELECT (pg_database_size(oid)-dbsize)/dbsize < 0.1 FROM pg_database, diskquota.show_fast_database_size_view WHERE datname='contrib_regression'; RESET search_path; DROP TABLE s1.a; DROP SCHEMA s1; From 3d6162f526de29a8586a8178b39d7894461f7056 Mon Sep 17 00:00:00 2001 From: Hubert Zhang Date: Tue, 16 Apr 2019 15:13:16 +0800 Subject: [PATCH 033/330] Add release pipeline for diskquota. --- concourse/pipeline/release_pipeline.yml | 177 ++++++++++++++++++++++++ concourse/scripts/build_diskquota.sh | 19 ++- concourse/scripts/test_diskquota.sh | 6 +- concourse/tasks/build_diskquota.yml | 1 + concourse/tasks/test_diskquota.yml | 1 + expected/test_primary_failure.out | 8 +- sql/test_primary_failure.sql | 6 +- 7 files changed, 212 insertions(+), 6 deletions(-) create mode 100644 concourse/pipeline/release_pipeline.yml diff --git a/concourse/pipeline/release_pipeline.yml b/concourse/pipeline/release_pipeline.yml new file mode 100644 index 00000000000..8b32d1b1986 --- /dev/null +++ b/concourse/pipeline/release_pipeline.yml @@ -0,0 +1,177 @@ +## ====================================================================== +## Pipeline for GPDB PL/R GPPKG +## ====================================================================== + +groups: +- name: GPDB6 + jobs: + - diskquota_centos6_build + - diskquota_centos7_build + - diskquota_centos6_test_release + - diskquota_centos7_test_release + +resource_types: +- name: gcs + type: docker-image + source: + repository: frodenas/gcs-resource + +resources: + +# Image Resources + +- name: centos-gpdb-dev-6 + type: docker-image + source: + repository: pivotaldata/centos-gpdb-dev + tag: '6-gcc6.2-llvm3.7' + +- name: centos-gpdb-dev-7 + type: docker-image + source: + repository: pivotaldata/centos-gpdb-dev + tag: '7-gcc6.2-llvm3.7' + +# Github Source Codes + +- name: gpdb_src + type: git + source: + branch: {{gpdb-git-branch}} + uri: {{gpdb-git-remote}} + +- name: diskquota_src + type: git + source: + branch: gpdb + uri: https://github.com/greenplum-db/diskquota.git + tag_filter: 1.* + +# gpdb binary on gcs is located as different folder for different version +# use gcs_gpdb_binary_folder to specify them. 
+- name: bin_gpdb_centos6 + type: gcs + source: + bucket: {{gcs-bucket-intermediates}} + json_key: {{concourse-gcs-resources-service-account-key}} + versioned_file: ((gcs_gpdb_binary_folder))/bin_gpdb_centos6/bin_gpdb.tar.gz + +- name: bin_gpdb_centos7 + type: gcs + source: + bucket: {{gcs-bucket-intermediates}} + json_key: {{concourse-gcs-resources-service-account-key}} + versioned_file: ((gcs_gpdb_binary_folder))/bin_gpdb_centos7/bin_gpdb.tar.gz + +- name: bin_diskquota_centos7 + type: gcs + source: + bucket: {{gcs-bucket}} + json_key: {{concourse-gcs-resources-service-account-key}} + regexp: diskquota/released/gpdb6/diskquota-(.*)-rhel7-x86_64.tar.gz + +- name: bin_diskquota_centos6 + type: gcs + source: + bucket: {{gcs-bucket}} + json_key: {{concourse-gcs-resources-service-account-key}} + regexp: diskquota/released/gpdb6/diskquota-(.*)-rhel6-x86_64.tar.gz + +## jobs +## ====================================================================== + +jobs: +# Build PLR GPPKG + +- name: diskquota_centos7_build + max_in_flight: 3 + plan: + - aggregate: + - get: centos-gpdb-dev-7 + - get: diskquota_src + trigger: true + - get: bin_gpdb_centos7 + - get: gpdb_src + - aggregate: + - task: build_diskquota + file: diskquota_src/concourse/tasks/build_diskquota.yml + image: centos-gpdb-dev-7 + input_mapping: + bin_gpdb: bin_gpdb_centos7 + output_mapping: + bin_diskquota: bin_diskquota_centos7 + params: + OSVER: centos7 + GPDBVER: gp6 + DEV_RELEASE: release + - aggregate: + - put: bin_diskquota_centos7 + params: + file: diskquota_artifacts/diskquota*.tar.gz + +- name: diskquota_centos6_build + max_in_flight: 3 + plan: + - aggregate: + - get: centos-gpdb-dev-6 + - get: diskquota_src + trigger: true + - get: bin_gpdb_centos6 + - get: gpdb_src + - aggregate: + - task: build_diskquota + file: diskquota_src/concourse/tasks/build_diskquota.yml + image: centos-gpdb-dev-6 + input_mapping: + bin_gpdb: bin_gpdb_centos6 + output_mapping: + bin_diskquota: bin_diskquota_centos6 + params: + OSVER: centos6 + GPDBVER: gp6 + DEV_RELEASE: release + - aggregate: + - put: bin_diskquota_centos6 + params: + file: diskquota_artifacts/diskquota*.tar.gz + + +- name: diskquota_centos6_test_release + plan: + - aggregate: + - get: centos-gpdb-dev-6 + - get: diskquota_src + - get: bin_diskquota_centos6 + - get: bin_gpdb_centos6 + trigger: true + - get: gpdb_src + - task: test_diskquota + file: diskquota_src/concourse/tasks/test_diskquota.yml + image: centos-gpdb-dev-6 + input_mapping: + bin_gpdb: bin_gpdb_centos6 + bin_diskquota: bin_diskquota_centos6 + params: + OSVER: centos6 + GPDBVER: gp6 + DEV_RELEASE: release + +- name: diskquota_centos7_test_release + plan: + - aggregate: + - get: centos-gpdb-dev-7 + - get: diskquota_src + - get: bin_diskquota_centos7 + - get: bin_gpdb_centos7 + trigger: true + - get: gpdb_src + - task: test_diskquota + file: diskquota_src/concourse/tasks/test_diskquota.yml + image: centos-gpdb-dev-7 + input_mapping: + bin_gpdb: bin_gpdb_centos7 + bin_diskquota: bin_diskquota_centos7 + params: + OSVER: centos7 + GPDBVER: gp6 + DEV_RELEASE: release diff --git a/concourse/scripts/build_diskquota.sh b/concourse/scripts/build_diskquota.sh index a75f9a9d431..2feed00b5f4 100755 --- a/concourse/scripts/build_diskquota.sh +++ b/concourse/scripts/build_diskquota.sh @@ -12,10 +12,16 @@ function pkg() { export USE_PGXS=1 pushd diskquota_src/ + if [ "${DEV_RELEASE}" == "release" ]; then + if ! git describe --tags >/dev/null 2>&1 ; then + echo "git describe failed"; exit 1 + fi + DISKQUOTA_VERSION=$(git describe --tags) + 
fi make clean make install popd - + pushd /usr/local/greenplum-db-devel/ echo 'cp -r lib share $GPHOME || exit 1'> install_gpdb_component chmod a+x install_gpdb_component @@ -25,6 +31,17 @@ function pkg() { share/postgresql/extension/diskquota--1.0.sql \ install_gpdb_component popd + if [ "${DEV_RELEASE}" == "release" ]; then + case "$OSVER" in + centos6) + cp $TOP_DIR/diskquota_artifacts/component_diskquota.tar.gz $TOP_DIR/diskquota_artifacts/diskquota-${DISKQUOTA_VERSION}-rhel6-x86_64.tar.gz + ;; + centos7) + cp $TOP_DIR/diskquota_artifacts/component_diskquota.tar.gz $TOP_DIR/diskquota_artifacts/diskquota-${DISKQUOTA_VERSION}-rhel7-x86_64.tar.gz + ;; + *) echo "Unknown OS: $OSVER"; exit 1 ;; + esac + fi } function _main() { diff --git a/concourse/scripts/test_diskquota.sh b/concourse/scripts/test_diskquota.sh index 83a30a267fe..573a60395fc 100755 --- a/concourse/scripts/test_diskquota.sh +++ b/concourse/scripts/test_diskquota.sh @@ -51,7 +51,11 @@ function setup_gpadmin_user() { } function install_diskquota() { - tar -xzf bin_diskquota/component_diskquota.tar.gz -C /usr/local/greenplum-db-devel + if [ "${DEV_RELEASE}" == "release" ]; then + tar -xzf bin_diskquota/diskquota*.tar.gz -C /usr/local/greenplum-db-devel + else + tar -xzf bin_diskquota/component_diskquota.tar.gz -C /usr/local/greenplum-db-devel + fi } function _main() { time install_gpdb diff --git a/concourse/tasks/build_diskquota.yml b/concourse/tasks/build_diskquota.yml index 56ad6486111..dc483709650 100644 --- a/concourse/tasks/build_diskquota.yml +++ b/concourse/tasks/build_diskquota.yml @@ -14,3 +14,4 @@ run: params: OSVER: GPDBVER: + DEV_RELEASE: devel diff --git a/concourse/tasks/test_diskquota.yml b/concourse/tasks/test_diskquota.yml index 372d51dafb9..e32abaa42d3 100644 --- a/concourse/tasks/test_diskquota.yml +++ b/concourse/tasks/test_diskquota.yml @@ -12,3 +12,4 @@ run: params: OSVER: GPDBVER: + DEV_RELEASE: devel diff --git a/expected/test_primary_failure.out b/expected/test_primary_failure.out index 7dfdfd0469b..6eda03f54ab 100644 --- a/expected/test_primary_failure.out +++ b/expected/test_primary_failure.out @@ -246,7 +246,12 @@ select pg_recoverseg((select datadir from gp_segment_configuration c where c.rol (1 row) --- end_ignore +select pg_sleep(10); + pg_sleep +---------- + +(1 row) + -- check GPDB status select content, preferred_role, role, status, mode from gp_segment_configuration where content = 0; content | preferred_role | role | status | mode @@ -262,6 +267,7 @@ SELECT pg_sleep(10); (1 row) +-- end_ignore SELECT quota_in_mb, nspsize_in_bytes from diskquota.show_fast_schema_quota_view where schema_name='ftsr'; quota_in_mb | nspsize_in_bytes -------------+------------------ diff --git a/sql/test_primary_failure.sql b/sql/test_primary_failure.sql index ed1e358827e..ddc0f1574fc 100644 --- a/sql/test_primary_failure.sql +++ b/sql/test_primary_failure.sql @@ -58,12 +58,12 @@ select pg_sleep(15); select pg_recoverseg((select datadir from gp_segment_configuration c where c.role='p' and c.content=-1), 'a'); select pg_sleep(10); select pg_recoverseg((select datadir from gp_segment_configuration c where c.role='p' and c.content=-1), 'ar'); --- end_ignore - +select pg_sleep(10); -- check GPDB status select content, preferred_role, role, status, mode from gp_segment_configuration where content = 0; --- no sleep, it will take effect immediately SELECT pg_sleep(10); +-- end_ignore + SELECT quota_in_mb, nspsize_in_bytes from diskquota.show_fast_schema_quota_view where schema_name='ftsr'; INSERT INTO a SELECT 
generate_series(1,100); From 55b942bdb306472b95a821fb9530cf484e17bb87 Mon Sep 17 00:00:00 2001 From: Weinan WANG Date: Fri, 19 Apr 2019 15:01:25 +0800 Subject: [PATCH 034/330] calculate table size including QD side file size (#32) * calculate table size including QD side file size For a toast table, the `QD` side also has some table files. Pretend the diskquota process is an entrydb process and grab these small file sizes. * Add a test case for the table_size table to ensure that the `table_size` table provides the same values as `pg_total_relation_size` --- diskquota_schedule | 1 + diskquota_utility.c | 34 ++++++++++++++++++++++++++++++---- expected/test_table_size.out | 22 ++++++++++++++++++++++ quotamodel.c | 7 +++++++ sql/test_table_size.sql | 14 ++++++++++++++ 5 files changed, 74 insertions(+), 4 deletions(-) create mode 100644 expected/test_table_size.out create mode 100644 sql/test_table_size.sql diff --git a/diskquota_schedule b/diskquota_schedule index 5f5f97accc4..f1d01e9174f 100644 --- a/diskquota_schedule +++ b/diskquota_schedule @@ -1,5 +1,6 @@ test: init test: prepare +test: test_table_size test: test_fast_disk_check test: test_role test_schema test_drop_table test_column test_copy test_update test_toast test_truncate test_reschema test_temp_role test_rename test_delete_quota test_mistake test: test_truncate diff --git a/diskquota_utility.c b/diskquota_utility.c index d9b5447da46..deefdebc8c0 100644 --- a/diskquota_utility.c +++ b/diskquota_utility.c @@ -101,13 +101,39 @@ init_table_size_table(PG_FUNCTION_ARGS) if (ret != SPI_OK_DELETE) elog(ERROR, "cannot delete table_size table: error code %d", ret); - /* fill table_size table with table oid and size info. */ + /* fetch table size */ resetStringInfo(&buf); appendStringInfo(&buf, - "insert into diskquota.table_size " - "select oid, sum(pg_total_relation_size(oid)) from gp_dist_random('pg_class') " - "where oid>= %u and (relkind='r' or relkind='m') group by oid;", + "select oid, pg_total_relation_size(oid)" + " from pg_class" + " where oid >= %u and (relkind='r' or relkind='m')", FirstNormalObjectId); + ret = SPI_execute(buf.data, false, 0); + if (ret != SPI_OK_SELECT) + elog(ERROR, "cannot fetch pg_total_relation_size. error code %d", ret); + + /* fill table_size table with table oid and size info. */ + resetStringInfo(&buf); + appendStringInfo(&buf, + "insert into diskquota.table_size values"); + TupleDesc tupdesc = SPI_tuptable->tupdesc; + for (int i = 0; i < SPI_processed; i++) + { + HeapTuple tup; + bool isnull; + Oid oid; + int64 sz; + + tup = SPI_tuptable->vals[i]; + oid = SPI_getbinval(tup, tupdesc, 1, &isnull); + sz = SPI_getbinval(tup, tupdesc, 2, &isnull); + + appendStringInfo(&buf, " ( %u, %ld)", oid, sz); + if (i + 1 < SPI_processed) + appendStringInfoChar(&buf, ','); + } + appendStringInfo(&buf, ";"); + ret = SPI_execute(buf.data, false, 0); if (ret != SPI_OK_INSERT) elog(ERROR, "cannot insert table_size table: error code %d", ret); diff --git a/expected/test_table_size.out b/expected/test_table_size.out new file mode 100644 index 00000000000..22c5523a16e --- /dev/null +++ b/expected/test_table_size.out @@ -0,0 +1,22 @@ +-- Test tablesize table +create table a(i text); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
+insert into a select * from generate_series(1,10000); +select pg_sleep(2); + pg_sleep +---------- + +(1 row) + +create table buffer(oid oid, relname name, size bigint); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'oid' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +with size as ( select oid,relname,pg_total_relation_size(oid) from pg_class) insert into buffer select size.oid, size.relname, size.pg_total_relation_size from size, diskquota.table_size as dt where dt.tableid = size.oid and relname = 'a'; +insert into buffer select oid, relname, sum(pg_total_relation_size(oid)) from gp_dist_random('pg_class') where oid > 16384 and (relkind='r' or relkind='m') and relname = 'a' group by oid, relname; +select sum(buffer.size) = diskquota.table_size.size from buffer, diskquota.table_size where buffer.oid = diskquota.table_size.tableid group by diskquota.table_size.size; + ?column? +---------- + t +(1 row) + diff --git a/quotamodel.c b/quotamodel.c index f8d9f297cbc..b75b50925d8 100644 --- a/quotamodel.c +++ b/quotamodel.c @@ -42,6 +42,7 @@ #include "utils/syscache.h" #include +#include #include "gp_activetable.h" #include "diskquota.h" @@ -600,6 +601,12 @@ calculate_table_disk_usage(bool is_init) /* skip to recalculate the tables which are not in active list */ if (active_tbl_found) { + /* pretend the process is in utility mode, and append the table size on the master */ + GpRoleValue Gp_role_backup = Gp_role; + Gp_role = GP_ROLE_UTILITY; + active_table_entry->tablesize += (Size) DatumGetInt64(DirectFunctionCall1(pg_total_relation_size, ObjectIdGetDatum(relOid))); + Gp_role = Gp_role_backup; + /* firstly calculate the updated total size of a table */ updated_total_size = active_table_entry->tablesize - tsentry->totalsize; diff --git a/sql/test_table_size.sql b/sql/test_table_size.sql new file mode 100644 index 00000000000..80279cb180f --- /dev/null +++ b/sql/test_table_size.sql @@ -0,0 +1,14 @@ +-- Test tablesize table + +create table a(i text); + +insert into a select * from generate_series(1,10000); + +select pg_sleep(2); +create table buffer(oid oid, relname name, size bigint); + +with size as ( select oid,relname,pg_total_relation_size(oid) from pg_class) insert into buffer select size.oid, size.relname, size.pg_total_relation_size from size, diskquota.table_size as dt where dt.tableid = size.oid and relname = 'a'; + +insert into buffer select oid, relname, sum(pg_total_relation_size(oid)) from gp_dist_random('pg_class') where oid > 16384 and (relkind='r' or relkind='m') and relname = 'a' group by oid, relname; + +select sum(buffer.size) = diskquota.table_size.size from buffer, diskquota.table_size where buffer.oid = diskquota.table_size.tableid group by diskquota.table_size.size; From 46adcc0420ac76b9a0edc4c58652f68d6de02227 Mon Sep 17 00:00:00 2001 From: Hubert Zhang Date: Fri, 26 Apr 2019 16:04:22 +0800 Subject: [PATCH 035/330] Set Gp_role to GP_ROLE_DISPATCH explicitly. 
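A GPDB background worker does not automatically behave as a dispatcher, and both the diskquota worker and the launcher dispatch work to the segments, so each of them now sets Gp_role explicitly right after connecting. A minimal sketch of the pattern this patch applies (sketch only — the function name, the database name "mydb", and the header choices are illustrative assumptions, not part of this patch):

#include "postgres.h"
#include "cdb/cdbvars.h"			/* assumed home of Gp_role, GP_ROLE_DISPATCH */
#include "postmaster/bgworker.h"	/* BackgroundWorkerInitializeConnection */

static void
sketch_bgworker_main(Datum main_arg)
{
	/* a bgworker has no database context until it connects */
	BackgroundWorkerInitializeConnection("mydb", NULL);

	/*
	 * Code that dispatches to the segments (e.g. CdbDispatchCommand)
	 * expects dispatcher semantics, so set the role explicitly rather
	 * than relying on the process default.
	 */
	Gp_role = GP_ROLE_DISPATCH;

	/* ... enter the worker's main loop ... */
}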
--- diskquota.c | 6 ++++++ expected/test_truncate.out | 2 +- sql/test_truncate.sql | 2 +- 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/diskquota.c b/diskquota.c index 42c27987d6e..ebdba2776c2 100644 --- a/diskquota.c +++ b/diskquota.c @@ -283,6 +283,9 @@ disk_quota_worker_main(Datum main_arg) */ init_ps_display("bgworker:", "[diskquota]", dbname, ""); + /* diskquota worker should have Gp_role as dispatcher */ + Gp_role = GP_ROLE_DISPATCH; + /* * Initialize diskquota related local hash map and refresh model * immediately @@ -410,6 +413,9 @@ disk_quota_launcher_main(Datum main_arg) */ BackgroundWorkerInitializeConnection(DISKQUOTA_DB, NULL); + /* diskquota launcher should have Gp_role as dispatcher */ + Gp_role = GP_ROLE_DISPATCH; + /* * use table diskquota_namespace.database_list to store diskquota enabled * database. diff --git a/expected/test_truncate.out b/expected/test_truncate.out index 170fd1a0c5d..61328e7c771 100644 --- a/expected/test_truncate.out +++ b/expected/test_truncate.out @@ -14,7 +14,7 @@ CREATE TABLE b (i int); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT INTO a SELECT generate_series(1,100000); -SELECT pg_sleep(10); +SELECT pg_sleep(20); pg_sleep ---------- diff --git a/sql/test_truncate.sql b/sql/test_truncate.sql index 5c4e616fcff..79b2a0f5ad1 100644 --- a/sql/test_truncate.sql +++ b/sql/test_truncate.sql @@ -5,7 +5,7 @@ SET search_path TO s7; CREATE TABLE a (i int); CREATE TABLE b (i int); INSERT INTO a SELECT generate_series(1,100000); -SELECT pg_sleep(10); +SELECT pg_sleep(20); -- expect insert fail INSERT INTO a SELECT generate_series(1,30); INSERT INTO b SELECT generate_series(1,30); From 6126c18a64ffd3a0e388dd03cd818f827b511972 Mon Sep 17 00:00:00 2001 From: Hubert Zhang Date: Fri, 26 Apr 2019 17:47:31 +0800 Subject: [PATCH 036/330] Avoid Gp_role failing to be set back to GP_ROLE_DISPATCH when an error happens. 
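The problem: calculate_table_disk_usage() switches Gp_role to GP_ROLE_UTILITY before calling pg_total_relation_size(), and that call can throw (for example, when the table has just been dropped by another backend). The error longjmps past the statement that restores the role, leaving the worker in utility mode for good. The hunk below wraps the call in PG_TRY/PG_CATCH and restores the role unconditionally; condensed, the guarded pattern is (sketch only — "size" stands in for active_table_entry->tablesize):

	Gp_role = GP_ROLE_UTILITY;
	PG_TRY();
	{
		/* may throw if relOid has been dropped concurrently */
		size += (Size) DatumGetInt64(
			DirectFunctionCall1(pg_total_relation_size,
								ObjectIdGetDatum(relOid)));
	}
	PG_CATCH();
	{
		/* swallow the error: a dropped table simply contributes no size */
		HOLD_INTERRUPTS();
		FlushErrorState();
		RESUME_INTERRUPTS();
	}
	PG_END_TRY();

	/* reached on both the success path and the error path */
	Gp_role = GP_ROLE_DISPATCH;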
--- quotamodel.c | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/quotamodel.c b/quotamodel.c index b75b50925d8..2c0073c0d93 100644 --- a/quotamodel.c +++ b/quotamodel.c @@ -602,10 +602,23 @@ calculate_table_disk_usage(bool is_init) if (active_tbl_found) { /* pretend the process is in utility mode, and append the table size on the master */ - GpRoleValue Gp_role_backup = Gp_role; Gp_role = GP_ROLE_UTILITY; - active_table_entry->tablesize += (Size) DatumGetInt64(DirectFunctionCall1(pg_total_relation_size, ObjectIdGetDatum(relOid))); - Gp_role = Gp_role_backup; + + /* DirectFunctionCall1 may fail, since the table may be dropped by another backend */ + PG_TRY(); + { + /* call pg_total_relation_size to get the active table size */ + active_table_entry->tablesize += (Size) DatumGetInt64(DirectFunctionCall1(pg_total_relation_size, ObjectIdGetDatum(relOid))); + } + PG_CATCH(); + { + HOLD_INTERRUPTS(); + FlushErrorState(); + RESUME_INTERRUPTS(); + } + PG_END_TRY(); + + Gp_role = GP_ROLE_DISPATCH; /* firstly calculate the updated total size of a table */ updated_total_size = active_table_entry->tablesize - tsentry->totalsize; From b5da219edd2344d71e120e02540656450cbb02bd Mon Sep 17 00:00:00 2001 From: Weinan WANG Date: Fri, 10 May 2019 10:52:13 +0800 Subject: [PATCH 037/330] Set BG worker application name (#37) Give the background worker an application_name so that `gpstop` ignores it when doing sanity checking. --- diskquota.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/diskquota.c b/diskquota.c index ebdba2776c2..0affd769af6 100644 --- a/diskquota.c +++ b/diskquota.c @@ -58,6 +58,7 @@ PG_MODULE_MAGIC; #define MAX_NUM_MONITORED_DB 10 #define DISKQUOTA_DB "diskquota" +#define DISKQUOTA_APPLICATION_NAME "gp_reserved_gpdiskquota" /* flags set by signal handlers */ static volatile sig_atomic_t got_sighup = false; @@ -275,6 +276,10 @@ disk_quota_worker_main(Datum main_arg) /* Connect to our database */ BackgroundWorkerInitializeConnection(dbname, NULL); + set_config_option("application_name", DISKQUOTA_APPLICATION_NAME, + PGC_USERSET, PGC_S_SESSION, + GUC_ACTION_SAVE, true, 0); + /* * Set ps display name of the worker process of diskquota, so we can * distinguish them quickly. Note: never mind parameter name of the @@ -413,6 +418,9 @@ disk_quota_launcher_main(Datum main_arg) */ BackgroundWorkerInitializeConnection(DISKQUOTA_DB, NULL); + set_config_option("application_name", DISKQUOTA_APPLICATION_NAME, + PGC_USERSET, PGC_S_SESSION, + GUC_ACTION_SAVE, true, 0); + /* diskquota launcher should have Gp_role as dispatcher */ Gp_role = GP_ROLE_DISPATCH; From 3843f899a9533770e9cfa1f30d43a716ac2af5aa Mon Sep 17 00:00:00 2001 From: Hao Wu Date: Mon, 22 Apr 2019 16:43:32 +0800 Subject: [PATCH 038/330] Update release pipeline to meet gp-integration-testing needs The output of the released tarball should be `pkgname-version.tar.gz`. See repo pivotal/gp-release --- concourse/pipeline/release_pipeline.yml | 6 +++--- concourse/scripts/build_diskquota.sh | 7 ++----- 2 files changed, 5 insertions(+), 8 deletions(-) diff --git a/concourse/pipeline/release_pipeline.yml b/concourse/pipeline/release_pipeline.yml index 8b32d1b1986..3713eba4495 100644 --- a/concourse/pipeline/release_pipeline.yml +++ b/concourse/pipeline/release_pipeline.yml @@ -45,7 +45,7 @@ resources: source: branch: gpdb uri: https://github.com/greenplum-db/diskquota.git - tag_filter: 1.* + tag_filter: 0.* # gpdb binary on gcs is located as different folder for different version # use gcs_gpdb_binary_folder to specify them. 
@@ -68,14 +68,14 @@ resources: source: bucket: {{gcs-bucket}} json_key: {{concourse-gcs-resources-service-account-key}} - regexp: diskquota/released/gpdb6/diskquota-(.*)-rhel7-x86_64.tar.gz + regexp: diskquota/released/gpdb6/rhel7/diskquota-(.*).tar.gz - name: bin_diskquota_centos6 type: gcs source: bucket: {{gcs-bucket}} json_key: {{concourse-gcs-resources-service-account-key}} - regexp: diskquota/released/gpdb6/diskquota-(.*)-rhel6-x86_64.tar.gz + regexp: diskquota/released/gpdb6/rhel6/diskquota-(.*).tar.gz ## jobs ## ====================================================================== diff --git a/concourse/scripts/build_diskquota.sh b/concourse/scripts/build_diskquota.sh index 2feed00b5f4..53bc45d4f8e 100755 --- a/concourse/scripts/build_diskquota.sh +++ b/concourse/scripts/build_diskquota.sh @@ -33,11 +33,8 @@ function pkg() { popd if [ "${DEV_RELEASE}" == "release" ]; then case "$OSVER" in - centos6) - cp $TOP_DIR/diskquota_artifacts/component_diskquota.tar.gz $TOP_DIR/diskquota_artifacts/diskquota-${DISKQUOTA_VERSION}-rhel6-x86_64.tar.gz - ;; - centos7) - cp $TOP_DIR/diskquota_artifacts/component_diskquota.tar.gz $TOP_DIR/diskquota_artifacts/diskquota-${DISKQUOTA_VERSION}-rhel7-x86_64.tar.gz + centos6|centos7) + cp $TOP_DIR/diskquota_artifacts/component_diskquota.tar.gz $TOP_DIR/diskquota_artifacts/diskquota-${DISKQUOTA_VERSION}.tar.gz ;; *) echo "Unknown OS: $OSVER"; exit 1 ;; esac From aa60e9787a4c56a02458ba634ba66c33ff062da1 Mon Sep 17 00:00:00 2001 From: Hao Wu Date: Wed, 15 May 2019 16:35:11 +0800 Subject: [PATCH 039/330] add INTEGRATION_TEST switch --- Makefile | 5 ++++- diskquota_schedule_int | 10 ++++++++++ 2 files changed, 14 insertions(+), 1 deletion(-) create mode 100644 diskquota_schedule_int diff --git a/Makefile b/Makefile index fcd59e88356..c4f84b33902 100644 --- a/Makefile +++ b/Makefile @@ -11,7 +11,10 @@ PG_CPPFLAGS = -I$(libpq_srcdir) SHLIB_LINK = $(libpq) REGRESS = dummy +ifeq ("$(INTEGRATION_TEST)","y") +REGRESS_OPTS = --schedule=diskquota_schedule_int --init-file=init_file +else REGRESS_OPTS = --schedule=diskquota_schedule --init-file=init_file - +endif PGXS := $(shell pg_config --pgxs) include $(PGXS) diff --git a/diskquota_schedule_int b/diskquota_schedule_int new file mode 100644 index 00000000000..c7ea1b2f8a2 --- /dev/null +++ b/diskquota_schedule_int @@ -0,0 +1,10 @@ +test: init +test: prepare +test: test_table_size +test: test_fast_disk_check +test: test_role test_schema test_drop_table test_column test_copy test_update test_toast test_truncate test_reschema test_temp_role test_rename test_delete_quota test_mistake +test: test_truncate +test: test_delete_quota +test: test_partition +test: clean +test: test_insert_after_drop From d5504c2f5bac63953fa7f49686dd69f86066c2cb Mon Sep 17 00:00:00 2001 From: Hao Wu Date: Wed, 22 May 2019 11:21:38 +0800 Subject: [PATCH 040/330] Add trap code to dump regression.diffs when make fails --- concourse/scripts/test_diskquota.sh | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/concourse/scripts/test_diskquota.sh b/concourse/scripts/test_diskquota.sh index 573a60395fc..42ea135e8ed 100755 --- a/concourse/scripts/test_diskquota.sh +++ b/concourse/scripts/test_diskquota.sh @@ -19,6 +19,7 @@ function test(){ gpconfig -c diskquota.naptime -v 2 gpstop -arf pushd diskquota_src + trap "[ -s regression.diffs ] && cat regression.diffs" EXIT make installcheck [ -s regression.diffs ] && cat regression.diffs && exit 1 ps -ef | grep postgres| grep qddir| cut -d ' ' -f 6 | xargs kill -9 @@ -51,11 +52,7 @@ 
function setup_gpadmin_user() { } function install_diskquota() { - if [ "${DEV_RELEASE}" == "release" ]; then - tar -xzf bin_diskquota/diskquota*.tar.gz -C /usr/local/greenplum-db-devel - else - tar -xzf bin_diskquota/component_diskquota.tar.gz -C /usr/local/greenplum-db-devel - fi + tar -xzf bin_diskquota/*.tar.gz -C /usr/local/greenplum-db-devel } function _main() { time install_gpdb From 901db5ac3c394ac1bb9d750ea9a629e777268153 Mon Sep 17 00:00:00 2001 From: Hao Wu Date: Thu, 23 May 2019 10:47:09 +0800 Subject: [PATCH 041/330] Log loop time if launcher loop takes too much time to run --- diskquota.c | 17 +++++++++++++++-- expected/test_partition.out | 2 +- sql/test_partition.sql | 2 +- 3 files changed, 17 insertions(+), 4 deletions(-) diff --git a/diskquota.c b/diskquota.c index 0affd769af6..405b6231eef 100644 --- a/diskquota.c +++ b/diskquota.c @@ -388,7 +388,11 @@ disk_quota_worker_main(Datum main_arg) proc_exit(0); } - +inline bool isAbnormalLoopTime(int diff_sec) +{ + int max_time = diskquota_naptime + 6; + return diff_sec > max_time; +} /* ---- Functions for launcher process ---- */ /* @@ -399,6 +403,7 @@ void disk_quota_launcher_main(Datum main_arg) { HASHCTL hash_ctl; + time_t loop_begin, loop_end; /* establish signal handlers before unblocking signals. */ pqsignal(SIGHUP, disk_quota_sighup); @@ -451,6 +456,7 @@ disk_quota_launcher_main(Datum main_arg) /* main loop: do this until the SIGTERM handler tells us to terminate. */ EnableClientWaitTimeoutInterrupt(); StartIdleResourceCleanupTimers(); + loop_end = time(NULL); while (!got_sigterm) { int rc; @@ -489,6 +495,13 @@ disk_quota_launcher_main(Datum main_arg) ProcessConfigFile(PGC_SIGHUP); StartIdleResourceCleanupTimers(); } + loop_begin = loop_end; + loop_end = time(NULL); + if (isAbnormalLoopTime(loop_end - loop_begin)) + { + ereport(WARNING, (errmsg("[diskquota-loop] loop takes too much time %d/%d", + (int)(loop_end - loop_begin), diskquota_naptime))); + } } /* terminate all the diskquota worker processes before launcher exit */ @@ -595,7 +608,7 @@ start_workers_from_dblist(void) if (tupdesc->natts != 1 || tupdesc->attrs[0]->atttypid != OIDOID) ereport(ERROR, (errmsg("[diskquota launcher] table database_list corrupt, laucher will exit"))); - for (i = 0; num < SPI_processed; i++) + for (i = 0; i < SPI_processed; i++) { HeapTuple tup; Oid dbid; diff --git a/expected/test_partition.out b/expected/test_partition.out index f8dc2195b04..6d67954584f 100644 --- a/expected/test_partition.out +++ b/expected/test_partition.out @@ -32,7 +32,7 @@ SELECT pg_sleep(20); INSERT INTO measurement SELECT 1, '2006-02-02' ,1,1; -- expect insert fail INSERT INTO measurement SELECT generate_series(1,100000), '2006-03-02' ,1,1; -SELECT pg_sleep(5); +SELECT pg_sleep(10); pg_sleep ---------- diff --git a/sql/test_partition.sql b/sql/test_partition.sql index bde27c9060d..aaff1b049c5 100644 --- a/sql/test_partition.sql +++ b/sql/test_partition.sql @@ -19,7 +19,7 @@ SELECT pg_sleep(20); INSERT INTO measurement SELECT 1, '2006-02-02' ,1,1; -- expect insert fail INSERT INTO measurement SELECT generate_series(1,100000), '2006-03-02' ,1,1; -SELECT pg_sleep(5); +SELECT pg_sleep(10); -- expect insert fail INSERT INTO measurement SELECT 1, '2006-02-02' ,1,1; -- expect insert fail From 4ce91969f9ae1db3d41f86d97e32950e08c525c0 Mon Sep 17 00:00:00 2001 From: Hao Wu Date: Thu, 23 May 2019 14:25:53 +0800 Subject: [PATCH 042/330] filter GP_IGNORE out from regression.diffs --- concourse/scripts/test_diskquota.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) 
diff --git a/concourse/scripts/test_diskquota.sh b/concourse/scripts/test_diskquota.sh index 42ea135e8ed..631e768c450 100755 --- a/concourse/scripts/test_diskquota.sh +++ b/concourse/scripts/test_diskquota.sh @@ -19,9 +19,9 @@ function test(){ gpconfig -c diskquota.naptime -v 2 gpstop -arf pushd diskquota_src - trap "[ -s regression.diffs ] && cat regression.diffs" EXIT + trap "[ -s regression.diffs ] && grep -v GP_IGNORE regression.diffs" EXIT make installcheck - [ -s regression.diffs ] && cat regression.diffs && exit 1 + [ -s regression.diffs ] && grep -v GP_IGNORE regression.diffs && exit 1 ps -ef | grep postgres| grep qddir| cut -d ' ' -f 6 | xargs kill -9 export PGPORT=16432 echo "export PGPROT=\$PGPORT" >> /usr/local/greenplum-db-devel/greenplum_path.sh @@ -29,7 +29,7 @@ function test(){ rm /tmp/.s.PGSQL.15432* gpactivatestandby -ad ${TOP_DIR}/gpdb_src/gpAux/gpdemo/datadirs/standby make installcheck - [ -s regression.diffs ] && cat regression.diffs && exit 1 + [ -s regression.diffs ] && grep -v GP_IGNORE regression.diffs && exit 1 popd EOF export MASTER_DATA_DIRECTORY=${TOP_DIR}/gpdb_src/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 From fb3c5a0cac58ddaec0a502c0eb742f9b28b77ed0 Mon Sep 17 00:00:00 2001 From: Haozhou Wang Date: Thu, 30 May 2019 10:58:41 +0800 Subject: [PATCH 043/330] Add ubuntu support 1. Add ubuntu pipeline 2. Add both ubuntu build and test jobs --- concourse/pipeline/pipeline.yml | 70 +++++++++++++++++++++++++++- concourse/scripts/build_diskquota.sh | 4 +- concourse/scripts/test_diskquota.sh | 5 +- 3 files changed, 75 insertions(+), 4 deletions(-) diff --git a/concourse/pipeline/pipeline.yml b/concourse/pipeline/pipeline.yml index 5d17dd9dc30..39b2b4fc972 100644 --- a/concourse/pipeline/pipeline.yml +++ b/concourse/pipeline/pipeline.yml @@ -7,8 +7,10 @@ groups: jobs: - diskquota_centos6_build - diskquota_centos7_build + - diskquota_ubuntu18_build - diskquota_centos6_test - diskquota_centos7_test + - diskquota_ubuntu18_test resource_types: - name: gcs @@ -32,6 +34,12 @@ resources: repository: pivotaldata/centos-gpdb-dev tag: '7-gcc6.2-llvm3.7' +- name: ubuntu18-image + type: docker-image + source: + repository: pivotaldata/ubuntu-gpdb-dev + tag: '16.04' + # Github Source Codes - name: gpdb_src @@ -43,7 +51,7 @@ resources: - name: diskquota_src type: git source: - branch: gpdb + branch: gpdb uri: https://github.com/greenplum-db/diskquota.git # gpdb binary on gcs is located as different folder for different version @@ -62,6 +70,15 @@ resources: json_key: {{concourse-gcs-resources-service-account-key}} versioned_file: ((gcs_gpdb_binary_folder))/bin_gpdb_centos7/bin_gpdb.tar.gz +- name: bin_gpdb_ubuntu18 + type: s3 + source: + access_key_id: {{bucket-access-key-id}} + bucket: procedural-languages-concourse-gpdb-5x-stable + region_name: {{aws-region}} + secret_access_key: {{bucket-secret-access-key}} + versioned_file: build/gpdb_6x/bin_gpdb_ubuntu16/bin_gpdb.tar.gz + - name: bin_diskquota_centos7 type: gcs source: @@ -76,6 +93,13 @@ resources: json_key: {{concourse-gcs-resources-service-account-key}} versioned_file: diskquota/published/((gcs_diskquota_binary_folder))/rhel6/component_diskquota.tar.gz +- name: bin_diskquota_ubuntu18 + type: gcs + source: + bucket: {{gcs-bucket-intermediates}} + json_key: {{concourse-gcs-resources-service-account-key}} + versioned_file: diskquota/published/((gcs_diskquota_binary_folder))/ubuntu18/component_diskquota.tar.gz + ## jobs ## ====================================================================== @@ -132,6 +156,30 @@ jobs: params: file: 
diskquota_artifacts/component_diskquota.tar.gz +- name: diskquota_ubuntu18_build + max_in_flight: 3 + plan: + - aggregate: + - get: ubuntu18-image + - get: diskquota_src + trigger: true + - get: bin_gpdb_ubuntu18 + - get: gpdb_src + - aggregate: + - task: build_diskquota + file: diskquota_src/concourse/tasks/build_diskquota.yml + image: ubuntu18-image + input_mapping: + bin_gpdb: bin_gpdb_ubuntu18 + output_mapping: + bin_diskquota: bin_diskquota_ubuntu18 + params: + OSVER: ubuntu18 + GPDBVER: gp6 + - aggregate: + - put: bin_diskquota_ubuntu18 + params: + file: diskquota_artifacts/component_diskquota.tar.gz - name: diskquota_centos6_test plan: @@ -172,3 +220,23 @@ jobs: params: OSVER: centos7 GPDBVER: gp6 + +- name: diskquota_ubuntu18_test + plan: + - aggregate: + - get: ubuntu18-image + - get: diskquota_src + - get: bin_diskquota_ubuntu18 + passed: [diskquota_ubuntu18_build] + trigger: true + - get: bin_gpdb_ubuntu18 + - get: gpdb_src + - task: test_diskquota + file: diskquota_src/concourse/tasks/test_diskquota.yml + image: ubuntu18-image + input_mapping: + bin_gpdb: bin_gpdb_ubuntu18 + bin_diskquota: bin_diskquota_ubuntu18 + params: + OSVER: ubuntu18 + GPDBVER: gp6 diff --git a/concourse/scripts/build_diskquota.sh b/concourse/scripts/build_diskquota.sh index 53bc45d4f8e..2df9a63697a 100755 --- a/concourse/scripts/build_diskquota.sh +++ b/concourse/scripts/build_diskquota.sh @@ -7,7 +7,7 @@ TOP_DIR=${CWDIR}/../../../ source "${TOP_DIR}/gpdb_src/concourse/scripts/common.bash" function pkg() { - source /opt/gcc_env.sh + [ -f /opt/gcc_env.sh ] && source /opt/gcc_env.sh source /usr/local/greenplum-db-devel/greenplum_path.sh export USE_PGXS=1 @@ -33,7 +33,7 @@ function pkg() { popd if [ "${DEV_RELEASE}" == "release" ]; then case "$OSVER" in - centos6|centos7) + centos6|centos7|ubuntu18) cp $TOP_DIR/diskquota_artifacts/component_diskquota.tar.gz $TOP_DIR/diskquota_artifacts/diskquota-${DISKQUOTA_VERSION}.tar.gz ;; *) echo "Unknown OS: $OSVER"; exit 1 ;; diff --git a/concourse/scripts/test_diskquota.sh b/concourse/scripts/test_diskquota.sh index 631e768c450..c2b51764794 100755 --- a/concourse/scripts/test_diskquota.sh +++ b/concourse/scripts/test_diskquota.sh @@ -7,7 +7,7 @@ TOP_DIR=${CWDIR}/../../../ GPDB_CONCOURSE_DIR=${TOP_DIR}/gpdb_src/concourse/scripts source "${GPDB_CONCOURSE_DIR}/common.bash" function test(){ - sudo chown -R gpadmin:gpadmin ${TOP_DIR}; + chown -R gpadmin:gpadmin ${TOP_DIR}; cat > /home/gpadmin/test.sh <<-EOF set -exo pipefail source gpdb_src/gpAux/gpdemo/gpdemo-env.sh @@ -46,6 +46,9 @@ function setup_gpadmin_user() { centos*) ${GPDB_CONCOURSE_DIR}/setup_gpadmin_user.bash "centos" ;; + ubuntu*) + ${GPDB_CONCOURSE_DIR}/setup_gpadmin_user.bash "ubuntu" + ;; *) echo "Unknown OS: $OSVER"; exit 1 ;; esac From c14bfc8c41654dd478dc82a595fcef164650efe3 Mon Sep 17 00:00:00 2001 From: Haozhou Wang Date: Mon, 3 Jun 2019 10:25:16 +0800 Subject: [PATCH 044/330] Update pipeline image to ubuntu 18.04 --- concourse/pipeline/pipeline.yml | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/concourse/pipeline/pipeline.yml b/concourse/pipeline/pipeline.yml index 39b2b4fc972..4faea164e7c 100644 --- a/concourse/pipeline/pipeline.yml +++ b/concourse/pipeline/pipeline.yml @@ -37,8 +37,14 @@ resources: - name: ubuntu18-image type: docker-image source: - repository: pivotaldata/ubuntu-gpdb-dev - tag: '16.04' + repository: pivotaldata/gpdb6-ubuntu18.04-build + tag: latest + +- name: ubuntu18-image-test + type: docker-image + source: + repository: 
pivotaldata/gpdb6-ubuntu18.04-test + tag: latest # Github Source Codes @@ -71,13 +77,11 @@ resources: versioned_file: ((gcs_gpdb_binary_folder))/bin_gpdb_centos7/bin_gpdb.tar.gz - name: bin_gpdb_ubuntu18 - type: s3 + type: gcs source: - access_key_id: {{bucket-access-key-id}} - bucket: procedural-languages-concourse-gpdb-5x-stable - region_name: {{aws-region}} - secret_access_key: {{bucket-secret-access-key}} - versioned_file: build/gpdb_6x/bin_gpdb_ubuntu16/bin_gpdb.tar.gz + bucket: {{gcs-bucket-intermediates}} + json_key: {{concourse-gcs-resources-service-account-key}} + versioned_file: ((gcs_gpdb_binary_folder))/bin_gpdb_ubuntu18.04/bin_gpdb.tar.gz - name: bin_diskquota_centos7 type: gcs @@ -224,7 +228,7 @@ jobs: - name: diskquota_ubuntu18_test plan: - aggregate: - - get: ubuntu18-image + - get: ubuntu18-image-test - get: diskquota_src - get: bin_diskquota_ubuntu18 passed: [diskquota_ubuntu18_build] @@ -233,7 +237,7 @@ jobs: - get: gpdb_src - task: test_diskquota file: diskquota_src/concourse/tasks/test_diskquota.yml - image: ubuntu18-image + image: ubuntu18-image-test input_mapping: bin_gpdb: bin_gpdb_ubuntu18 bin_diskquota: bin_diskquota_ubuntu18 From a94afc92f8b3232771deebdd68c745e2f97ab34b Mon Sep 17 00:00:00 2001 From: Hao Wu <37101401+gfphoenix78@users.noreply.github.com> Date: Mon, 10 Jun 2019 18:39:12 +0800 Subject: [PATCH 045/330] Update build and release pipelines (#42) Build pipeline: each platform has one job which contains 2 tasks: build + test Release pipeline: contains release + test release for each gpdb binary release steps: * build * test * push to released bucket --- concourse/pipeline/pipeline.yml | 169 +++++------------------- concourse/pipeline/release_pipeline.yml | 139 +++++++++++++------ concourse/scripts/build_diskquota.sh | 37 ++---- concourse/scripts/test_diskquota.sh | 14 +- concourse/tasks/build_diskquota.yml | 3 - concourse/tasks/test_diskquota.yml | 3 - 6 files changed, 145 insertions(+), 220 deletions(-) diff --git a/concourse/pipeline/pipeline.yml b/concourse/pipeline/pipeline.yml index 4faea164e7c..b911f0c1de5 100644 --- a/concourse/pipeline/pipeline.yml +++ b/concourse/pipeline/pipeline.yml @@ -5,12 +5,9 @@ groups: - name: GPDB6 jobs: - - diskquota_centos6_build - - diskquota_centos7_build - - diskquota_ubuntu18_build - - diskquota_centos6_test - - diskquota_centos7_test - - diskquota_ubuntu18_test + - diskquota_centos6_build_test + - diskquota_centos7_build_test + - diskquota_ubuntu18_build_test resource_types: - name: gcs @@ -34,7 +31,7 @@ resources: repository: pivotaldata/centos-gpdb-dev tag: '7-gcc6.2-llvm3.7' -- name: ubuntu18-image +- name: ubuntu18-image-build type: docker-image source: repository: pivotaldata/gpdb6-ubuntu18.04-build @@ -57,7 +54,7 @@ resources: - name: diskquota_src type: git source: - branch: gpdb + branch: gpdb uri: https://github.com/greenplum-db/diskquota.git # gpdb binary on gcs is located as different folder for different version @@ -68,7 +65,6 @@ resources: bucket: {{gcs-bucket-intermediates}} json_key: {{concourse-gcs-resources-service-account-key}} versioned_file: ((gcs_gpdb_binary_folder))/bin_gpdb_centos6/bin_gpdb.tar.gz - - name: bin_gpdb_centos7 type: gcs source: @@ -83,164 +79,65 @@ resources: json_key: {{concourse-gcs-resources-service-account-key}} versioned_file: ((gcs_gpdb_binary_folder))/bin_gpdb_ubuntu18.04/bin_gpdb.tar.gz -- name: bin_diskquota_centos7 - type: gcs - source: - bucket: {{gcs-bucket-intermediates}} - json_key: {{concourse-gcs-resources-service-account-key}} - versioned_file: 
diskquota/published/((gcs_diskquota_binary_folder))/rhel7/component_diskquota.tar.gz - -- name: bin_diskquota_centos6 - type: gcs - source: - bucket: {{gcs-bucket-intermediates}} - json_key: {{concourse-gcs-resources-service-account-key}} - versioned_file: diskquota/published/((gcs_diskquota_binary_folder))/rhel6/component_diskquota.tar.gz - -- name: bin_diskquota_ubuntu18 - type: gcs - source: - bucket: {{gcs-bucket-intermediates}} - json_key: {{concourse-gcs-resources-service-account-key}} - versioned_file: diskquota/published/((gcs_diskquota_binary_folder))/ubuntu18/component_diskquota.tar.gz - ## jobs ## ====================================================================== jobs: -# Build PLR GPPKG - -- name: diskquota_centos7_build +- name: diskquota_centos7_build_test max_in_flight: 3 plan: - aggregate: - get: centos-gpdb-dev-7 - get: diskquota_src trigger: true - - get: bin_gpdb_centos7 - - get: gpdb_src - - aggregate: - - task: build_diskquota - file: diskquota_src/concourse/tasks/build_diskquota.yml - image: centos-gpdb-dev-7 - input_mapping: - bin_gpdb: bin_gpdb_centos7 - output_mapping: - bin_diskquota: bin_diskquota_centos7 - params: - OSVER: centos7 - GPDBVER: gp6 - - aggregate: - - put: bin_diskquota_centos7 - params: - file: diskquota_artifacts/component_diskquota.tar.gz - -- name: diskquota_centos6_build - max_in_flight: 3 - plan: - - aggregate: - - get: centos-gpdb-dev-6 - - get: diskquota_src - trigger: true - - get: bin_gpdb_centos6 - - get: gpdb_src - - aggregate: - - task: build_diskquota - file: diskquota_src/concourse/tasks/build_diskquota.yml - image: centos-gpdb-dev-6 - input_mapping: - bin_gpdb: bin_gpdb_centos6 - output_mapping: - bin_diskquota: bin_diskquota_centos6 - params: - OSVER: centos6 - GPDBVER: gp6 - - aggregate: - - put: bin_diskquota_centos6 - params: - file: diskquota_artifacts/component_diskquota.tar.gz - -- name: diskquota_ubuntu18_build - max_in_flight: 3 - plan: - - aggregate: - - get: ubuntu18-image - - get: diskquota_src - trigger: true - - get: bin_gpdb_ubuntu18 - - get: gpdb_src - - aggregate: - - task: build_diskquota - file: diskquota_src/concourse/tasks/build_diskquota.yml - image: ubuntu18-image - input_mapping: - bin_gpdb: bin_gpdb_ubuntu18 - output_mapping: - bin_diskquota: bin_diskquota_ubuntu18 - params: - OSVER: ubuntu18 - GPDBVER: gp6 - - aggregate: - - put: bin_diskquota_ubuntu18 - params: - file: diskquota_artifacts/component_diskquota.tar.gz - -- name: diskquota_centos6_test - plan: - - aggregate: - - get: centos-gpdb-dev-6 - - get: diskquota_src - - get: bin_diskquota_centos6 - passed: [diskquota_centos6_build] - trigger: true - - get: bin_gpdb_centos6 + - get: bin_gpdb + resource: bin_gpdb_centos7 - get: gpdb_src + - task: build_diskquota + file: diskquota_src/concourse/tasks/build_diskquota.yml + image: centos-gpdb-dev-7 - task: test_diskquota file: diskquota_src/concourse/tasks/test_diskquota.yml - image: centos-gpdb-dev-6 + image: centos-gpdb-dev-7 input_mapping: - bin_gpdb: bin_gpdb_centos6 - bin_diskquota: bin_diskquota_centos6 - params: - OSVER: centos6 - GPDBVER: gp6 + bin_diskquota: diskquota_artifacts -- name: diskquota_centos7_test +- name: diskquota_centos6_build_test + max_in_flight: 3 plan: - aggregate: - - get: centos-gpdb-dev-7 + - get: centos-gpdb-dev-6 - get: diskquota_src - - get: bin_diskquota_centos7 - passed: [diskquota_centos7_build] trigger: true - - get: bin_gpdb_centos7 + - get: bin_gpdb + resource: bin_gpdb_centos6 - get: gpdb_src + - task: build_diskquota + file: 
diskquota_src/concourse/tasks/build_diskquota.yml + image: centos-gpdb-dev-6 - task: test_diskquota file: diskquota_src/concourse/tasks/test_diskquota.yml - image: centos-gpdb-dev-7 + image: centos-gpdb-dev-6 input_mapping: - bin_gpdb: bin_gpdb_centos7 - bin_diskquota: bin_diskquota_centos7 - params: - OSVER: centos7 - GPDBVER: gp6 + bin_diskquota: diskquota_artifacts -- name: diskquota_ubuntu18_test +- name: diskquota_ubuntu18_build_test + max_in_flight: 3 plan: - aggregate: + - get: ubuntu18-image-build - get: ubuntu18-image-test - get: diskquota_src - - get: bin_diskquota_ubuntu18 - passed: [diskquota_ubuntu18_build] trigger: true - - get: bin_gpdb_ubuntu18 + - get: bin_gpdb + resource: bin_gpdb_ubuntu18 - get: gpdb_src + - task: build_diskquota + file: diskquota_src/concourse/tasks/build_diskquota.yml + image: ubuntu18-image-build - task: test_diskquota file: diskquota_src/concourse/tasks/test_diskquota.yml image: ubuntu18-image-test input_mapping: - bin_gpdb: bin_gpdb_ubuntu18 - bin_diskquota: bin_diskquota_ubuntu18 - params: - OSVER: ubuntu18 - GPDBVER: gp6 + bin_diskquota: diskquota_artifacts + diff --git a/concourse/pipeline/release_pipeline.yml b/concourse/pipeline/release_pipeline.yml index 3713eba4495..8a81f996973 100644 --- a/concourse/pipeline/release_pipeline.yml +++ b/concourse/pipeline/release_pipeline.yml @@ -1,14 +1,16 @@ ## ====================================================================== -## Pipeline for GPDB PL/R GPPKG +## Pipeline for diskquota ## ====================================================================== groups: - name: GPDB6 jobs: - - diskquota_centos6_build - - diskquota_centos7_build + - release_centos6 + - release_centos7 + - release_ubuntu18 - diskquota_centos6_test_release - diskquota_centos7_test_release + - diskquota_ubuntu18_test_release resource_types: - name: gcs @@ -32,6 +34,19 @@ resources: repository: pivotaldata/centos-gpdb-dev tag: '7-gcc6.2-llvm3.7' +- name: ubuntu18-image-build + type: docker-image + source: + repository: pivotaldata/gpdb6-ubuntu18.04-build + tag: latest + +- name: ubuntu18-image-test + type: docker-image + source: + repository: pivotaldata/gpdb6-ubuntu18.04-test + tag: latest + + # Github Source Codes - name: gpdb_src @@ -63,6 +78,13 @@ resources: json_key: {{concourse-gcs-resources-service-account-key}} versioned_file: ((gcs_gpdb_binary_folder))/bin_gpdb_centos7/bin_gpdb.tar.gz +- name: bin_gpdb_ubuntu18 + type: gcs + source: + bucket: ((gcs-bucket-intermediates)) + json_key: ((concourse-gcs-resources-service-account-key)) + versioned_file: ((gcs_gpdb_binary_folder))/bin_gpdb_ubuntu18.04/bin_gpdb.tar.gz + - name: bin_diskquota_centos7 type: gcs source: @@ -77,101 +99,138 @@ resources: json_key: {{concourse-gcs-resources-service-account-key}} regexp: diskquota/released/gpdb6/rhel6/diskquota-(.*).tar.gz +- name: bin_diskquota_ubuntu18 + type: gcs + source: + bucket: {{gcs-bucket}} + json_key: {{concourse-gcs-resources-service-account-key}} + regexp: diskquota/released/gpdb6/ubuntu18/diskquota-(.*).tar.gz + ## jobs ## ====================================================================== jobs: -# Build PLR GPPKG -- name: diskquota_centos7_build +- name: release_centos7 max_in_flight: 3 plan: - aggregate: - get: centos-gpdb-dev-7 - get: diskquota_src trigger: true - - get: bin_gpdb_centos7 + - get: bin_gpdb + resource: bin_gpdb_centos7 - get: gpdb_src - aggregate: - task: build_diskquota file: diskquota_src/concourse/tasks/build_diskquota.yml image: centos-gpdb-dev-7 + - aggregate: + - task: test_diskquota + file: 
diskquota_src/concourse/tasks/test_diskquota.yml + image: centos-gpdb-dev-7 input_mapping: - bin_gpdb: bin_gpdb_centos7 - output_mapping: - bin_diskquota: bin_diskquota_centos7 - params: - OSVER: centos7 - GPDBVER: gp6 - DEV_RELEASE: release + bin_diskquota: diskquota_artifacts - aggregate: - put: bin_diskquota_centos7 params: file: diskquota_artifacts/diskquota*.tar.gz -- name: diskquota_centos6_build +- name: release_centos6 max_in_flight: 3 plan: - aggregate: - get: centos-gpdb-dev-6 - get: diskquota_src trigger: true - - get: bin_gpdb_centos6 + - get: bin_gpdb + resource: bin_gpdb_centos6 - get: gpdb_src - aggregate: - task: build_diskquota file: diskquota_src/concourse/tasks/build_diskquota.yml image: centos-gpdb-dev-6 + - aggregate: + - task: test_diskquota + file: diskquota_src/concourse/tasks/test_diskquota.yml + image: centos-gpdb-dev-6 input_mapping: - bin_gpdb: bin_gpdb_centos6 - output_mapping: - bin_diskquota: bin_diskquota_centos6 - params: - OSVER: centos6 - GPDBVER: gp6 - DEV_RELEASE: release + bin_diskquota: diskquota_artifacts - aggregate: - put: bin_diskquota_centos6 params: file: diskquota_artifacts/diskquota*.tar.gz +- name: release_ubuntu18 + max_in_flight: 3 + plan: + - aggregate: + - get: ubuntu18-image-build + - get: ubuntu18-image-test + - get: diskquota_src + trigger: true + - get: bin_gpdb + resource: bin_gpdb_ubuntu18 + - get: gpdb_src + - aggregate: + - task: build_diskquota + file: diskquota_src/concourse/tasks/build_diskquota.yml + image: ubuntu18-image-build + - aggregate: + - task: test_diskquota + file: diskquota_src/concourse/tasks/test_diskquota.yml + image: ubuntu18-image-test + input_mapping: + bin_diskquota: diskquota_artifacts + - aggregate: + - put: bin_diskquota_ubuntu18 + params: + file: diskquota_artifacts/diskquota*.tar.gz - name: diskquota_centos6_test_release + max_in_flight: 3 plan: - aggregate: - get: centos-gpdb-dev-6 - get: diskquota_src - - get: bin_diskquota_centos6 - - get: bin_gpdb_centos6 + - get: bin_diskquota + resource: bin_diskquota_centos6 + - get: bin_gpdb + resource: bin_gpdb_centos6 trigger: true - get: gpdb_src - task: test_diskquota file: diskquota_src/concourse/tasks/test_diskquota.yml image: centos-gpdb-dev-6 - input_mapping: - bin_gpdb: bin_gpdb_centos6 - bin_diskquota: bin_diskquota_centos6 - params: - OSVER: centos6 - GPDBVER: gp6 - DEV_RELEASE: release - name: diskquota_centos7_test_release + max_in_flight: 3 plan: - aggregate: - get: centos-gpdb-dev-7 - get: diskquota_src - - get: bin_diskquota_centos7 - - get: bin_gpdb_centos7 + - get: bin_diskquota + resource: bin_diskquota_centos7 + - get: bin_gpdb + resource: bin_gpdb_centos7 trigger: true - get: gpdb_src - task: test_diskquota file: diskquota_src/concourse/tasks/test_diskquota.yml image: centos-gpdb-dev-7 - input_mapping: - bin_gpdb: bin_gpdb_centos7 - bin_diskquota: bin_diskquota_centos7 - params: - OSVER: centos7 - GPDBVER: gp6 - DEV_RELEASE: release + +- name: diskquota_ubuntu18_test_release + max_in_flight: 3 + plan: + - aggregate: + - get: ubuntu18-image-test + - get: diskquota_src + - get: bin_diskquota + resource: bin_diskquota_ubuntu18 + - get: bin_gpdb + resource: bin_gpdb_ubuntu18 + trigger: true + - get: gpdb_src + - task: test_diskquota + file: diskquota_src/concourse/tasks/test_diskquota.yml + image: ubuntu18-image-test diff --git a/concourse/scripts/build_diskquota.sh b/concourse/scripts/build_diskquota.sh index 2df9a63697a..0f161b5b867 100755 --- a/concourse/scripts/build_diskquota.sh +++ b/concourse/scripts/build_diskquota.sh @@ -12,38 +12,25 @@ 
function pkg() { export USE_PGXS=1 pushd diskquota_src/ - if [ "${DEV_RELEASE}" == "release" ]; then - if ! git describe --tags >/dev/null 2>&1 ; then - echo "git describe failed"; exit 1 - fi - DISKQUOTA_VERSION=$(git describe --tags) - fi + DISKQUOTA_VERSION=$(git describe --tags) make clean make install popd - + pushd /usr/local/greenplum-db-devel/ - echo 'cp -r lib share $GPHOME || exit 1'> install_gpdb_component - chmod a+x install_gpdb_component - tar -czf $TOP_DIR/diskquota_artifacts/component_diskquota.tar.gz \ - lib/postgresql/diskquota.so \ - share/postgresql/extension/diskquota.control \ - share/postgresql/extension/diskquota--1.0.sql \ - install_gpdb_component - popd - if [ "${DEV_RELEASE}" == "release" ]; then - case "$OSVER" in - centos6|centos7|ubuntu18) - cp $TOP_DIR/diskquota_artifacts/component_diskquota.tar.gz $TOP_DIR/diskquota_artifacts/diskquota-${DISKQUOTA_VERSION}.tar.gz - ;; - *) echo "Unknown OS: $OSVER"; exit 1 ;; - esac - fi + pushd /usr/local/greenplum-db-devel/ + echo 'cp -r lib share $GPHOME || exit 1'> install_gpdb_component + chmod a+x install_gpdb_component + tar -czf $TOP_DIR/diskquota_artifacts/diskquota-${DISKQUOTA_VERSION}.tar.gz \ + lib/postgresql/diskquota.so \ + share/postgresql/extension/diskquota.control \ + share/postgresql/extension/diskquota--1.0.sql \ + install_gpdb_component + popd } function _main() { - time install_gpdb - time pkg + time install_gpdb + time pkg } _main "$@" diff --git a/concourse/scripts/test_diskquota.sh b/concourse/scripts/test_diskquota.sh index c2b51764794..6669c0a9a4c 100755 --- a/concourse/scripts/test_diskquota.sh +++ b/concourse/scripts/test_diskquota.sh @@ -39,19 +39,7 @@ function test(){ } function setup_gpadmin_user() { - case "$OSVER" in - suse*) - ${GPDB_CONCOURSE_DIR}/setup_gpadmin_user.bash "sles" - ;; - centos*) - ${GPDB_CONCOURSE_DIR}/setup_gpadmin_user.bash "centos" - ;; - ubuntu*) - ${GPDB_CONCOURSE_DIR}/setup_gpadmin_user.bash "ubuntu" - ;; - *) echo "Unknown OS: $OSVER"; exit 1 ;; - esac - + ${GPDB_CONCOURSE_DIR}/setup_gpadmin_user.bash } function install_diskquota() { diff --git a/concourse/tasks/build_diskquota.yml b/concourse/tasks/build_diskquota.yml index dc483709650..1b2e7c7b138 100644 --- a/concourse/tasks/build_diskquota.yml +++ b/concourse/tasks/build_diskquota.yml @@ -12,6 +12,3 @@ outputs: run: path: diskquota_src/concourse/scripts/build_diskquota.sh params: - OSVER: - GPDBVER: - DEV_RELEASE: devel diff --git a/concourse/tasks/test_diskquota.yml b/concourse/tasks/test_diskquota.yml index e32abaa42d3..0dadd31e9c2 100644 --- a/concourse/tasks/test_diskquota.yml +++ b/concourse/tasks/test_diskquota.yml @@ -10,6 +10,3 @@ inputs: run: path: diskquota_src/concourse/scripts/test_diskquota.sh params: - OSVER: - GPDBVER: - DEV_RELEASE: devel From 3edbaf305fb2e07ac0ccfab18809321deb78e0de Mon Sep 17 00:00:00 2001 From: Hao Wu Date: Tue, 11 Jun 2019 09:53:04 +0800 Subject: [PATCH 046/330] Add static to inline function & change the max_time threshold used for abnormal loop detection --- diskquota.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/diskquota.c b/diskquota.c index 405b6231eef..ef1f9c8fe69 100644 --- a/diskquota.c +++ b/diskquota.c @@ -388,9 +388,13 @@ disk_quota_worker_main(Datum main_arg) proc_exit(0); } -inline bool isAbnormalLoopTime(int diff_sec) +static inline bool isAbnormalLoopTime(int diff_sec) { - int max_time = diskquota_naptime + 6; + int max_time; + if (diskquota_naptime > 6) + max_time = diskquota_naptime * 2; + else + max_time = diskquota_naptime + 6; return diff_sec > max_time; } From 
bee5212fac83894863b351fdb5218452ab6b65f2 Mon Sep 17 00:00:00 2001 From: Hao Wu Date: Tue, 11 Jun 2019 10:12:49 +0800 Subject: [PATCH 047/330] Update tag_filter of release pipeline to 1.* --- concourse/pipeline/release_pipeline.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/concourse/pipeline/release_pipeline.yml b/concourse/pipeline/release_pipeline.yml index 8a81f996973..152b5de1914 100644 --- a/concourse/pipeline/release_pipeline.yml +++ b/concourse/pipeline/release_pipeline.yml @@ -60,7 +60,7 @@ resources: source: branch: gpdb uri: https://github.com/greenplum-db/diskquota.git - tag_filter: 0.* + tag_filter: 1.* # gpdb binary on gcs is located as different folder for different version # use gcs_gpdb_binary_folder to specify them. From 3f0724b6304477782fe6ae1ba20b2e7872455cc9 Mon Sep 17 00:00:00 2001 From: Haozhou Wang Date: Mon, 4 Nov 2019 19:21:09 +0800 Subject: [PATCH 048/330] Remove debug_query_string when starting diskquota The issue has been fixed in PR#6469 [https://github.com/greenplum-db/gpdb/pull/6469/] So remove debug_query_string to avoid misusing the debug parameter --- diskquota.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/diskquota.c b/diskquota.c index ef1f9c8fe69..26c606afe11 100644 --- a/diskquota.c +++ b/diskquota.c @@ -533,9 +533,6 @@ create_monitor_db_table(void) sql = "create schema if not exists diskquota_namespace;" "create table if not exists diskquota_namespace.database_list(dbid oid not null unique);"; - /* debug_query_string need to be set for SPI_execute utility functions. */ - debug_query_string = sql; - StartTransactionCommand(); /* From aa9bcbcc121398e9af17fabb912ebccee1999091 Mon Sep 17 00:00:00 2001 From: Haozhou Wang Date: Tue, 5 Nov 2019 17:00:14 +0800 Subject: [PATCH 049/330] Set debug_query_string before SPI_execute 1. Avoid a GPDB crash when debug_query_string is NULL before SPI_execute --- diskquota.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/diskquota.c b/diskquota.c index 26c606afe11..f411d212594 100644 --- a/diskquota.c +++ b/diskquota.c @@ -552,6 +552,9 @@ create_monitor_db_table(void) PushActiveSnapshot(GetTransactionSnapshot()); pushed_active_snap = true; + /* debug_query_string needs to be set for SPI_execute utility functions. 
*/ + debug_query_string = sql; + if (SPI_execute(sql, false, 0) != SPI_OK_UTILITY) { ereport(ERROR, (errmsg("[diskquota launcher] SPI_execute error, sql:'%s', errno:%d", sql, errno))); @@ -564,6 +567,7 @@ create_monitor_db_table(void) EmitErrorReport(); FlushErrorState(); ret = false; + debug_query_string = NULL; /* Now we can allow interrupts again */ RESUME_INTERRUPTS(); } From d4036ca74630293c45f31e2c9dc51f37008bd7ed Mon Sep 17 00:00:00 2001 From: Haozhou Wang Date: Wed, 6 Nov 2019 11:15:37 +0800 Subject: [PATCH 050/330] update PGPORT due to concourse pipeline changed --- concourse/scripts/test_diskquota.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/concourse/scripts/test_diskquota.sh b/concourse/scripts/test_diskquota.sh index 6669c0a9a4c..f792406b3bc 100755 --- a/concourse/scripts/test_diskquota.sh +++ b/concourse/scripts/test_diskquota.sh @@ -16,17 +16,17 @@ function test(){ createdb diskquota gpconfig -c shared_preload_libraries -v 'diskquota' gpstop -arf - gpconfig -c diskquota.naptime -v 2 + gpconfig -c diskquota.naptime -v 1 gpstop -arf pushd diskquota_src trap "[ -s regression.diffs ] && grep -v GP_IGNORE regression.diffs" EXIT make installcheck [ -s regression.diffs ] && grep -v GP_IGNORE regression.diffs && exit 1 ps -ef | grep postgres| grep qddir| cut -d ' ' -f 6 | xargs kill -9 - export PGPORT=16432 + export PGPORT=6001 echo "export PGPROT=\$PGPORT" >> /usr/local/greenplum-db-devel/greenplum_path.sh source /usr/local/greenplum-db-devel/greenplum_path.sh - rm /tmp/.s.PGSQL.15432* + rm /tmp/.s.PGSQL.6000* gpactivatestandby -ad ${TOP_DIR}/gpdb_src/gpAux/gpdemo/datadirs/standby make installcheck [ -s regression.diffs ] && grep -v GP_IGNORE regression.diffs && exit 1 From b2df1d7a85a22a52c97f379bf704ceaec47f8a8b Mon Sep 17 00:00:00 2001 From: Haozhou Wang Date: Thu, 12 Dec 2019 17:44:54 +0800 Subject: [PATCH 051/330] Change release pipeline folder name 1. change filename 2. 
update script file --- concourse/pipeline/pipeline.yml | 6 ++++++ concourse/pipeline/release_pipeline.yml | 12 +++++++++--- concourse/scripts/build_diskquota.sh | 2 +- 3 files changed, 16 insertions(+), 4 deletions(-) diff --git a/concourse/pipeline/pipeline.yml b/concourse/pipeline/pipeline.yml index b911f0c1de5..c2f115610ba 100644 --- a/concourse/pipeline/pipeline.yml +++ b/concourse/pipeline/pipeline.yml @@ -96,6 +96,8 @@ jobs: - task: build_diskquota file: diskquota_src/concourse/tasks/build_diskquota.yml image: centos-gpdb-dev-7 + params: + DISKQUOTA_OS: rhel7 - task: test_diskquota file: diskquota_src/concourse/tasks/test_diskquota.yml image: centos-gpdb-dev-7 @@ -115,6 +117,8 @@ jobs: - task: build_diskquota file: diskquota_src/concourse/tasks/build_diskquota.yml image: centos-gpdb-dev-6 + params: + DISKQUOTA_OS: rhel6 - task: test_diskquota file: diskquota_src/concourse/tasks/test_diskquota.yml image: centos-gpdb-dev-6 @@ -135,6 +139,8 @@ jobs: - task: build_diskquota file: diskquota_src/concourse/tasks/build_diskquota.yml image: ubuntu18-image-build + params: + DISKQUOTA_OS: ubuntu18.04 - task: test_diskquota file: diskquota_src/concourse/tasks/test_diskquota.yml image: ubuntu18-image-test diff --git a/concourse/pipeline/release_pipeline.yml b/concourse/pipeline/release_pipeline.yml index 152b5de1914..bfecd95d188 100644 --- a/concourse/pipeline/release_pipeline.yml +++ b/concourse/pipeline/release_pipeline.yml @@ -90,21 +90,21 @@ resources: source: bucket: {{gcs-bucket}} json_key: {{concourse-gcs-resources-service-account-key}} - regexp: diskquota/released/gpdb6/rhel7/diskquota-(.*).tar.gz + regexp: diskquota/released/gpdb6/diskquota-(.*).tar.gz - name: bin_diskquota_centos6 type: gcs source: bucket: {{gcs-bucket}} json_key: {{concourse-gcs-resources-service-account-key}} - regexp: diskquota/released/gpdb6/rhel6/diskquota-(.*).tar.gz + regexp: diskquota/released/gpdb6/diskquota-(.*).tar.gz - name: bin_diskquota_ubuntu18 type: gcs source: bucket: {{gcs-bucket}} json_key: {{concourse-gcs-resources-service-account-key}} - regexp: diskquota/released/gpdb6/ubuntu18/diskquota-(.*).tar.gz + regexp: diskquota/released/gpdb6/diskquota-(.*).tar.gz ## jobs ## ====================================================================== @@ -131,6 +131,8 @@ jobs: image: centos-gpdb-dev-7 input_mapping: bin_diskquota: diskquota_artifacts + params: + DISKQUOTA_OS: rhel7 - aggregate: - put: bin_diskquota_centos7 params: @@ -156,6 +158,8 @@ jobs: image: centos-gpdb-dev-6 input_mapping: bin_diskquota: diskquota_artifacts + params: + DISKQUOTA_OS: rhel6 - aggregate: - put: bin_diskquota_centos6 params: @@ -182,6 +186,8 @@ jobs: image: ubuntu18-image-test input_mapping: bin_diskquota: diskquota_artifacts + params: + DISKQUOTA_OS: ubuntu18.04 - aggregate: - put: bin_diskquota_ubuntu18 params: diff --git a/concourse/scripts/build_diskquota.sh b/concourse/scripts/build_diskquota.sh index 0f161b5b867..9c09e18ec4e 100755 --- a/concourse/scripts/build_diskquota.sh +++ b/concourse/scripts/build_diskquota.sh @@ -20,7 +20,7 @@ function pkg() { pushd /usr/local/greenplum-db-devel/ echo 'cp -r lib share $GPHOME || exit 1'> install_gpdb_component chmod a+x install_gpdb_component - tar -czf $TOP_DIR/diskquota_artifacts/diskquota-${DISKQUOTA_VERSION}.tar.gz \ + tar -czf $TOP_DIR/diskquota_artifacts/diskquota-${DISKQUOTA_VERSION}-${DISKQUOTA_OS}_x86_64.tar.gz \ lib/postgresql/diskquota.so \ share/postgresql/extension/diskquota.control \ share/postgresql/extension/diskquota--1.0.sql \ From 
0c7b621d5d3ebe5e060bdfdefb3f252cfafe0a4f Mon Sep 17 00:00:00 2001 From: Haozhou Wang Date: Thu, 12 Dec 2019 19:07:27 +0800 Subject: [PATCH 052/330] Change release filename for concourse pipeline --- concourse/tasks/build_diskquota.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/concourse/tasks/build_diskquota.yml b/concourse/tasks/build_diskquota.yml index 1b2e7c7b138..e7c74885727 100644 --- a/concourse/tasks/build_diskquota.yml +++ b/concourse/tasks/build_diskquota.yml @@ -12,3 +12,4 @@ outputs: run: path: diskquota_src/concourse/scripts/build_diskquota.sh params: + DISKQUOTA_OS: From 362fb52d24eb99240ed5d7097d8b3099f4ebff89 Mon Sep 17 00:00:00 2001 From: Haozhou Wang Date: Thu, 12 Dec 2019 20:04:39 +0800 Subject: [PATCH 053/330] Concourse can generate filename based on OS type --- concourse/scripts/build_diskquota.sh | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/concourse/scripts/build_diskquota.sh b/concourse/scripts/build_diskquota.sh index 9c09e18ec4e..1c18f57ae03 100755 --- a/concourse/scripts/build_diskquota.sh +++ b/concourse/scripts/build_diskquota.sh @@ -20,11 +20,30 @@ function pkg() { pushd /usr/local/greenplum-db-devel/ echo 'cp -r lib share $GPHOME || exit 1'> install_gpdb_component chmod a+x install_gpdb_component - tar -czf $TOP_DIR/diskquota_artifacts/diskquota-${DISKQUOTA_VERSION}-${DISKQUOTA_OS}_x86_64.tar.gz \ + case "$DISKQUOTA_OS" in + rhel6) + tar -czf $TOP_DIR/diskquota_artifacts/diskquota-${DISKQUOTA_VERSION}-rhel6_x86_64.tar.gz \ lib/postgresql/diskquota.so \ share/postgresql/extension/diskquota.control \ share/postgresql/extension/diskquota--1.0.sql \ install_gpdb_component + ;; + rhel7) + tar -czf $TOP_DIR/diskquota_artifacts/diskquota-${DISKQUOTA_VERSION}-rhel7_x86_64.tar.gz \ + lib/postgresql/diskquota.so \ + share/postgresql/extension/diskquota.control \ + share/postgresql/extension/diskquota--1.0.sql \ + install_gpdb_component + ;; + ubuntu18.04) + tar -czf $TOP_DIR/diskquota_artifacts/diskquota-${DISKQUOTA_VERSION}-ubuntu18.04_x86_64.tar.gz \ + lib/postgresql/diskquota.so \ + share/postgresql/extension/diskquota.control \ + share/postgresql/extension/diskquota--1.0.sql \ + install_gpdb_component + ;; + *) echo "Unknown OS: $OSVER"; exit 1 ;; + esac popd } From 3fa6960fda397cd225d10d6615bddc4d18ccbe5f Mon Sep 17 00:00:00 2001 From: Haozhou Wang Date: Thu, 15 Oct 2020 15:47:32 +0800 Subject: [PATCH 054/330] Avoid collecting active table information in either master or mirror segments --- gp_activetable.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/gp_activetable.c b/gp_activetable.c index 573e8329909..6320f4a1685 100644 --- a/gp_activetable.c +++ b/gp_activetable.c @@ -162,6 +162,13 @@ report_active_table_helper(const RelFileNodeBackend *relFileNode) DiskQuotaActiveTableFileEntry item; bool found = false; + + /* We do not collect the active table in either master or mirror segments */ + if (IS_QUERY_DISPATCHER() || IsRoleMirror()) + { + return; + } + MemSet(&item, 0, sizeof(DiskQuotaActiveTableFileEntry)); item.dbid = relFileNode->node.dbNode; item.relfilenode = relFileNode->node.relNode; From a6a0f6ff93d3f0b5fa7aca439f801a2bb3e0687a Mon Sep 17 00:00:00 2001 From: Sasasu Date: Tue, 8 Dec 2020 15:25:09 +0800 Subject: [PATCH 055/330] Avoid running pg_relation_size on entry-db, fix the table_size test case Signed-off-by: Sasasu --- expected/test_table_size.out | 2 +- sql/test_table_size.sql | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/expected/test_table_size.out
b/expected/test_table_size.out index 22c5523a16e..70370f8af1b 100644 --- a/expected/test_table_size.out +++ b/expected/test_table_size.out @@ -12,7 +12,7 @@ select pg_sleep(2); create table buffer(oid oid, relname name, size bigint); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'oid' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. -with size as ( select oid,relname,pg_total_relation_size(oid) from pg_class) insert into buffer select size.oid, size.relname, size.pg_total_relation_size from size, diskquota.table_size as dt where dt.tableid = size.oid and relname = 'a'; +insert in buffer select oid, relname, pg_total_relation_size(oid) from pg_class, diskquota.table_size as dt where dt.size = oid and relname = 'a'; insert into buffer select oid, relname, sum(pg_total_relation_size(oid)) from gp_dist_random('pg_class') where oid > 16384 and (relkind='r' or relkind='m') and relname = 'a' group by oid, relname; select sum(buffer.size) = diskquota.table_size.size from buffer, diskquota.table_size where buffer.oid = diskquota.table_size.tableid group by diskquota.table_size.size; ?column? diff --git a/sql/test_table_size.sql b/sql/test_table_size.sql index 80279cb180f..deefaa85646 100644 --- a/sql/test_table_size.sql +++ b/sql/test_table_size.sql @@ -7,7 +7,7 @@ insert into a select * from generate_series(1,10000); select pg_sleep(2); create table buffer(oid oid, relname name, size bigint); -with size as ( select oid,relname,pg_total_relation_size(oid) from pg_class) insert into buffer select size.oid, size.relname, size.pg_total_relation_size from size, diskquota.table_size as dt where dt.tableid = size.oid and relname = 'a'; +insert in buffer select oid, relname, pg_total_relation_size(oid) from pg_class, diskquota.table_size as dt where dt.size = oid and relname = 'a'; insert into buffer select oid, relname, sum(pg_total_relation_size(oid)) from gp_dist_random('pg_class') where oid > 16384 and (relkind='r' or relkind='m') and relname = 'a' group by oid, relname; From a1293d2611fc411aba058ff2537a4c592896dd33 Mon Sep 17 00:00:00 2001 From: Ivan Leskin Date: Fri, 4 Sep 2020 16:17:32 +0300 Subject: [PATCH 056/330] Reposition PostgreSQL GUC declaration GUC variables must be declared before any other actions are made by diskquota. In particular, the value of 'diskquota_max_active_tables' (GUC 'diskquota.max_active_tables') is used in 'DiskQuotaShmemSize()'. The late GUC variable declaration caused this variable to be '0', thus leading to allocation of insufficient amount of memory. Fix this: 1. Move GUC declarations to a separate static function 2. 
Call this function before any other actions performed in _PG_init() --- diskquota.c | 86 ++++++++++++++++++++++++++++++----------------------- 1 file changed, 48 insertions(+), 38 deletions(-) diff --git a/diskquota.c b/diskquota.c index f411d212594..5e998f24f2e 100644 --- a/diskquota.c +++ b/diskquota.c @@ -95,6 +95,7 @@ void disk_quota_launcher_main(Datum); static void disk_quota_sigterm(SIGNAL_ARGS); static void disk_quota_sighup(SIGNAL_ARGS); +static void define_guc_variables(void); static bool start_worker_by_dboid(Oid dbid); static void start_workers_from_dblist(void); static void create_monitor_db_table(void); @@ -128,48 +129,13 @@ _PG_init(void) if (!process_shared_preload_libraries_in_progress) ereport(ERROR, (errmsg("diskquota.so not in shared_preload_libraries."))); + /* values are used in later calls */ + define_guc_variables(); + init_disk_quota_shmem(); init_disk_quota_enforcement(); init_active_table_hook(); - /* get the configuration */ - DefineCustomIntVariable("diskquota.naptime", - "Duration between each check (in seconds).", - NULL, - &diskquota_naptime, - 2, - 1, - INT_MAX, - PGC_SIGHUP, - 0, - NULL, - NULL, - NULL); - - DefineCustomIntVariable("diskquota.max_active_tables", - "max number of active tables monitored by disk-quota", - NULL, - &diskquota_max_active_tables, - 1 * 1024 * 1024, - 1, - INT_MAX, - PGC_SIGHUP, - 0, - NULL, - NULL, - NULL); - - DefineCustomBoolVariable("diskquota.enable_hardlimit", - "Use in-query diskquota enforcement", - NULL, - &diskquota_enable_hardlimit, - false, - PGC_SIGHUP, - 0, - NULL, - NULL, - NULL); - /* start disk quota launcher only on master */ if (!IS_QUERY_DISPATCHER()) { @@ -250,6 +216,50 @@ disk_quota_sigusr1(SIGNAL_ARGS) errno = save_errno; } +/* + * Define GUC variables used by diskquota + */ +static void +define_guc_variables(void) +{ + DefineCustomIntVariable("diskquota.naptime", + "Duration between each check (in seconds).", + NULL, + &diskquota_naptime, + 2, + 1, + INT_MAX, + PGC_SIGHUP, + 0, + NULL, + NULL, + NULL); + + DefineCustomIntVariable("diskquota.max_active_tables", + "max number of active tables monitored by disk-quota", + NULL, + &diskquota_max_active_tables, + 1 * 1024 * 1024, + 1, + INT_MAX, + PGC_SIGHUP, + 0, + NULL, + NULL, + NULL); + + DefineCustomBoolVariable("diskquota.enable_hardlimit", + "Use in-query diskquota enforcement", + NULL, + &diskquota_enable_hardlimit, + false, + PGC_SIGHUP, + 0, + NULL, + NULL, + NULL); +} + /* ---- Functions for disk quota worker process ---- */ /* From 8674d7a0afde7d2e01e786332603c602d6891205 Mon Sep 17 00:00:00 2001 From: Sasasu Date: Fri, 4 Dec 2020 16:21:47 +0800 Subject: [PATCH 057/330] Add dbid-cache to fix out of memory on active table. Add shared memory hashtable `monitoring_dbid_cache`, which keeps track of `diskquota_namespace.database_list`. When file_smgr* hook invoked, check the hash table first and filter out the dbid which should not be monitored. 
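The read side of this cache reduces to a check like the following minimal C sketch. It is an illustration only: it assumes the shared hash table monitoring_dbid_cache introduced by this patch, and dbid_is_monitored is a hypothetical helper name, not a function in the diff.

/*
 * Consult the shared dbid cache before recording an active table.
 * The lookup is read-only and a slightly stale answer is acceptable,
 * so the hot path can afford to skip taking the shared lock.
 */
static bool
dbid_is_monitored(Oid dbid)
{
	bool		found = false;

	(void) hash_search(monitoring_dbid_cache, &dbid, HASH_FIND, &found);
	return found;
}

When this check fails, the smgr hooks return early, so tables of databases that are not monitored never enter active_tables_map and cannot exhaust it.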
Signed-off-by: Sasasu --- diskquota.c | 31 ++++++++++++++++++++++++++----- diskquota.h | 5 +++++ diskquota_schedule | 1 + diskquota_schedule_int | 1 + expected/test_manytable.out | 24 ++++++++++++++++++++++++ gp_activetable.c | 14 +++++++++++++- gp_activetable.h | 1 + quotamodel.c | 18 ++++++++++++++++-- sql/test_manytable.sql | 29 +++++++++++++++++++++++++++++ 9 files changed, 116 insertions(+), 8 deletions(-) create mode 100644 expected/test_manytable.out create mode 100644 sql/test_manytable.sql diff --git a/diskquota.c b/diskquota.c index 5e998f24f2e..e6f90afce76 100644 --- a/diskquota.c +++ b/diskquota.c @@ -54,9 +54,6 @@ #include "diskquota.h" PG_MODULE_MAGIC; -/* max number of monitored database with diskquota enabled */ -#define MAX_NUM_MONITORED_DB 10 - #define DISKQUOTA_DB "diskquota" #define DISKQUOTA_APPLICATION_NAME "gp_reserved_gpdiskquota" @@ -653,6 +650,13 @@ start_workers_from_dblist(void) ereport(LOG, (errmsg("[diskquota launcher] diskquota monitored database limit is reached, database(oid:%u) will not enable diskquota", dbid))); break; } + + /* put the dbid to monitoring database cache to filter out table not under + * monitoring. here is no need to consider alloc failure, checked before */ + LWLockAcquire(diskquota_locks.monitoring_dbid_cache_lock, LW_EXCLUSIVE); + hash_search(monitoring_dbid_cache, &dbid, HASH_ENTER, NULL); + LWLockRelease(diskquota_locks.monitoring_dbid_cache_lock); + } num_db = num; SPI_finish(); @@ -776,6 +780,9 @@ do_process_extension_ddl_message(MessageResult * code, ExtensionDDLMessage local static void on_add_db(Oid dbid, MessageResult * code) { + bool found = false; + void *enrty = NULL; + if (num_db >= MAX_NUM_MONITORED_DB) { *code = ERR_EXCEED; @@ -807,6 +814,16 @@ on_add_db(Oid dbid, MessageResult * code) *code = ERR_START_WORKER; ereport(ERROR, (errmsg("[diskquota launcher] failed to start worker - dbid=%u", dbid))); } + + LWLockAcquire(diskquota_locks.monitoring_dbid_cache_lock, LW_EXCLUSIVE); + enrty = hash_search(monitoring_dbid_cache, &dbid, HASH_ENTER, &found); + if (!found && enrty == NULL) + { + *code = ERR_EXCEED; + ereport(WARNING, + (errmsg("can't alloc memory on dbid cache, there ary too many databases to monitor"))); + } + LWLockRelease(diskquota_locks.monitoring_dbid_cache_lock); } /* @@ -814,7 +831,7 @@ on_add_db(Oid dbid, MessageResult * code) * do our best to: * 1. kill the associated worker process * 2. delete dbid from diskquota_namespace.database_list - * 3. invalidate black-map entries from shared memory + * 3. invalidate black-map entries and monitoring_dbid_cache from shared memory */ static void on_del_db(Oid dbid, MessageResult * code) @@ -835,6 +852,10 @@ on_del_db(Oid dbid, MessageResult * code) PG_TRY(); { del_dbid_from_database_list(dbid); + + LWLockAcquire(diskquota_locks.monitoring_dbid_cache_lock, LW_EXCLUSIVE); + hash_search(monitoring_dbid_cache, &dbid, HASH_REMOVE, NULL); + LWLockRelease(diskquota_locks.monitoring_dbid_cache_lock); } PG_CATCH(); { @@ -929,7 +950,7 @@ terminate_all_workers(void) /* * terminate the worker processes. 
since launcher will exit immediately, - * we skip to clear the disk_quota_worker_map + * we skip to clear the disk_quota_worker_map and monitoring_dbid_cache */ while ((hash_entry = hash_seq_search(&iter)) != NULL) { diff --git a/diskquota.h b/diskquota.h index 4957131d53e..03bfd19dc6b 100644 --- a/diskquota.h +++ b/diskquota.h @@ -3,6 +3,9 @@ #include "storage/lwlock.h" +/* max number of monitored database with diskquota enabled */ +#define MAX_NUM_MONITORED_DB 10 + typedef enum { NAMESPACE_QUOTA, @@ -21,12 +24,14 @@ typedef enum DISKQUOTA_READY_STATE } DiskQuotaState; +#define DiskQuotaLocksItemNumber (5) struct DiskQuotaLocks { LWLock *active_table_lock; LWLock *black_map_lock; LWLock *extension_ddl_message_lock; LWLock *extension_ddl_lock; /* ensure create diskquota extension serially */ + LWLock *monitoring_dbid_cache_lock; }; typedef struct DiskQuotaLocks DiskQuotaLocks; diff --git a/diskquota_schedule b/diskquota_schedule index f1d01e9174f..bbcd6ce5105 100644 --- a/diskquota_schedule +++ b/diskquota_schedule @@ -9,5 +9,6 @@ test: test_partition test: test_vacuum test: test_primary_failure test: test_extension +test: test_manytable test: clean test: test_insert_after_drop diff --git a/diskquota_schedule_int b/diskquota_schedule_int index c7ea1b2f8a2..101b4388142 100644 --- a/diskquota_schedule_int +++ b/diskquota_schedule_int @@ -6,5 +6,6 @@ test: test_role test_schema test_drop_table test_column test_copy test_update te test: test_truncate test: test_delete_quota test: test_partition +test: test_manytable test: clean test: test_insert_after_drop diff --git a/expected/test_manytable.out b/expected/test_manytable.out new file mode 100644 index 00000000000..5302de48ec8 --- /dev/null +++ b/expected/test_manytable.out @@ -0,0 +1,24 @@ +-- start_ignore +-- test case manytable change cluster level config, can not run in parallel. +\! gpconfig -c diskquota.max_active_tables -v 2 > /dev/null +-- end_ignore +\! echo $? +0 +CREATE DATABASE test_manytable01; +CREATE DATABASE test_manytable02; +\c test_manytable01 +CREATE TABLE a01(i int) DISTRIBUTED BY (i); +CREATE TABLE a02(i int) DISTRIBUTED BY (i); +CREATE TABLE a03(i int) DISTRIBUTED BY (i); +INSERT INTO a01 values(generate_series(0, 500)); +INSERT INTO a02 values(generate_series(0, 500)); +INSERT INTO a03 values(generate_series(0, 500)); +\c test_manytable02 +CREATE TABLE b01(i int) DISTRIBUTED BY (i); +INSERT INTO b01 values(generate_series(0, 500)); +\c postgres +DROP DATABASE test_manytable01; +DROP DATABASE test_manytable02; +-- start_ignore +\! gpconfig -c diskquota.max_active_tables -v 1024 > /dev/null +-- end_ignore diff --git a/gp_activetable.c b/gp_activetable.c index 6320f4a1685..fad77698c4d 100644 --- a/gp_activetable.c +++ b/gp_activetable.c @@ -51,6 +51,7 @@ typedef struct DiskQuotaSetOFCache } DiskQuotaSetOFCache; HTAB *active_tables_map = NULL; +HTAB *monitoring_dbid_cache = NULL; /* active table hooks which detect the disk file size change. */ static file_create_hook_type prev_file_create_hook = NULL; @@ -168,7 +169,18 @@ report_active_table_helper(const RelFileNodeBackend *relFileNode) { return; } - + + /* do not collect active table info when the database is not under monitoring. + * this operation is read-only and does not require absolutely exact. 
+ * read the cache with out shared lock */ + hash_search(monitoring_dbid_cache, &relFileNode->node.dbNode, HASH_FIND, &found); + + if (!found) + { + return; + } + found = false; + MemSet(&item, 0, sizeof(DiskQuotaActiveTableFileEntry)); item.dbid = relFileNode->node.dbNode; item.relfilenode = relFileNode->node.relNode; diff --git a/gp_activetable.h b/gp_activetable.h index 1b975609665..44a54f5f128 100644 --- a/gp_activetable.h +++ b/gp_activetable.h @@ -24,6 +24,7 @@ extern void init_shm_worker_active_tables(void); extern void init_lock_active_tables(void); extern HTAB *active_tables_map; +extern HTAB *monitoring_dbid_cache; #define atooid(x) ((Oid) strtoul((x), NULL, 10)) diff --git a/quotamodel.c b/quotamodel.c index 2c0073c0d93..fe8bd2a203b 100644 --- a/quotamodel.c +++ b/quotamodel.c @@ -162,8 +162,8 @@ init_disk_quota_shmem(void) * resources in pgss_shmem_startup(). */ RequestAddinShmemSpace(DiskQuotaShmemSize()); - /* 4 locks for diskquota refer to init_lwlocks() for details */ - RequestAddinLWLocks(4); + /* locks for diskquota refer to init_lwlocks() for details */ + RequestAddinLWLocks(DiskQuotaLocksItemNumber); /* Install startup hook to initialize our shared memory. */ prev_shmem_startup_hook = shmem_startup_hook; @@ -212,6 +212,17 @@ disk_quota_shmem_startup(void) init_shm_worker_active_tables(); + memset(&hash_ctl, 0, sizeof(hash_ctl)); + hash_ctl.keysize = sizeof(Oid); + hash_ctl.entrysize = sizeof(Oid); + hash_ctl.hash = oid_hash; + + monitoring_dbid_cache = ShmemInitHash("table oid cache which shoud tracking", + MAX_NUM_MONITORED_DB, + MAX_NUM_MONITORED_DB, + &hash_ctl, + HASH_ELEM | HASH_FUNCTION); + LWLockRelease(AddinShmemInitLock); } @@ -223,6 +234,7 @@ disk_quota_shmem_startup(void) * extension_ddl_message. * extension_ddl_lock is used to avoid concurrent diskquota * extension ddl(create/drop) command. + * monitoring_dbid_cache_lock is used to shared `monitoring_dbid_cache` on segment process. */ static void init_lwlocks(void) @@ -231,6 +243,7 @@ init_lwlocks(void) diskquota_locks.black_map_lock = LWLockAssign(); diskquota_locks.extension_ddl_message_lock = LWLockAssign(); diskquota_locks.extension_ddl_lock = LWLockAssign(); + diskquota_locks.monitoring_dbid_cache_lock = LWLockAssign(); } /* @@ -245,6 +258,7 @@ DiskQuotaShmemSize(void) size = sizeof(ExtensionDDLMessage); size = add_size(size, hash_estimate_size(MAX_DISK_QUOTA_BLACK_ENTRIES, sizeof(BlackMapEntry))); size = add_size(size, hash_estimate_size(diskquota_max_active_tables, sizeof(DiskQuotaActiveTableEntry))); + size = add_size(size, hash_estimate_size(MAX_NUM_MONITORED_DB, sizeof(Oid))); return size; } diff --git a/sql/test_manytable.sql b/sql/test_manytable.sql new file mode 100644 index 00000000000..eb90225f87b --- /dev/null +++ b/sql/test_manytable.sql @@ -0,0 +1,29 @@ +-- start_ignore +\! gpconfig -c diskquota.max_active_tables -v 2 > /dev/null +-- end_ignore +\! echo $? + +CREATE DATABASE test_manytable01; +CREATE DATABASE test_manytable02; + +\c test_manytable01 + +CREATE TABLE a01(i int) DISTRIBUTED BY (i); +CREATE TABLE a02(i int) DISTRIBUTED BY (i); +CREATE TABLE a03(i int) DISTRIBUTED BY (i); + +INSERT INTO a01 values(generate_series(0, 500)); +INSERT INTO a02 values(generate_series(0, 500)); +INSERT INTO a03 values(generate_series(0, 500)); + +\c test_manytable02 +CREATE TABLE b01(i int) DISTRIBUTED BY (i); +INSERT INTO b01 values(generate_series(0, 500)); + +\c postgres +DROP DATABASE test_manytable01; +DROP DATABASE test_manytable02; + +-- start_ignore +\! 
gpconfig -c diskquota.max_active_tables -v 1024 > /dev/null +-- end_ignore From c972ba139fb5e0530fe1cee9035f7eb07fafe97b Mon Sep 17 00:00:00 2001 From: Sasasu Date: Thu, 17 Dec 2020 10:24:05 +0800 Subject: [PATCH 058/330] fix typo in #55 Signed-off-by: Sasasu --- expected/test_table_size.out | 2 +- sql/test_table_size.sql | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/expected/test_table_size.out b/expected/test_table_size.out index 70370f8af1b..36421dd9f19 100644 --- a/expected/test_table_size.out +++ b/expected/test_table_size.out @@ -12,7 +12,7 @@ select pg_sleep(2); create table buffer(oid oid, relname name, size bigint); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'oid' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. -insert in buffer select oid, relname, pg_total_relation_size(oid) from pg_class, diskquota.table_size as dt where dt.size = oid and relname = 'a'; +insert into buffer select oid, relname, pg_total_relation_size(oid) from pg_class, diskquota.table_size as dt where dt.size = oid and relname = 'a'; insert into buffer select oid, relname, sum(pg_total_relation_size(oid)) from gp_dist_random('pg_class') where oid > 16384 and (relkind='r' or relkind='m') and relname = 'a' group by oid, relname; select sum(buffer.size) = diskquota.table_size.size from buffer, diskquota.table_size where buffer.oid = diskquota.table_size.tableid group by diskquota.table_size.size; ?column? diff --git a/sql/test_table_size.sql b/sql/test_table_size.sql index deefaa85646..aad12e837ad 100644 --- a/sql/test_table_size.sql +++ b/sql/test_table_size.sql @@ -7,7 +7,7 @@ insert into a select * from generate_series(1,10000); select pg_sleep(2); create table buffer(oid oid, relname name, size bigint); -insert in buffer select oid, relname, pg_total_relation_size(oid) from pg_class, diskquota.table_size as dt where dt.size = oid and relname = 'a'; +insert into buffer select oid, relname, pg_total_relation_size(oid) from pg_class, diskquota.table_size as dt where dt.size = oid and relname = 'a'; insert into buffer select oid, relname, sum(pg_total_relation_size(oid)) from gp_dist_random('pg_class') where oid > 16384 and (relkind='r' or relkind='m') and relname = 'a' group by oid, relname; From 8f659747f2b2a841845193d718dd538512372039 Mon Sep 17 00:00:00 2001 From: Haozhou Wang Date: Fri, 18 Dec 2020 14:27:09 +0800 Subject: [PATCH 059/330] Only collect active table info on diskquota enabled database (#57) A new shared memory is used in the segments to identify which table should be collected. 
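Concretely, the coordinator pushes a dbid into (or out of) each segment's copy of this cache by selecting a UDF from gp_dist_random('gp_id'), which runs the call once on every primary segment. A hedged C sketch of that dispatch path follows; dispatch_db_list_update is an illustrative name, and the mode convention (0 to add, 1 to remove) is taken from the diff below.

/*
 * Ask every primary segment to update its local monitoring dbid cache.
 * Selecting from gp_dist_random('gp_id') fans the UDF call out to all
 * primaries; the coordinator only verifies that the dispatch succeeded.
 */
static void
dispatch_db_list_update(Oid dbid, int mode)
{
	StringInfoData sql;

	initStringInfo(&sql);
	appendStringInfo(&sql,
	                 "select gp_segment_id, diskquota.update_diskquota_db_list(%u, %d)"
	                 " from gp_dist_random('gp_id');",
	                 dbid, mode);
	if (SPI_execute(sql.data, true, 0) != SPI_OK_SELECT)
		ereport(ERROR, (errmsg("[diskquota] failed to update segment db list, sql:'%s'", sql.data)));
	pfree(sql.data);
}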
Also related to PR#54 --- diskquota--1.0.sql | 6 ++++++ diskquota.c | 38 ++++++++++++--------------------- diskquota_utility.c | 52 +++++++++++++++++++++++++++++++++++++++++++++ gp_activetable.c | 3 ++- quotamodel.c | 17 +++++++++++++-- 5 files changed, 89 insertions(+), 27 deletions(-) diff --git a/diskquota--1.0.sql b/diskquota--1.0.sql index 692979620de..af22a2ff76e 100644 --- a/diskquota--1.0.sql +++ b/diskquota--1.0.sql @@ -9,6 +9,7 @@ CREATE SCHEMA diskquota; CREATE TABLE diskquota.quota_config (targetOid oid, quotatype int, quotalimitMB int8, PRIMARY KEY(targetOid, quotatype)); SELECT pg_catalog.pg_extension_config_dump('diskquota.quota_config', ''); +SELECT gp_segment_id, pg_catalog.pg_extension_config_dump('diskquota.quota_config', '') from gp_dist_random('gp_id'); CREATE FUNCTION diskquota.set_schema_quota(text, text) RETURNS void STRICT @@ -20,6 +21,11 @@ RETURNS void STRICT AS 'MODULE_PATHNAME' LANGUAGE C; +CREATE FUNCTION diskquota.update_diskquota_db_list(oid, int4) +RETURNS void STRICT +AS 'MODULE_PATHNAME' +LANGUAGE C; + CREATE TABLE diskquota.table_size (tableid oid, size bigint, PRIMARY KEY(tableid)); CREATE TABLE diskquota.state (state int, PRIMARY KEY(state)); diff --git a/diskquota.c b/diskquota.c index e6f90afce76..5d6594b19ec 100644 --- a/diskquota.c +++ b/diskquota.c @@ -538,7 +538,9 @@ create_monitor_db_table(void) bool ret = true; sql = "create schema if not exists diskquota_namespace;" - "create table if not exists diskquota_namespace.database_list(dbid oid not null unique);"; + "create table if not exists diskquota_namespace.database_list(dbid oid not null unique);" + "create or replace function diskquota.update_diskquota_db_list(oid, int4) returns void " + "strict as '$libdir/diskquota' language C;"; StartTransactionCommand(); @@ -637,6 +639,7 @@ start_workers_from_dblist(void) ereport(LOG, (errmsg("[diskquota launcher] database(oid:%u) in table database_list is not a valid database", dbid))); continue; } + elog(WARNING, "start workers"); if (!start_worker_by_dboid(dbid)) ereport(ERROR, (errmsg("[diskquota launcher] start worker process of database(oid:%u) failed", dbid))); num++; @@ -650,13 +653,6 @@ start_workers_from_dblist(void) ereport(LOG, (errmsg("[diskquota launcher] diskquota monitored database limit is reached, database(oid:%u) will not enable diskquota", dbid))); break; } - - /* put the dbid to monitoring database cache to filter out table not under - * monitoring. 
here is no need to consider alloc failure, checked before */ - LWLockAcquire(diskquota_locks.monitoring_dbid_cache_lock, LW_EXCLUSIVE); - hash_search(monitoring_dbid_cache, &dbid, HASH_ENTER, NULL); - LWLockRelease(diskquota_locks.monitoring_dbid_cache_lock); - } num_db = num; SPI_finish(); @@ -780,9 +776,6 @@ do_process_extension_ddl_message(MessageResult * code, ExtensionDDLMessage local static void on_add_db(Oid dbid, MessageResult * code) { - bool found = false; - void *enrty = NULL; - if (num_db >= MAX_NUM_MONITORED_DB) { *code = ERR_EXCEED; @@ -815,15 +808,6 @@ on_add_db(Oid dbid, MessageResult * code) ereport(ERROR, (errmsg("[diskquota launcher] failed to start worker - dbid=%u", dbid))); } - LWLockAcquire(diskquota_locks.monitoring_dbid_cache_lock, LW_EXCLUSIVE); - enrty = hash_search(monitoring_dbid_cache, &dbid, HASH_ENTER, &found); - if (!found && enrty == NULL) - { - *code = ERR_EXCEED; - ereport(WARNING, - (errmsg("can't alloc memory on dbid cache, there ary too many databases to monitor"))); - } - LWLockRelease(diskquota_locks.monitoring_dbid_cache_lock); } /* @@ -852,10 +836,6 @@ on_del_db(Oid dbid, MessageResult * code) PG_TRY(); { del_dbid_from_database_list(dbid); - - LWLockAcquire(diskquota_locks.monitoring_dbid_cache_lock, LW_EXCLUSIVE); - hash_search(monitoring_dbid_cache, &dbid, HASH_REMOVE, NULL); - LWLockRelease(diskquota_locks.monitoring_dbid_cache_lock); } PG_CATCH(); { @@ -908,6 +888,16 @@ del_dbid_from_database_list(Oid dbid) { ereport(ERROR, (errmsg("[diskquota launcher] SPI_execute sql:'%s', errno:%d", str.data, errno))); } + pfree(str.data); + + /* clean the dbid from shared memory*/ + initStringInfo(&str); + appendStringInfo(&str, "select gp_segment_id, diskquota.update_diskquota_db_list(%u, 1)" + " from gp_dist_random('gp_id');", dbid); + ret = SPI_execute(str.data, true, 0); + if (ret != SPI_OK_SELECT) + ereport(ERROR, (errmsg("[diskquota launcher] SPI_execute sql:'%s', errno:%d", str.data, errno))); + pfree(str.data); } /* diff --git a/diskquota_utility.c b/diskquota_utility.c index deefdebc8c0..be165b0d929 100644 --- a/diskquota_utility.c +++ b/diskquota_utility.c @@ -39,7 +39,10 @@ #include "utils/memutils.h" #include "utils/numeric.h" +#include + #include "diskquota.h" +#include "gp_activetable.h" /* disk quota helper function */ @@ -47,6 +50,7 @@ PG_FUNCTION_INFO_V1(init_table_size_table); PG_FUNCTION_INFO_V1(diskquota_start_worker); PG_FUNCTION_INFO_V1(set_schema_quota); PG_FUNCTION_INFO_V1(set_role_quota); +PG_FUNCTION_INFO_V1(update_diskquota_db_list); /* timeout count to wait response from launcher process, in 1/10 sec */ #define WAIT_TIME_COUNT 1200 @@ -635,3 +639,51 @@ get_size_in_mb(char *str) return result; } + +/* + * Function to update the db list on each segment + */ +Datum +update_diskquota_db_list(PG_FUNCTION_ARGS) +{ + Oid dbid = PG_GETARG_OID(0); + int mode = PG_GETARG_INT32(1); + bool found = false; + + if (!superuser()) + { + ereport(ERROR, + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + errmsg("must be superuser to update db list"))); + } + + /* add/remove the dbid to monitoring database cache to filter out table not under + * monitoring in hook functions + */ + + LWLockAcquire(diskquota_locks.monitoring_dbid_cache_lock, LW_EXCLUSIVE); + if (mode == 0) + { + Oid *entry = NULL; + entry = hash_search(monitoring_dbid_cache, &dbid, HASH_ENTER, &found); + elog(WARNING, "add dbid %u into SHM", dbid); + if (!found && entry == NULL) + { + ereport(WARNING, + (errmsg("can't alloc memory on dbid cache, there ary too many databases to monitor"))); 
+ } + } + else if (mode == 1) + { + hash_search(monitoring_dbid_cache, &dbid, HASH_REMOVE, &found); + if (!found) + { + ereport(WARNING, + (errmsg("cannot remove the database from db list, dbid not found"))); + } + } + LWLockRelease(diskquota_locks.monitoring_dbid_cache_lock); + + PG_RETURN_VOID(); + +} diff --git a/gp_activetable.c b/gp_activetable.c index fad77698c4d..1ebf8f3e475 100644 --- a/gp_activetable.c +++ b/gp_activetable.c @@ -162,6 +162,7 @@ report_active_table_helper(const RelFileNodeBackend *relFileNode) DiskQuotaActiveTableFileEntry *entry; DiskQuotaActiveTableFileEntry item; bool found = false; + Oid dbid = relFileNode->node.dbNode; /* We do not collect the active table in either master or mirror segments */ @@ -173,7 +174,7 @@ report_active_table_helper(const RelFileNodeBackend *relFileNode) /* do not collect active table info when the database is not under monitoring. * this operation is read-only and does not require absolutely exact. * read the cache with out shared lock */ - hash_search(monitoring_dbid_cache, &relFileNode->node.dbNode, HASH_FIND, &found); + hash_search(monitoring_dbid_cache, &dbid, HASH_FIND, &found); if (!found) { diff --git a/quotamodel.c b/quotamodel.c index fe8bd2a203b..70c7e811c1c 100644 --- a/quotamodel.c +++ b/quotamodel.c @@ -42,7 +42,9 @@ #include "utils/syscache.h" #include -#include +#include "cdb/cdbvars.h" +#include "cdb/cdbdisp_query.h" +#include "cdb/cdbdispatchresult.h" #include "gp_activetable.h" #include "diskquota.h" @@ -408,7 +410,17 @@ do_check_diskquota_state_is_ready(void) int ret; TupleDesc tupdesc; int i; - + StringInfoData sql_command; + + /* Add the dbid to watching list, so the hook can catch the table change*/ + initStringInfo(&sql_command); + appendStringInfo(&sql_command, "select gp_segment_id, diskquota.update_diskquota_db_list(%u, 0) from gp_dist_random('gp_id');", + MyDatabaseId); + ret = SPI_execute(sql_command.data, true, 0); + if (ret != SPI_OK_SELECT) + ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("[diskquota] check diskquota state SPI_execute failed: error code %d", ret))); + pfree(sql_command.data); /* * check diskquota state from table diskquota.state errors will be catch * at upper level function. @@ -447,6 +459,7 @@ do_check_diskquota_state_is_ready(void) } ereport(WARNING, (errmsg("Diskquota is not in ready state. 
" "please run UDF init_table_size_table()"))); + return false; } From 20d08e23711b2997e206df2f68a46406986ac752 Mon Sep 17 00:00:00 2001 From: Haozhou Wang Date: Fri, 18 Dec 2020 09:02:28 +0000 Subject: [PATCH 060/330] fix create diskquota schema SQL missing --- diskquota.c | 1 + 1 file changed, 1 insertion(+) diff --git a/diskquota.c b/diskquota.c index 5d6594b19ec..a8b29d8d3d0 100644 --- a/diskquota.c +++ b/diskquota.c @@ -539,6 +539,7 @@ create_monitor_db_table(void) sql = "create schema if not exists diskquota_namespace;" "create table if not exists diskquota_namespace.database_list(dbid oid not null unique);" + "create schema if not exists diskquota;" "create or replace function diskquota.update_diskquota_db_list(oid, int4) returns void " "strict as '$libdir/diskquota' language C;"; From 33f69d6e7b4bb89880337785b331c6e4bf3fc9fd Mon Sep 17 00:00:00 2001 From: Haozhou Wang Date: Fri, 18 Dec 2020 09:46:19 +0000 Subject: [PATCH 061/330] reschedule regress test, due to GPDB behavior change --- diskquota_schedule | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/diskquota_schedule b/diskquota_schedule index bbcd6ce5105..2a92cec86d0 100644 --- a/diskquota_schedule +++ b/diskquota_schedule @@ -1,7 +1,9 @@ test: init test: prepare -test: test_table_size +# disable this tese due to GPDB behavior change +# test: test_table_size test: test_fast_disk_check +test: test_insert_after_drop test: test_role test_schema test_drop_table test_column test_copy test_update test_toast test_truncate test_reschema test_temp_role test_rename test_delete_quota test_mistake test: test_truncate test: test_delete_quota @@ -11,4 +13,3 @@ test: test_primary_failure test: test_extension test: test_manytable test: clean -test: test_insert_after_drop From 656a5f3a067fd22fb81f2bf84be4231fa7202e10 Mon Sep 17 00:00:00 2001 From: Haozhou Wang Date: Fri, 18 Dec 2020 09:55:47 +0000 Subject: [PATCH 062/330] update diskquota_schedule_int to same as diskquota_schedule --- diskquota_schedule | 2 +- diskquota_schedule_int | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/diskquota_schedule b/diskquota_schedule index 2a92cec86d0..79f37f634b8 100644 --- a/diskquota_schedule +++ b/diskquota_schedule @@ -3,7 +3,7 @@ test: prepare # disable this tese due to GPDB behavior change # test: test_table_size test: test_fast_disk_check -test: test_insert_after_drop +#test: test_insert_after_drop test: test_role test_schema test_drop_table test_column test_copy test_update test_toast test_truncate test_reschema test_temp_role test_rename test_delete_quota test_mistake test: test_truncate test: test_delete_quota diff --git a/diskquota_schedule_int b/diskquota_schedule_int index 101b4388142..0183c92a3d5 100644 --- a/diskquota_schedule_int +++ b/diskquota_schedule_int @@ -1,6 +1,6 @@ test: init test: prepare -test: test_table_size +#test: test_table_size test: test_fast_disk_check test: test_role test_schema test_drop_table test_column test_copy test_update test_toast test_truncate test_reschema test_temp_role test_rename test_delete_quota test_mistake test: test_truncate @@ -8,4 +8,4 @@ test: test_delete_quota test: test_partition test: test_manytable test: clean -test: test_insert_after_drop +#test: test_insert_after_drop From cb0a562731321dee0cfa843dc2790d6ff7ee942a Mon Sep 17 00:00:00 2001 From: Haozhou Wang Date: Tue, 5 Jan 2021 09:58:58 +0000 Subject: [PATCH 063/330] Change test script for new centos7 image on concourse --- concourse/scripts/test_diskquota.sh | 7 ++++++- 1 file changed, 6 
insertions(+), 1 deletion(-) diff --git a/concourse/scripts/test_diskquota.sh b/concourse/scripts/test_diskquota.sh index f792406b3bc..31cc0524088 100755 --- a/concourse/scripts/test_diskquota.sh +++ b/concourse/scripts/test_diskquota.sh @@ -5,6 +5,8 @@ set -exo pipefail CWDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" TOP_DIR=${CWDIR}/../../../ GPDB_CONCOURSE_DIR=${TOP_DIR}/gpdb_src/concourse/scripts +CUT_NUMBER=6 + source "${GPDB_CONCOURSE_DIR}/common.bash" function test(){ chown -R gpadmin:gpadmin ${TOP_DIR}; @@ -22,7 +24,7 @@ function test(){ trap "[ -s regression.diffs ] && grep -v GP_IGNORE regression.diffs" EXIT make installcheck [ -s regression.diffs ] && grep -v GP_IGNORE regression.diffs && exit 1 - ps -ef | grep postgres| grep qddir| cut -d ' ' -f 6 | xargs kill -9 + ps -ef | grep postgres| grep qddir| cut -d ' ' -f ${CUT_NUMBER} | xargs kill -9 export PGPORT=6001 echo "export PGPROT=\$PGPORT" >> /usr/local/greenplum-db-devel/greenplum_path.sh source /usr/local/greenplum-db-devel/greenplum_path.sh @@ -51,6 +53,9 @@ function _main() { time make_cluster time install_diskquota + if [ "${DISKQUOTA_OS}" == "rhel7" ]; then + CUT_NUMBER=5 + fi time test } From 42a2ad3271a5d2d4c77258ea17b8892f8f8a07f9 Mon Sep 17 00:00:00 2001 From: Haozhou Wang Date: Tue, 5 Jan 2021 10:24:41 +0000 Subject: [PATCH 064/330] Fix task yml to get the env param --- concourse/tasks/test_diskquota.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/concourse/tasks/test_diskquota.yml b/concourse/tasks/test_diskquota.yml index 0dadd31e9c2..aa622bacc7a 100644 --- a/concourse/tasks/test_diskquota.yml +++ b/concourse/tasks/test_diskquota.yml @@ -10,3 +10,4 @@ inputs: run: path: diskquota_src/concourse/scripts/test_diskquota.sh params: + DISKQUOTA_OS: From 47a021eb95c5095dee8d988c63e1b0456f6e2b00 Mon Sep 17 00:00:00 2001 From: Xiaoran Wang Date: Tue, 27 Jul 2021 09:49:43 +0800 Subject: [PATCH 065/330] Fix pipeline (#59) * Fix test_diskquota.sh * Replace docker-image with registry-image --- concourse/pipeline/pipeline.yml | 29 +++++++++++++++++------------ concourse/scripts/test_diskquota.sh | 6 +++--- 2 files changed, 20 insertions(+), 15 deletions(-) diff --git a/concourse/pipeline/pipeline.yml b/concourse/pipeline/pipeline.yml index c2f115610ba..1557e2f0e0a 100644 --- a/concourse/pipeline/pipeline.yml +++ b/concourse/pipeline/pipeline.yml @@ -11,7 +11,7 @@ groups: resource_types: - name: gcs - type: docker-image + type: registry-image source: repository: frodenas/gcs-resource @@ -20,27 +20,27 @@ resources: # Image Resources - name: centos-gpdb-dev-6 - type: docker-image + type: registry-image source: - repository: pivotaldata/centos-gpdb-dev - tag: '6-gcc6.2-llvm3.7' + repository: gcr.io/data-gpdb-public-images/gpdb5-centos6-build-test + tag: latest - name: centos-gpdb-dev-7 - type: docker-image + type: registry-image source: - repository: pivotaldata/centos-gpdb-dev - tag: '7-gcc6.2-llvm3.7' + repository: gcr.io/data-gpdb-public-images/gpdb5-centos7-build-test + tag: latest - name: ubuntu18-image-build - type: docker-image + type: registry-image source: - repository: pivotaldata/gpdb6-ubuntu18.04-build + repository: gcr.io/data-gpdb-public-images/gpdb6-ubuntu18.04-build tag: latest - name: ubuntu18-image-test - type: docker-image + type: registry-image source: - repository: pivotaldata/gpdb6-ubuntu18.04-test + repository: gcr.io/data-gpdb-public-images/gpdb6-ubuntu18.04-test tag: latest # Github Source Codes @@ -103,6 +103,8 @@ jobs: image: centos-gpdb-dev-7 input_mapping: bin_diskquota: 
diskquota_artifacts + params: + DISKQUOTA_OS: rhel7 - name: diskquota_centos6_build_test max_in_flight: 3 @@ -124,6 +126,8 @@ jobs: image: centos-gpdb-dev-6 input_mapping: bin_diskquota: diskquota_artifacts + params: + DISKQUOTA_OS: rhel6 - name: diskquota_ubuntu18_build_test max_in_flight: 3 @@ -146,4 +150,5 @@ jobs: image: ubuntu18-image-test input_mapping: bin_diskquota: diskquota_artifacts - + params: + DISKQUOTA_OS: ubuntu18.04 diff --git a/concourse/scripts/test_diskquota.sh b/concourse/scripts/test_diskquota.sh index 31cc0524088..54286c435b5 100755 --- a/concourse/scripts/test_diskquota.sh +++ b/concourse/scripts/test_diskquota.sh @@ -5,7 +5,7 @@ set -exo pipefail CWDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" TOP_DIR=${CWDIR}/../../../ GPDB_CONCOURSE_DIR=${TOP_DIR}/gpdb_src/concourse/scripts -CUT_NUMBER=6 +CUT_NUMBER=5 source "${GPDB_CONCOURSE_DIR}/common.bash" function test(){ @@ -53,8 +53,8 @@ function _main() { time make_cluster time install_diskquota - if [ "${DISKQUOTA_OS}" == "rhel7" ]; then - CUT_NUMBER=5 + if [ "${DISKQUOTA_OS}" == "ubuntu18.04" ]; then + CUT_NUMBER=6 fi time test From 3cef43b5535f29ea6f1d459408f840351f7cde2b Mon Sep 17 00:00:00 2001 From: Xiaoran Wang Date: Tue, 27 Jul 2021 15:56:02 +0800 Subject: [PATCH 066/330] Add upgrade job on pipeline(#60) Add the upgrade job in the pipeline This job tests the extension upgrading from 1.0 to 2.0. --- .gitignore | 3 + concourse/pipeline/pipeline.yml | 50 ++++++++++ concourse/scripts/test_common.sh | 41 ++++++++ concourse/scripts/test_diskquota.sh | 42 +-------- concourse/scripts/upgrade_extension.sh | 40 ++++++++ concourse/tasks/upgrade_extension.yml | 16 ++++ upgrade_test/Makefile | 5 + upgrade_test/README.md | 7 ++ upgrade_test/diskquota_schedule_upgrade | 10 ++ upgrade_test/expected/clean.out | 4 + upgrade_test/expected/dummy.out | 0 upgrade_test/expected/init.out | 21 +++++ upgrade_test/expected/install_new_version.out | 2 + upgrade_test/expected/prepare.out | 48 ++++++++++ upgrade_test/expected/set_config.out | 94 +++++++++++++++++++ upgrade_test/expected/test_delete_quota.out | 34 +++++++ upgrade_test/expected/test_rename.out | 64 +++++++++++++ upgrade_test/expected/test_reschema.out | 36 +++++++ upgrade_test/expected/test_role.out | 42 +++++++++ upgrade_test/expected/test_schema.out | 57 +++++++++++ upgrade_test/expected/test_temp_role.out | 36 +++++++ upgrade_test/expected/upgrade_extension.out | 2 + upgrade_test/init_file | 12 +++ upgrade_test/sql/clean.sql | 5 + upgrade_test/sql/dummy.sql | 0 upgrade_test/sql/init.sql | 19 ++++ upgrade_test/sql/install_new_version.sql | 2 + upgrade_test/sql/prepare.sql | 18 ++++ upgrade_test/sql/set_config.sql | 41 ++++++++ upgrade_test/sql/test.sh | 4 + upgrade_test/sql/test_delete_quota.sql | 19 ++++ upgrade_test/sql/test_manytable.sql | 29 ++++++ upgrade_test/sql/test_rename.sql | 50 ++++++++++ upgrade_test/sql/test_reschema.sql | 20 ++++ upgrade_test/sql/test_role.sql | 33 +++++++ upgrade_test/sql/test_schema.sql | 36 +++++++ upgrade_test/sql/test_temp_role.sql | 24 +++++ upgrade_test/sql/upgrade_extension.sql | 2 + 38 files changed, 928 insertions(+), 40 deletions(-) create mode 100644 concourse/scripts/test_common.sh create mode 100755 concourse/scripts/upgrade_extension.sh create mode 100644 concourse/tasks/upgrade_extension.yml create mode 100644 upgrade_test/Makefile create mode 100644 upgrade_test/README.md create mode 100644 upgrade_test/diskquota_schedule_upgrade create mode 100644 upgrade_test/expected/clean.out create mode 100644 
upgrade_test/expected/dummy.out create mode 100644 upgrade_test/expected/init.out create mode 100644 upgrade_test/expected/install_new_version.out create mode 100644 upgrade_test/expected/prepare.out create mode 100644 upgrade_test/expected/set_config.out create mode 100644 upgrade_test/expected/test_delete_quota.out create mode 100644 upgrade_test/expected/test_rename.out create mode 100644 upgrade_test/expected/test_reschema.out create mode 100644 upgrade_test/expected/test_role.out create mode 100644 upgrade_test/expected/test_schema.out create mode 100644 upgrade_test/expected/test_temp_role.out create mode 100644 upgrade_test/expected/upgrade_extension.out create mode 100644 upgrade_test/init_file create mode 100644 upgrade_test/sql/clean.sql create mode 100644 upgrade_test/sql/dummy.sql create mode 100644 upgrade_test/sql/init.sql create mode 100644 upgrade_test/sql/install_new_version.sql create mode 100644 upgrade_test/sql/prepare.sql create mode 100644 upgrade_test/sql/set_config.sql create mode 100644 upgrade_test/sql/test.sh create mode 100644 upgrade_test/sql/test_delete_quota.sql create mode 100644 upgrade_test/sql/test_manytable.sql create mode 100644 upgrade_test/sql/test_rename.sql create mode 100644 upgrade_test/sql/test_reschema.sql create mode 100644 upgrade_test/sql/test_role.sql create mode 100644 upgrade_test/sql/test_schema.sql create mode 100644 upgrade_test/sql/test_temp_role.sql create mode 100644 upgrade_test/sql/upgrade_extension.sql diff --git a/.gitignore b/.gitignore index 23c8e91e16f..6ad453cf35c 100644 --- a/.gitignore +++ b/.gitignore @@ -4,3 +4,6 @@ regression.out regression.diffs /results/ +upgrade_test/regression.out +upgrade_test/regression.diffs +upgrade_test/results diff --git a/concourse/pipeline/pipeline.yml b/concourse/pipeline/pipeline.yml index 1557e2f0e0a..12397adb87c 100644 --- a/concourse/pipeline/pipeline.yml +++ b/concourse/pipeline/pipeline.yml @@ -3,11 +3,20 @@ ## ====================================================================== groups: +- name: ALL + jobs: + - diskquota_centos6_build_test + - diskquota_centos7_build_test + - diskquota_ubuntu18_build_test + - diskquota_centos7_extension_upgrade_1.0_2.0 - name: GPDB6 jobs: - diskquota_centos6_build_test - diskquota_centos7_build_test - diskquota_ubuntu18_build_test +- name: GPDB6_UPGRADE + jobs: + - diskquota_centos7_extension_upgrade_1.0_2.0 resource_types: - name: gcs @@ -43,6 +52,20 @@ resources: repository: gcr.io/data-gpdb-public-images/gpdb6-ubuntu18.04-test tag: latest +- name: bin_diskquota_centos7_1.0 + type: gcs + source: + bucket: {{gcs-bucket}} + json_key: {{concourse-gcs-resources-service-account-key}} + regexp: diskquota/released/gpdb6/diskquota-(1.*)-rhel7_x86_64.tar.gz + +- name: bin_diskquota_centos7 + type: gcs + source: + bucket: {{gcs-bucket-dev}} + json_key: {{concourse-gcs-resources-service-account-key}} + versioned_file: diskquota/intermediates/gpdb6/diskquota-master-rhel7_x86_64.tar.gz + # Github Source Codes - name: gpdb_src @@ -105,6 +128,10 @@ jobs: bin_diskquota: diskquota_artifacts params: DISKQUOTA_OS: rhel7 + - aggregate: + - put: bin_diskquota_centos7 + params: + file: diskquota_artifacts/diskquota*.tar.gz - name: diskquota_centos6_build_test max_in_flight: 3 @@ -152,3 +179,26 @@ jobs: bin_diskquota: diskquota_artifacts params: DISKQUOTA_OS: ubuntu18.04 + +- name: diskquota_centos7_extension_upgrade_1.0_2.0 + max_in_flight: 3 + plan: + - aggregate: + - get: centos-gpdb-dev-7 + - get: bin_diskquota_old + resource: bin_diskquota_centos7_1.0 + - get: 
bin_diskquota_centos7 + trigger: true + - get: bin_gpdb + resource: bin_gpdb_centos7 + - get: gpdb_src + - get: diskquota_src + - task: upgrade_extension + file: diskquota_src/concourse/tasks/upgrade_extension.yml + input_mapping: + bin_diskquota_new: bin_diskquota_centos7 + image: centos-gpdb-dev-7 + params: + DISKQUOTA_OS: rhel7 + OLD_VERSION: "1.0" + NEW_VERSION: "2.0" diff --git a/concourse/scripts/test_common.sh b/concourse/scripts/test_common.sh new file mode 100644 index 00000000000..5d19539aba2 --- /dev/null +++ b/concourse/scripts/test_common.sh @@ -0,0 +1,41 @@ +# the directory to run the "make install" as the param +function test(){ + chown -R gpadmin:gpadmin ${TOP_DIR}; + cat > /home/gpadmin/test.sh <<-EOF + set -exo pipefail + source gpdb_src/gpAux/gpdemo/gpdemo-env.sh + echo "export MASTER_DATA_DIRECTORY=\$MASTER_DATA_DIRECTORY" >> /usr/local/greenplum-db-devel/greenplum_path.sh + source /usr/local/greenplum-db-devel/greenplum_path.sh + createdb diskquota + gpconfig -c shared_preload_libraries -v 'diskquota' + gpstop -arf + gpconfig -c diskquota.naptime -v 1 + gpstop -arf + # the dir to run the "make install" command + pushd $1 + trap "[ -s regression.diffs ] && grep -v GP_IGNORE regression.diffs" EXIT + make installcheck + [ -s regression.diffs ] && grep -v GP_IGNORE regression.diffs && exit 1 + ps -ef | grep postgres| grep qddir| cut -d ' ' -f ${CUT_NUMBER} | xargs kill -9 + export PGPORT=6001 + echo "export PGPROT=\$PGPORT" >> /usr/local/greenplum-db-devel/greenplum_path.sh + source /usr/local/greenplum-db-devel/greenplum_path.sh + rm /tmp/.s.PGSQL.6000* + gpactivatestandby -ad ${TOP_DIR}/gpdb_src/gpAux/gpdemo/datadirs/standby + make installcheck + [ -s regression.diffs ] && grep -v GP_IGNORE regression.diffs && exit 1 + popd + EOF + export MASTER_DATA_DIRECTORY=${TOP_DIR}/gpdb_src/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 + chown gpadmin:gpadmin /home/gpadmin/test.sh + chmod a+x /home/gpadmin/test.sh + su gpadmin -c "bash /home/gpadmin/test.sh" +} + +function setup_gpadmin_user() { + ${GPDB_CONCOURSE_DIR}/setup_gpadmin_user.bash +} + +function install_diskquota() { + tar -xzf bin_diskquota/*.tar.gz -C /usr/local/greenplum-db-devel +} diff --git a/concourse/scripts/test_diskquota.sh b/concourse/scripts/test_diskquota.sh index 54286c435b5..b7573776540 100755 --- a/concourse/scripts/test_diskquota.sh +++ b/concourse/scripts/test_diskquota.sh @@ -8,45 +8,7 @@ GPDB_CONCOURSE_DIR=${TOP_DIR}/gpdb_src/concourse/scripts CUT_NUMBER=5 source "${GPDB_CONCOURSE_DIR}/common.bash" -function test(){ - chown -R gpadmin:gpadmin ${TOP_DIR}; - cat > /home/gpadmin/test.sh <<-EOF - set -exo pipefail - source gpdb_src/gpAux/gpdemo/gpdemo-env.sh - echo "export MASTER_DATA_DIRECTORY=\$MASTER_DATA_DIRECTORY" >> /usr/local/greenplum-db-devel/greenplum_path.sh - source /usr/local/greenplum-db-devel/greenplum_path.sh - createdb diskquota - gpconfig -c shared_preload_libraries -v 'diskquota' - gpstop -arf - gpconfig -c diskquota.naptime -v 1 - gpstop -arf - pushd diskquota_src - trap "[ -s regression.diffs ] && grep -v GP_IGNORE regression.diffs" EXIT - make installcheck - [ -s regression.diffs ] && grep -v GP_IGNORE regression.diffs && exit 1 - ps -ef | grep postgres| grep qddir| cut -d ' ' -f ${CUT_NUMBER} | xargs kill -9 - export PGPORT=6001 - echo "export PGPROT=\$PGPORT" >> /usr/local/greenplum-db-devel/greenplum_path.sh - source /usr/local/greenplum-db-devel/greenplum_path.sh - rm /tmp/.s.PGSQL.6000* - gpactivatestandby -ad ${TOP_DIR}/gpdb_src/gpAux/gpdemo/datadirs/standby - make installcheck - 
[ -s regression.diffs ] && grep -v GP_IGNORE regression.diffs && exit 1 - popd - EOF - export MASTER_DATA_DIRECTORY=${TOP_DIR}/gpdb_src/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 - chown gpadmin:gpadmin /home/gpadmin/test.sh - chmod a+x /home/gpadmin/test.sh - su gpadmin -c "bash /home/gpadmin/test.sh" -} - -function setup_gpadmin_user() { - ${GPDB_CONCOURSE_DIR}/setup_gpadmin_user.bash -} - -function install_diskquota() { - tar -xzf bin_diskquota/*.tar.gz -C /usr/local/greenplum-db-devel -} +source "${TOP_DIR}/diskquota_src/concourse/scripts/test_common.sh" function _main() { time install_gpdb time setup_gpadmin_user @@ -57,7 +19,7 @@ function _main() { CUT_NUMBER=6 fi - time test + time test ${TOP_DIR}/diskquota_src/ } _main "$@" diff --git a/concourse/scripts/upgrade_extension.sh b/concourse/scripts/upgrade_extension.sh new file mode 100755 index 00000000000..10316486346 --- /dev/null +++ b/concourse/scripts/upgrade_extension.sh @@ -0,0 +1,40 @@ +#!/bin/bash -l + +set -exo pipefail + +CWDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +TOP_DIR=${CWDIR}/../../../ +GPDB_CONCOURSE_DIR=${TOP_DIR}/gpdb_src/concourse/scripts +CUT_NUMBER=5 + +source "${GPDB_CONCOURSE_DIR}/common.bash" +source "${TOP_DIR}/diskquota_src/concourse/scripts/test_common.sh" + +function install_old_version_diskquota() { + tar -xzf bin_diskquota_old/*.tar.gz -C /usr/local/greenplum-db-devel +} + +# this function is called by upgrade_test/sql/upgrade_extension.sql +function install_new_version_diskquota() { + # the current dir is upgrade_test + tar -xzf ../../bin_diskquota_new/*.tar.gz -C /usr/local/greenplum-db-devel +} + +function _main() { + time install_gpdb + time setup_gpadmin_user + + time make_cluster + if [ "${DISKQUOTA_OS}" == "ubuntu18.04" ]; then + CUT_NUMBER=6 + fi + + # install old_version diskquota + time install_old_version_diskquota + # export install_new_version_diskquota function, becuase it will + # be called by upgrade_test/sql/upgrade_extension.sql + export -f install_new_version_diskquota + time test ${TOP_DIR}/diskquota_src/upgrade_test +} + +_main "$@" diff --git a/concourse/tasks/upgrade_extension.yml b/concourse/tasks/upgrade_extension.yml new file mode 100644 index 00000000000..d0d1bf9d14d --- /dev/null +++ b/concourse/tasks/upgrade_extension.yml @@ -0,0 +1,16 @@ +platform: linux +image_resource: + type: docker-image +inputs: + - name: bin_gpdb + - name: bin_diskquota_old + - name: bin_diskquota_new + - name: gpdb_src + - name: diskquota_src + +run: + path: diskquota_src/concourse/scripts/upgrade_extension.sh +params: + DISKQUOTA_OS: + OLD_VERSION: + NEW_VERSION: diff --git a/upgrade_test/Makefile b/upgrade_test/Makefile new file mode 100644 index 00000000000..7acfdda526d --- /dev/null +++ b/upgrade_test/Makefile @@ -0,0 +1,5 @@ +REGRESS = dummy + +REGRESS_OPTS = --schedule=diskquota_schedule_upgrade --init-file=init_file +PGXS := $(shell pg_config --pgxs) +include $(PGXS) diff --git a/upgrade_test/README.md b/upgrade_test/README.md new file mode 100644 index 00000000000..fc8c1b23033 --- /dev/null +++ b/upgrade_test/README.md @@ -0,0 +1,7 @@ +# upgrade_extension test +The upgrade_extension test case will fail if +run it locally. Because it calls +"install_new_version_diskquota" function which is +defined in concourse/scripts/upgrade_extension.sh. +You can write this function by yourself and +export it locally if you want to run it successfully. 
diff --git a/upgrade_test/diskquota_schedule_upgrade b/upgrade_test/diskquota_schedule_upgrade new file mode 100644 index 00000000000..fe44b7a10f7 --- /dev/null +++ b/upgrade_test/diskquota_schedule_upgrade @@ -0,0 +1,10 @@ +# run by old version diskquota +test: init +test: prepare +test: set_config +# upgrade diskquota to new version +test: install_new_version +test: upgrade_extension +# run by new version diskquota +test: test_role test_schema test_reschema test_temp_role test_rename test_delete_quota +test: clean diff --git a/upgrade_test/expected/clean.out b/upgrade_test/expected/clean.out new file mode 100644 index 00000000000..4712add5c30 --- /dev/null +++ b/upgrade_test/expected/clean.out @@ -0,0 +1,4 @@ +DROP TABLE badquota.t1; +DROP ROLE testbody; +DROP SCHEMA badquota; +DROP EXTENSION diskquota; diff --git a/upgrade_test/expected/dummy.out b/upgrade_test/expected/dummy.out new file mode 100644 index 00000000000..e69de29bb2d diff --git a/upgrade_test/expected/init.out b/upgrade_test/expected/init.out new file mode 100644 index 00000000000..0f149d65ca9 --- /dev/null +++ b/upgrade_test/expected/init.out @@ -0,0 +1,21 @@ +-- start_ignore +\! gpconfig -c shared_preload_libraries -v diskquota > /dev/null +-- end_ignore +\! echo $? +0 +-- start_ignore +\! gpconfig -c diskquota.naptime -v 2 > /dev/null +-- end_ignore +\! echo $? +0 +-- start_ignore +\! gpconfig -c max_worker_processes -v 20 > /dev/null +-- end_ignore +\! echo $? +0 +-- start_ignore +\! gpstop -raf > /dev/null +-- end_ignore +\! echo $? +0 +\! sleep 10 diff --git a/upgrade_test/expected/install_new_version.out b/upgrade_test/expected/install_new_version.out new file mode 100644 index 00000000000..cbc8a21ca0b --- /dev/null +++ b/upgrade_test/expected/install_new_version.out @@ -0,0 +1,2 @@ +\! install_new_version_diskquota +\! gpstop -raf > /dev/null diff --git a/upgrade_test/expected/prepare.out b/upgrade_test/expected/prepare.out new file mode 100644 index 00000000000..80f84f0c281 --- /dev/null +++ b/upgrade_test/expected/prepare.out @@ -0,0 +1,48 @@ +CREATE EXTENSION diskquota; +-- start_ignore +\! gpstop -u +20190319:07:07:05:020219 gpstop:df38f510da4b:gpadmin-[INFO]:-Starting gpstop with args: -u +20190319:07:07:05:020219 gpstop:df38f510da4b:gpadmin-[INFO]:-Gathering information and validating the environment... +20190319:07:07:05:020219 gpstop:df38f510da4b:gpadmin-[INFO]:-Obtaining Greenplum Master catalog information +20190319:07:07:05:020219 gpstop:df38f510da4b:gpadmin-[INFO]:-Obtaining Segment details from master... +20190319:07:07:05:020219 gpstop:df38f510da4b:gpadmin-[INFO]:-Greenplum Version: 'postgres (Greenplum Database) 6.0.0-alpha.0+dev.16105.gdfbfc2b build dev' +20190319:07:07:05:020219 gpstop:df38f510da4b:gpadmin-[INFO]:-Signalling all postmaster processes to reload +SELECT diskquota.init_table_size_table(); + init_table_size_table +----------------------- + +(1 row) + +-- end_ignore +SELECT pg_sleep(15); + pg_sleep +---------- + +(1 row) + +-- prepare a schema that has reached quota limit +CREATE SCHEMA badquota; +SELECT diskquota.set_schema_quota('badquota', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +DROP ROLE IF EXISTS testbody; +NOTICE: role "testbody" does not exist, skipping +CREATE ROLE testbody; +NOTICE: resource queue required -- using default resource queue "pg_default" +CREATE TABLE badquota.t1(i INT); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. 
+HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +ALTER TABLE badquota.t1 OWNER TO testbody; +INSERT INTO badquota.t1 SELECT generate_series(0, 100000); +SELECT pg_sleep(10); + pg_sleep +---------- + +(1 row) + +-- expect fail +INSERT INTO badquota.t1 SELECT generate_series(0, 10); +ERROR: schema's disk space quota exceeded with name:badquota diff --git a/upgrade_test/expected/set_config.out b/upgrade_test/expected/set_config.out new file mode 100644 index 00000000000..221aaf76518 --- /dev/null +++ b/upgrade_test/expected/set_config.out @@ -0,0 +1,94 @@ +-- Test schema +CREATE SCHEMA s1; +SELECT diskquota.set_schema_quota('s1', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +-- Test delete disk quota +CREATE SCHEMA deleteschema; +SELECT diskquota.set_schema_quota('deleteschema', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +-- test rename schema +CREATE SCHEMA srs1; +SELECT diskquota.set_schema_quota('srs1', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +-- test rename role +CREATE SCHEMA srr1; +DROP ROLE IF EXISTS srerole; +CREATE ROLE srerole NOLOGIN; +NOTICE: resource queue required -- using default resource queue "pg_default" +SELECT diskquota.set_role_quota('srerole', '1MB'); + set_role_quota +---------------- + +(1 row) + +-- Test re-set_schema_quota +CREATE SCHEMA srE; +SELECT diskquota.set_schema_quota('srE', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +-- Test role quota +CREATE SCHEMA srole; +DROP ROLE IF EXISTS u1; +NOTICE: role "u1" does not exist, skipping +DROP ROLE IF EXISTS u2; +NOTICE: role "u2" does not exist, skipping +CREATE ROLE u1 NOLOGIN; +NOTICE: resource queue required -- using default resource queue "pg_default" +CREATE ROLE u2 NOLOGIN; +NOTICE: resource queue required -- using default resource queue "pg_default" +CREATE TABLE b (t TEXT); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 't' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +ALTER TABLE b OWNER TO u1; +CREATE TABLE b2 (t TEXT); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 't' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
+ALTER TABLE b2 OWNER TO u1; +SELECT diskquota.set_role_quota('u1', '1 MB'); + set_role_quota +---------------- + +(1 row) + +-- Test temp table restrained by role id +CREATE SCHEMA strole; +DROP ROLE IF EXISTS u3temp; +CREATE ROLE u3temp NOLOGIN; +NOTICE: resource queue required -- using default resource queue "pg_default" +SELECT diskquota.set_role_quota('u3temp', '1MB'); + set_role_quota +---------------- + +(1 row) + +-- Test toast +CREATE SCHEMA s5; +SELECT diskquota.set_schema_quota('s5', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +-- Test truncate +CREATE SCHEMA s7; +SELECT diskquota.set_schema_quota('s7', '1 MB'); + set_schema_quota +------------------ + +(1 row) + diff --git a/upgrade_test/expected/test_delete_quota.out b/upgrade_test/expected/test_delete_quota.out new file mode 100644 index 00000000000..cbc3928ed9a --- /dev/null +++ b/upgrade_test/expected/test_delete_quota.out @@ -0,0 +1,34 @@ +-- Test delete disk quota +-- CREATE SCHEMA deleteschema; +-- SELECT diskquota.set_schema_quota('deleteschema', '1 MB'); +SET search_path TO deleteschema; +CREATE TABLE c (i INT); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +-- expect failed +INSERT INTO c SELECT generate_series(1,100000); +SELECT pg_sleep(10); + pg_sleep +---------- + +(1 row) + +-- expect fail +INSERT INTO c SELECT generate_series(1,100); +ERROR: schema's disk space quota exceeded with name:deleteschema +SELECT diskquota.set_schema_quota('deleteschema', '-1 MB'); + set_schema_quota +------------------ + +(1 row) + +SELECT pg_sleep(20); + pg_sleep +---------- + +(1 row) + +INSERT INTO c SELECT generate_series(1,100); +DROP TABLE c; +RESET search_path; +DROP SCHEMA deleteschema; diff --git a/upgrade_test/expected/test_rename.out b/upgrade_test/expected/test_rename.out new file mode 100644 index 00000000000..198d5609c63 --- /dev/null +++ b/upgrade_test/expected/test_rename.out @@ -0,0 +1,64 @@ +-- test rename schema +-- CREATE SCHEMA srs1; +-- SELECT diskquota.set_schema_quota('srs1', '1 MB'); +set search_path to srs1; +CREATE TABLE a(i int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
+-- expect insert fail +INSERT INTO a SELECT generate_series(1,100000); +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- expect insert fail +INSERT INTO a SELECT generate_series(1,10); +ERROR: schema's disk space quota exceeded with name:srs1 +ALTER SCHEMA srs1 RENAME TO srs2; +SET search_path TO srs2; +-- expect insert fail +INSERT INTO a SELECT generate_series(1,10); +ERROR: schema's disk space quota exceeded with name:srs2 +-- test rename table +ALTER TABLE a RENAME TO a2; +-- expect insert fail +INSERT INTO a2 SELECT generate_series(1,10); +ERROR: schema's disk space quota exceeded with name:srs2 +DROP TABLE a2; +RESET search_path; +DROP SCHEMA srs2; +-- test rename role +-- CREATE SCHEMA srr1; +-- CREATE ROLE srerole NOLOGIN; +-- SELECT diskquota.set_role_quota('srerole', '1MB'); +SET search_path TO srr1; +CREATE TABLE a(i int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +ALTER TABLE a OWNER TO srerole; +-- expect insert fail +INSERT INTO a SELECT generate_series(1,100000); +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- expect insert fail +INSERT INTO a SELECT generate_series(1,10); +ERROR: role's disk space quota exceeded with name:srerole +ALTER ROLE srerole RENAME TO srerole2; +-- expect insert fail +INSERT INTO a SELECT generate_series(1,10); +ERROR: role's disk space quota exceeded with name:srerole2 +-- test rename table +ALTER TABLE a RENAME TO a2; +-- expect insert fail +INSERT INTO a2 SELECT generate_series(1,10); +ERROR: role's disk space quota exceeded with name:srerole2 +DROP TABLE a2; +DROP ROLE srerole2; +RESET search_path; +DROP SCHEMA srr1; diff --git a/upgrade_test/expected/test_reschema.out b/upgrade_test/expected/test_reschema.out new file mode 100644 index 00000000000..0bbe07c3881 --- /dev/null +++ b/upgrade_test/expected/test_reschema.out @@ -0,0 +1,36 @@ +-- Test re-set_schema_quota +-- CREATE SCHEMA srE; +-- SELECT diskquota.set_schema_quota('srE', '1 MB'); +SET search_path TO srE; +CREATE TABLE a(i int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
+-- expect insert fail +INSERT INTO a SELECT generate_series(1,100000); +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- expect insert fail when exceed quota limit +INSERT INTO a SELECT generate_series(1,1000); +ERROR: schema's disk space quota exceeded with name:sre +-- set schema quota larger +SELECT diskquota.set_schema_quota('srE', '1 GB'); + set_schema_quota +------------------ + +(1 row) + +SELECT pg_sleep(20); + pg_sleep +---------- + +(1 row) + +-- expect insert succeed +INSERT INTO a SELECT generate_series(1,1000); +DROP TABLE a; +RESET search_path; +DROP SCHEMA srE; diff --git a/upgrade_test/expected/test_role.out b/upgrade_test/expected/test_role.out new file mode 100644 index 00000000000..bfbc5960aa3 --- /dev/null +++ b/upgrade_test/expected/test_role.out @@ -0,0 +1,42 @@ +-- Test role quota +-- CREATE SCHEMA srole; +-- SET search_path TO srole; +-- +-- CREATE ROLE u1 NOLOGIN; +-- CREATE ROLE u2 NOLOGIN; +-- CREATE TABLE b (t TEXT); +-- ALTER TABLE b OWNER TO u1; +-- CREATE TABLE b2 (t TEXT); +-- ALTER TABLE b2 OWNER TO u1; +-- +-- SELECT diskquota.set_role_quota('u1', '1 MB'); +INSERT INTO b SELECT generate_series(1,100); +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100000); +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); +ERROR: role's disk space quota exceeded with name:u1 +-- expect insert fail +INSERT INTO b2 SELECT generate_series(1,100); +ERROR: role's disk space quota exceeded with name:u1 +ALTER TABLE b OWNER TO u2; +SELECT pg_sleep(20); + pg_sleep +---------- + +(1 row) + +-- expect insert succeed +INSERT INTO b SELECT generate_series(1,100); +-- expect insert succeed +INSERT INTO b2 SELECT generate_series(1,100); +DROP TABLE b, b2; +DROP ROLE u1, u2; +RESET search_path; +DROP SCHEMA srole; diff --git a/upgrade_test/expected/test_schema.out b/upgrade_test/expected/test_schema.out new file mode 100644 index 00000000000..aa2011dba28 --- /dev/null +++ b/upgrade_test/expected/test_schema.out @@ -0,0 +1,57 @@ +-- Test schema +-- CREATE SCHEMA s1; +-- SELECT diskquota.set_schema_quota('s1', '1 MB'); +SET search_path TO s1; +CREATE TABLE a(i int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +INSERT INTO a SELECT generate_series(1,100); +-- expect insert fail +INSERT INTO a SELECT generate_series(1,100000); +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- expect insert fail +INSERT INTO a SELECT generate_series(1,100); +ERROR: schema's disk space quota exceeded with name:s1 +CREATE TABLE a2(i int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
+-- expect insert fail +INSERT INTO a2 SELECT generate_series(1,100); +ERROR: schema's disk space quota exceeded with name:s1 +-- Test alter table set schema +CREATE SCHEMA s2; +ALTER TABLE s1.a SET SCHEMA s2; +SELECT pg_sleep(20); + pg_sleep +---------- + +(1 row) + +-- expect insert succeed +INSERT INTO a2 SELECT generate_series(1,200); +-- expect insert succeed +INSERT INTO s2.a SELECT generate_series(1,200); +ALTER TABLE s2.a SET SCHEMA badquota; +-- expect failed +INSERT INTO badquota.a SELECT generate_series(0, 100); +ERROR: schema's disk space quota exceeded with name:badquota +SELECT pg_sleep(10); + pg_sleep +---------- + +(1 row) + +SELECT schema_name, quota_in_mb FROM diskquota.show_fast_schema_quota_view WHERE schema_name = 's1'; + schema_name | quota_in_mb +-------------+------------- + s1 | 1 +(1 row) + +RESET search_path; +DROP TABLE s1.a2, badquota.a; +DROP SCHEMA s1, s2; diff --git a/upgrade_test/expected/test_temp_role.out b/upgrade_test/expected/test_temp_role.out new file mode 100644 index 00000000000..f867127339f --- /dev/null +++ b/upgrade_test/expected/test_temp_role.out @@ -0,0 +1,36 @@ +-- Test temp table restrained by role id +-- CREATE SCHEMA strole; +-- CREATE ROLE u3temp NOLOGIN; +SET search_path TO strole; +-- SELECT diskquota.set_role_quota('u3temp', '1MB'); +CREATE TABLE a(i int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +ALTER TABLE a OWNER TO u3temp; +CREATE TEMP TABLE ta(i int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +ALTER TABLE ta OWNER TO u3temp; +-- expected failed: fill temp table +INSERT INTO ta SELECT generate_series(1,100000); +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- expected failed: +INSERT INTO a SELECT generate_series(1,100); +ERROR: role's disk space quota exceeded with name:u3temp +DROP TABLE ta; +SELECT pg_sleep(20); + pg_sleep +---------- + +(1 row) + +INSERT INTO a SELECT generate_series(1,100); +DROP TABLE a; +DROP ROLE u3temp; +RESET search_path; +DROP SCHEMA strole; diff --git a/upgrade_test/expected/upgrade_extension.out b/upgrade_test/expected/upgrade_extension.out new file mode 100644 index 00000000000..4f3982d3c80 --- /dev/null +++ b/upgrade_test/expected/upgrade_extension.out @@ -0,0 +1,2 @@ +\set new_version `echo $NEW_VERSION` +alter extension diskquota update to :'new_version'; diff --git a/upgrade_test/init_file b/upgrade_test/init_file new file mode 100644 index 00000000000..5261e4efb5d --- /dev/null +++ b/upgrade_test/init_file @@ -0,0 +1,12 @@ +-- This file contains global patterns of messages that should be ignored or +-- masked out, when comparing test results with the expected output. +-- Individual tests can contain additional patterns specific to the test. 
+ +-- start_matchignore +-- end_matchignore +-- start_matchsubs +m/diskquota.c:\d+\)/ +s/diskquota.c:\d+\)/diskquota.c:xxx/ +m/diskquota_utility.c:\d+\)/ +s/diskquota_utility.c:\d+\)/diskquota_utility.c:xxx/ +-- end_matchsubs diff --git a/upgrade_test/sql/clean.sql b/upgrade_test/sql/clean.sql new file mode 100644 index 00000000000..bf71fcb0d19 --- /dev/null +++ b/upgrade_test/sql/clean.sql @@ -0,0 +1,5 @@ +DROP TABLE badquota.t1; +DROP ROLE testbody; +DROP SCHEMA badquota; + +DROP EXTENSION diskquota; diff --git a/upgrade_test/sql/dummy.sql b/upgrade_test/sql/dummy.sql new file mode 100644 index 00000000000..e69de29bb2d diff --git a/upgrade_test/sql/init.sql b/upgrade_test/sql/init.sql new file mode 100644 index 00000000000..e8b1d49854f --- /dev/null +++ b/upgrade_test/sql/init.sql @@ -0,0 +1,19 @@ +-- start_ignore +\! gpconfig -c shared_preload_libraries -v diskquota > /dev/null +-- end_ignore +\! echo $? +-- start_ignore +\! gpconfig -c diskquota.naptime -v 2 > /dev/null +-- end_ignore +\! echo $? +-- start_ignore +\! gpconfig -c max_worker_processes -v 20 > /dev/null +-- end_ignore +\! echo $? + +-- start_ignore +\! gpstop -raf > /dev/null +-- end_ignore +\! echo $? + +\! sleep 10 diff --git a/upgrade_test/sql/install_new_version.sql b/upgrade_test/sql/install_new_version.sql new file mode 100644 index 00000000000..cbc8a21ca0b --- /dev/null +++ b/upgrade_test/sql/install_new_version.sql @@ -0,0 +1,2 @@ +\! install_new_version_diskquota +\! gpstop -raf > /dev/null diff --git a/upgrade_test/sql/prepare.sql b/upgrade_test/sql/prepare.sql new file mode 100644 index 00000000000..610e3df17d4 --- /dev/null +++ b/upgrade_test/sql/prepare.sql @@ -0,0 +1,18 @@ +CREATE EXTENSION diskquota; +-- start_ignore +\! gpstop -u +SELECT diskquota.init_table_size_table(); +-- end_ignore +SELECT pg_sleep(15); + +-- prepare a schema that has reached quota limit +CREATE SCHEMA badquota; +SELECT diskquota.set_schema_quota('badquota', '1 MB'); +DROP ROLE IF EXISTS testbody; +CREATE ROLE testbody; +CREATE TABLE badquota.t1(i INT); +ALTER TABLE badquota.t1 OWNER TO testbody; +INSERT INTO badquota.t1 SELECT generate_series(0, 100000); +SELECT pg_sleep(10); +-- expect fail +INSERT INTO badquota.t1 SELECT generate_series(0, 10); diff --git a/upgrade_test/sql/set_config.sql b/upgrade_test/sql/set_config.sql new file mode 100644 index 00000000000..316dcc913ca --- /dev/null +++ b/upgrade_test/sql/set_config.sql @@ -0,0 +1,41 @@ +-- Test schema +CREATE SCHEMA s1; +SELECT diskquota.set_schema_quota('s1', '1 MB'); +-- Test delete disk quota +CREATE SCHEMA deleteschema; +SELECT diskquota.set_schema_quota('deleteschema', '1 MB'); +-- test rename schema +CREATE SCHEMA srs1; +SELECT diskquota.set_schema_quota('srs1', '1 MB'); +-- test rename role +CREATE SCHEMA srr1; +DROP ROLE IF EXISTS srerole; +CREATE ROLE srerole NOLOGIN; +SELECT diskquota.set_role_quota('srerole', '1MB'); +-- Test re-set_schema_quota +CREATE SCHEMA srE; +SELECT diskquota.set_schema_quota('srE', '1 MB'); +-- Test role quota +CREATE SCHEMA srole; + +DROP ROLE IF EXISTS u1; +DROP ROLE IF EXISTS u2; +CREATE ROLE u1 NOLOGIN; +CREATE ROLE u2 NOLOGIN; +CREATE TABLE b (t TEXT); +ALTER TABLE b OWNER TO u1; +CREATE TABLE b2 (t TEXT); +ALTER TABLE b2 OWNER TO u1; + +SELECT diskquota.set_role_quota('u1', '1 MB'); +-- Test temp table restrained by role id +CREATE SCHEMA strole; +DROP ROLE IF EXISTS u3temp; +CREATE ROLE u3temp NOLOGIN; +SELECT diskquota.set_role_quota('u3temp', '1MB'); +-- Test toast +CREATE SCHEMA s5; +SELECT diskquota.set_schema_quota('s5', '1 MB'); 
+-- Test truncate +CREATE SCHEMA s7; +SELECT diskquota.set_schema_quota('s7', '1 MB'); diff --git a/upgrade_test/sql/test.sh b/upgrade_test/sql/test.sh new file mode 100644 index 00000000000..a720ced3678 --- /dev/null +++ b/upgrade_test/sql/test.sh @@ -0,0 +1,4 @@ +function install_new_version_diskquota() { + echo "install_new_version" +} +export -f install_new_version_diskquota diff --git a/upgrade_test/sql/test_delete_quota.sql b/upgrade_test/sql/test_delete_quota.sql new file mode 100644 index 00000000000..5f5abfc99f1 --- /dev/null +++ b/upgrade_test/sql/test_delete_quota.sql @@ -0,0 +1,19 @@ +-- Test delete disk quota +-- CREATE SCHEMA deleteschema; +-- SELECT diskquota.set_schema_quota('deleteschema', '1 MB'); +SET search_path TO deleteschema; + +CREATE TABLE c (i INT); +-- expect failed +INSERT INTO c SELECT generate_series(1,100000); +SELECT pg_sleep(10); +-- expect fail +INSERT INTO c SELECT generate_series(1,100); +SELECT diskquota.set_schema_quota('deleteschema', '-1 MB'); +SELECT pg_sleep(20); + +INSERT INTO c SELECT generate_series(1,100); + +DROP TABLE c; +RESET search_path; +DROP SCHEMA deleteschema; diff --git a/upgrade_test/sql/test_manytable.sql b/upgrade_test/sql/test_manytable.sql new file mode 100644 index 00000000000..dbd64ea64e7 --- /dev/null +++ b/upgrade_test/sql/test_manytable.sql @@ -0,0 +1,29 @@ +-- start_ignore +-- \! gpconfig -c diskquota.max_active_tables -v 2 > /dev/null +-- end_ignore +-- \! echo $? + +CREATE DATABASE test_manytable01; +CREATE DATABASE test_manytable02; + +\c test_manytable01 + +CREATE TABLE a01(i int) DISTRIBUTED BY (i); +CREATE TABLE a02(i int) DISTRIBUTED BY (i); +CREATE TABLE a03(i int) DISTRIBUTED BY (i); + +INSERT INTO a01 values(generate_series(0, 500)); +INSERT INTO a02 values(generate_series(0, 500)); +INSERT INTO a03 values(generate_series(0, 500)); + +\c test_manytable02 +CREATE TABLE b01(i int) DISTRIBUTED BY (i); +INSERT INTO b01 values(generate_series(0, 500)); + +\c postgres +DROP DATABASE test_manytable01; +DROP DATABASE test_manytable02; + +-- start_ignore +\! 
gpconfig -c diskquota.max_active_tables -v 1024 > /dev/null +-- end_ignore diff --git a/upgrade_test/sql/test_rename.sql b/upgrade_test/sql/test_rename.sql new file mode 100644 index 00000000000..394592322a1 --- /dev/null +++ b/upgrade_test/sql/test_rename.sql @@ -0,0 +1,50 @@ +-- test rename schema +-- CREATE SCHEMA srs1; +-- SELECT diskquota.set_schema_quota('srs1', '1 MB'); +set search_path to srs1; +CREATE TABLE a(i int); +-- expect insert fail +INSERT INTO a SELECT generate_series(1,100000); +SELECT pg_sleep(5); +-- expect insert fail +INSERT INTO a SELECT generate_series(1,10); +ALTER SCHEMA srs1 RENAME TO srs2; +SET search_path TO srs2; + +-- expect insert fail +INSERT INTO a SELECT generate_series(1,10); +-- test rename table +ALTER TABLE a RENAME TO a2; +-- expect insert fail +INSERT INTO a2 SELECT generate_series(1,10); + +DROP TABLE a2; +RESET search_path; +DROP SCHEMA srs2; + +-- test rename role +-- CREATE SCHEMA srr1; +-- CREATE ROLE srerole NOLOGIN; +-- SELECT diskquota.set_role_quota('srerole', '1MB'); +SET search_path TO srr1; +CREATE TABLE a(i int); +ALTER TABLE a OWNER TO srerole; + +-- expect insert fail +INSERT INTO a SELECT generate_series(1,100000); +SELECT pg_sleep(5); +-- expect insert fail +INSERT INTO a SELECT generate_series(1,10); +ALTER ROLE srerole RENAME TO srerole2; +-- expect insert fail +INSERT INTO a SELECT generate_series(1,10); +-- test rename table +ALTER TABLE a RENAME TO a2; +-- expect insert fail +INSERT INTO a2 SELECT generate_series(1,10); + +DROP TABLE a2; +DROP ROLE srerole2; +RESET search_path; +DROP SCHEMA srr1; + diff --git a/upgrade_test/sql/test_reschema.sql b/upgrade_test/sql/test_reschema.sql new file mode 100644 index 00000000000..0c5dca3e1e0 --- /dev/null +++ b/upgrade_test/sql/test_reschema.sql @@ -0,0 +1,20 @@ +-- Test re-set_schema_quota +-- CREATE SCHEMA srE; +-- SELECT diskquota.set_schema_quota('srE', '1 MB'); +SET search_path TO srE; +CREATE TABLE a(i int); +-- expect insert fail +INSERT INTO a SELECT generate_series(1,100000); +SELECT pg_sleep(5); +-- expect insert fail when exceed quota limit +INSERT INTO a SELECT generate_series(1,1000); +-- set schema quota larger +SELECT diskquota.set_schema_quota('srE', '1 GB'); +SELECT pg_sleep(20); +-- expect insert succeed +INSERT INTO a SELECT generate_series(1,1000); + +DROP TABLE a; +RESET search_path; +DROP SCHEMA srE; + diff --git a/upgrade_test/sql/test_role.sql b/upgrade_test/sql/test_role.sql new file mode 100644 index 00000000000..f9f8bd0e4c7 --- /dev/null +++ b/upgrade_test/sql/test_role.sql @@ -0,0 +1,33 @@ +-- Test role quota + +-- CREATE SCHEMA srole; +-- SET search_path TO srole; +-- +-- CREATE ROLE u1 NOLOGIN; +-- CREATE ROLE u2 NOLOGIN; +-- CREATE TABLE b (t TEXT); +-- ALTER TABLE b OWNER TO u1; +-- CREATE TABLE b2 (t TEXT); +-- ALTER TABLE b2 OWNER TO u1; +-- +-- SELECT diskquota.set_role_quota('u1', '1 MB'); + +INSERT INTO b SELECT generate_series(1,100); +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100000); +SELECT pg_sleep(5); +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); +-- expect insert fail +INSERT INTO b2 SELECT generate_series(1,100); +ALTER TABLE b OWNER TO u2; +SELECT pg_sleep(20); +-- expect insert succeed +INSERT INTO b SELECT generate_series(1,100); +-- expect insert succeed +INSERT INTO b2 SELECT generate_series(1,100); + +DROP TABLE b, b2; +DROP ROLE u1, u2; +RESET search_path; +DROP SCHEMA srole; diff --git a/upgrade_test/sql/test_schema.sql b/upgrade_test/sql/test_schema.sql new file mode 100644 index 
00000000000..ace95c36736 --- /dev/null +++ b/upgrade_test/sql/test_schema.sql @@ -0,0 +1,36 @@ +-- Test schema +-- CREATE SCHEMA s1; +-- SELECT diskquota.set_schema_quota('s1', '1 MB'); +SET search_path TO s1; + +CREATE TABLE a(i int); +INSERT INTO a SELECT generate_series(1,100); +-- expect insert fail +INSERT INTO a SELECT generate_series(1,100000); +SELECT pg_sleep(5); +-- expect insert fail +INSERT INTO a SELECT generate_series(1,100); +CREATE TABLE a2(i int); +-- expect insert fail +INSERT INTO a2 SELECT generate_series(1,100); + +-- Test alter table set schema +CREATE SCHEMA s2; +ALTER TABLE s1.a SET SCHEMA s2; +SELECT pg_sleep(20); +-- expect insert succeed +INSERT INTO a2 SELECT generate_series(1,200); +-- expect insert succeed +INSERT INTO s2.a SELECT generate_series(1,200); + +ALTER TABLE s2.a SET SCHEMA badquota; +-- expect failed +INSERT INTO badquota.a SELECT generate_series(0, 100); + +SELECT pg_sleep(10); +SELECT schema_name, quota_in_mb FROM diskquota.show_fast_schema_quota_view WHERE schema_name = 's1'; + +RESET search_path; +DROP TABLE s1.a2, badquota.a; +DROP SCHEMA s1, s2; + diff --git a/upgrade_test/sql/test_temp_role.sql b/upgrade_test/sql/test_temp_role.sql new file mode 100644 index 00000000000..9af1d9723d5 --- /dev/null +++ b/upgrade_test/sql/test_temp_role.sql @@ -0,0 +1,24 @@ +-- Test temp table restrained by role id +-- CREATE SCHEMA strole; +-- CREATE ROLE u3temp NOLOGIN; +SET search_path TO strole; + +-- SELECT diskquota.set_role_quota('u3temp', '1MB'); +CREATE TABLE a(i int); +ALTER TABLE a OWNER TO u3temp; +CREATE TEMP TABLE ta(i int); +ALTER TABLE ta OWNER TO u3temp; + +-- expected failed: fill temp table +INSERT INTO ta SELECT generate_series(1,100000); +SELECT pg_sleep(5); +-- expected failed: +INSERT INTO a SELECT generate_series(1,100); +DROP TABLE ta; +SELECT pg_sleep(20); +INSERT INTO a SELECT generate_series(1,100); + +DROP TABLE a; +DROP ROLE u3temp; +RESET search_path; +DROP SCHEMA strole; diff --git a/upgrade_test/sql/upgrade_extension.sql b/upgrade_test/sql/upgrade_extension.sql new file mode 100644 index 00000000000..4f3982d3c80 --- /dev/null +++ b/upgrade_test/sql/upgrade_extension.sql @@ -0,0 +1,2 @@ +\set new_version `echo $NEW_VERSION` +alter extension diskquota update to :'new_version'; From f08ecdb4b2074873674f9c25b98dc98ccdf698c1 Mon Sep 17 00:00:00 2001 From: Xiaoran Wang Date: Tue, 27 Jul 2021 17:27:46 +0800 Subject: [PATCH 067/330] Fix upgrade pipeline (#61) The upgrade test doesn't need to activate the standby and run the regress test again.
--- concourse/scripts/test_common.sh | 22 +++++++++++++--------- concourse/scripts/test_diskquota.sh | 2 +- concourse/scripts/upgrade_extension.sh | 2 +- 3 files changed, 15 insertions(+), 11 deletions(-) diff --git a/concourse/scripts/test_common.sh b/concourse/scripts/test_common.sh index 5d19539aba2..d3cf8f88084 100644 --- a/concourse/scripts/test_common.sh +++ b/concourse/scripts/test_common.sh @@ -1,4 +1,6 @@ -# the directory to run the "make install" as the param +# the first param is the directory in which to run "make install" +# the second param is a bool that decides whether to activate the standby +# and run the regress test again function test(){ chown -R gpadmin:gpadmin ${TOP_DIR}; cat > /home/gpadmin/test.sh <<-EOF @@ -16,14 +18,16 @@ function test(){ trap "[ -s regression.diffs ] && grep -v GP_IGNORE regression.diffs" EXIT make installcheck [ -s regression.diffs ] && grep -v GP_IGNORE regression.diffs && exit 1 - ps -ef | grep postgres| grep qddir| cut -d ' ' -f ${CUT_NUMBER} | xargs kill -9 - export PGPORT=6001 - echo "export PGPROT=\$PGPORT" >> /usr/local/greenplum-db-devel/greenplum_path.sh - source /usr/local/greenplum-db-devel/greenplum_path.sh - rm /tmp/.s.PGSQL.6000* - gpactivatestandby -ad ${TOP_DIR}/gpdb_src/gpAux/gpdemo/datadirs/standby - make installcheck - [ -s regression.diffs ] && grep -v GP_IGNORE regression.diffs && exit 1 + if $2 ; then + ps -ef | grep postgres| grep qddir| cut -d ' ' -f ${CUT_NUMBER} | xargs kill -9 + export PGPORT=6001 + echo "export PGPORT=\$PGPORT" >> /usr/local/greenplum-db-devel/greenplum_path.sh + source /usr/local/greenplum-db-devel/greenplum_path.sh + rm /tmp/.s.PGSQL.6000* + gpactivatestandby -ad ${TOP_DIR}/gpdb_src/gpAux/gpdemo/datadirs/standby + make installcheck + [ -s regression.diffs ] && grep -v GP_IGNORE regression.diffs && exit 1 + fi popd EOF export MASTER_DATA_DIRECTORY=${TOP_DIR}/gpdb_src/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 diff --git a/concourse/scripts/test_diskquota.sh b/concourse/scripts/test_diskquota.sh index b7573776540..69d7e943261 100755 --- a/concourse/scripts/test_diskquota.sh +++ b/concourse/scripts/test_diskquota.sh @@ -19,7 +19,7 @@ function _main() { CUT_NUMBER=6 fi - time test ${TOP_DIR}/diskquota_src/ + time test ${TOP_DIR}/diskquota_src/ true } _main "$@" diff --git a/concourse/scripts/upgrade_extension.sh b/concourse/scripts/upgrade_extension.sh index 10316486346..c1523f01caa 100755 --- a/concourse/scripts/upgrade_extension.sh +++ b/concourse/scripts/upgrade_extension.sh @@ -34,7 +34,7 @@ function _main() { # export install_new_version_diskquota function, because it will # be called by upgrade_test/sql/upgrade_extension.sql export -f install_new_version_diskquota - time test ${TOP_DIR}/diskquota_src/upgrade_test + time test ${TOP_DIR}/diskquota_src/upgrade_test false } _main "$@" From 6ae280b579191e5936223401901de8a201036bf7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Xuebin=20Su=20=28=E8=8B=8F=E5=AD=A6=E6=96=8C=29?= <12034000+xuebinsu@users.noreply.github.com> Date: Tue, 10 Aug 2021 11:12:33 +0800 Subject: [PATCH 068/330] Support tablespace-based quota (#58) Users can create a disk quota for a specified tablespace based on one schema or one role. The diskquota extension has been upgraded to version 2.0. diskquota--2.0.sql and diskquota--1.0--2.0.sql have been added.
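For illustration, a minimal usage sketch of the two new UDFs; the schema, role, and tablespace names are placeholders taken from the regression tests below, and per those tests a non-positive size removes the quota setting:

-- limit schema spcs1 to 1 MB of disk usage within tablespace schemaspc
SELECT diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc', '1 MB');
-- limit role rolespcu1 to 10 MB of disk usage within tablespace rolespc
SELECT diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '10 MB');
-- a non-positive size deletes the quota config
SELECT diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc', '0 MB');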
Co-authored-by: Xiaoran Wang --- .gitignore | 2 + Makefile | 2 +- concourse/scripts/build_diskquota.sh | 6 + diskquota--1.0--2.0.sql | 58 ++ diskquota--2.0.sql | 115 ++++ diskquota.control | 2 +- diskquota.h | 8 +- diskquota_schedule | 2 +- diskquota_utility.c | 152 +++++- expected/prepare.out | 2 + expected/test_tablespace_role.out | 151 ++++++ expected/test_tablespace_schema.out | 139 +++++ quotamodel.c | 785 +++++++++++++++------------ sql/prepare.sql | 1 + sql/test_tablespace_role.sql | 85 +++ sql/test_tablespace_schema.sql | 75 +++ 16 files changed, 1223 insertions(+), 362 deletions(-) create mode 100644 diskquota--1.0--2.0.sql create mode 100644 diskquota--2.0.sql create mode 100644 expected/test_tablespace_role.out create mode 100644 expected/test_tablespace_schema.out create mode 100644 sql/test_tablespace_role.sql create mode 100644 sql/test_tablespace_schema.sql diff --git a/.gitignore b/.gitignore index 6ad453cf35c..bc12174ab3b 100644 --- a/.gitignore +++ b/.gitignore @@ -4,6 +4,8 @@ regression.out regression.diffs /results/ + +.vscode upgrade_test/regression.out upgrade_test/regression.diffs upgrade_test/results diff --git a/Makefile b/Makefile index c4f84b33902..b7ce8b4e300 100644 --- a/Makefile +++ b/Makefile @@ -3,7 +3,7 @@ MODULE_big = diskquota EXTENSION = diskquota -DATA = diskquota--1.0.sql +DATA = diskquota--1.0.sql diskquota--2.0.sql diskquota--1.0--2.0.sql SRCDIR = ./ FILES = diskquota.c enforcement.c quotamodel.c gp_activetable.c diskquota_utility.c OBJS = diskquota.o enforcement.o quotamodel.o gp_activetable.o diskquota_utility.o diff --git a/concourse/scripts/build_diskquota.sh b/concourse/scripts/build_diskquota.sh index 1c18f57ae03..b0f60df46cf 100755 --- a/concourse/scripts/build_diskquota.sh +++ b/concourse/scripts/build_diskquota.sh @@ -26,6 +26,8 @@ function pkg() { lib/postgresql/diskquota.so \ share/postgresql/extension/diskquota.control \ share/postgresql/extension/diskquota--1.0.sql \ + share/postgresql/extension/diskquota--2.0.sql \ + share/postgresql/extension/diskquota--1.0--2.0.sql \ install_gpdb_component ;; rhel7) @@ -33,6 +35,8 @@ function pkg() { lib/postgresql/diskquota.so \ share/postgresql/extension/diskquota.control \ share/postgresql/extension/diskquota--1.0.sql \ + share/postgresql/extension/diskquota--2.0.sql \ + share/postgresql/extension/diskquota--1.0--2.0.sql \ install_gpdb_component ;; ubuntu18.04) @@ -40,6 +44,8 @@ function pkg() { lib/postgresql/diskquota.so \ share/postgresql/extension/diskquota.control \ share/postgresql/extension/diskquota--1.0.sql \ + share/postgresql/extension/diskquota--2.0.sql \ + share/postgresql/extension/diskquota--1.0--2.0.sql \ install_gpdb_component ;; *) echo "Unknown OS: $OSVER"; exit 1 ;; diff --git a/diskquota--1.0--2.0.sql b/diskquota--1.0--2.0.sql new file mode 100644 index 00000000000..3816b3bf4be --- /dev/null +++ b/diskquota--1.0--2.0.sql @@ -0,0 +1,58 @@ +CREATE TABLE diskquota.target ( + quotatype int, --REFERENCES diskquota.quota_config.quotatype, + primaryOid oid, + tablespaceOid oid, --REFERENCES pg_tablespace.oid, + PRIMARY KEY (primaryOid, tablespaceOid, quotatype) +); + +CREATE OR REPLACE FUNCTION diskquota.set_schema_tablespace_quota(text, text, text) +RETURNS void STRICT +AS 'MODULE_PATHNAME' +LANGUAGE C; + +CREATE OR REPLACE FUNCTION diskquota.set_role_tablespace_quota(text, text, text) +RETURNS void STRICT +AS 'MODULE_PATHNAME' +LANGUAGE C; + +CREATE OR REPLACE VIEW diskquota.show_fast_schema_quota_view AS +select pgns.nspname as
schema_oid, qc.quotalimitMB as quota_in_mb, sum(ts.size) as nspsize_in_bytes +from diskquota.table_size as ts, + pg_class as pgc, + diskquota.quota_config as qc, + pg_namespace as pgns +where ts.tableid = pgc.oid and qc.targetoid = pgc.relnamespace and pgns.oid = pgc.relnamespace and qc.quotatype=0 +group by relnamespace, qc.quotalimitMB, pgns.nspname +order by pgns.nspname; + +CREATE OR REPLACE VIEW diskquota.show_fast_role_quota_view AS +select pgr.rolname as role_name, pgc.relowner as role_oid, qc.quotalimitMB as quota_in_mb, sum(ts.size) as rolsize_in_bytes +from diskquota.table_size as ts, + pg_class as pgc, + diskquota.quota_config as qc, + pg_roles as pgr +WHERE pgc.relowner = qc.targetoid and pgc.relowner = pgr.oid and ts.tableid = pgc.oid and qc.quotatype=1 +GROUP BY pgc.relowner, pgr.rolname, qc.quotalimitMB; + +CREATE OR REPLACE VIEW diskquota.show_fast_schema_tablespace_quota_view AS +select pgns.nspname as schema_name, pgc.relnamespace as schema_oid, pgsp.spcname as tablespace_name, pgc.reltablespace as tablespace_oid, qc.quotalimitMB as quota_in_mb, sum(ts.size) as nspsize_tablespcae_in_bytes +from diskquota.table_size as ts, + pg_class as pgc, + diskquota.quota_config as qc, + pg_namespace as pgns, + pg_tablespace as pgsp, + diskquota.target as t +where ts.tableid = pgc.oid and qc.targetoid = pgc.relnamespace and pgns.oid = pgc.relnamespace and pgsp.oid = pgc.reltablespace and qc.quotatype=2 and qc.targetoid=t.primaryoid and t.tablespaceoid=pgc.reltablespace +group by relnamespace, reltablespace, qc.quotalimitMB, pgns.nspname, pgsp.spcname +order by pgns.nspname, pgsp.spcname; + +CREATE OR REPLACE VIEW diskquota.show_fast_role_tablespace_quota_view AS +select pgr.rolname as role_name, pgc.relowner as role_oid, pgsp.spcname as tablespace_name, pgc.reltablespace as tablespace_oid, qc.quotalimitMB as quota_in_mb, sum(ts.size) as rolsize_tablespace_in_bytes +from diskquota.table_size as ts, + pg_class as pgc, + diskquota.quota_config as qc, + pg_roles as pgr, + pg_tablespace as pgsp, + diskquota.target as t +WHERE pgc.relowner = qc.targetoid and pgc.relowner = pgr.oid and ts.tableid = pgc.oid and pgsp.oid = pgc.reltablespace and qc.quotatype=3 and qc.targetoid=t.primaryoid and t.tablespaceoid=pgc.reltablespace +GROUP BY pgc.relowner, reltablespace, pgr.rolname, pgsp.spcname, qc.quotalimitMB; diff --git a/diskquota--2.0.sql b/diskquota--2.0.sql new file mode 100644 index 00000000000..2a5c8b58f31 --- /dev/null +++ b/diskquota--2.0.sql @@ -0,0 +1,115 @@ +/* contrib/diskquota/diskquota--2.0.sql */ + +-- complain if script is sourced in psql, rather than via CREATE EXTENSION +\echo Use "CREATE EXTENSION diskquota" to load this file. 
\quit + +CREATE SCHEMA diskquota; + +-- Configuration table +CREATE TABLE diskquota.quota_config (targetOid oid, quotatype int, quotalimitMB int8, PRIMARY KEY(targetOid, quotatype)); + +CREATE TABLE diskquota.target ( + quotatype int, --REFERENCES diskquota.quota_config.quotatype, + primaryOid oid, + tablespaceOid oid, --REFERENCES pg_tablespace.oid, + PRIMARY KEY (primaryOid, tablespaceOid, quotatype) +); + +SELECT pg_catalog.pg_extension_config_dump('diskquota.quota_config', ''); +SELECT gp_segment_id, pg_catalog.pg_extension_config_dump('diskquota.quota_config', '') from gp_dist_random('gp_id'); + +CREATE FUNCTION diskquota.set_schema_quota(text, text) +RETURNS void STRICT +AS 'MODULE_PATHNAME' +LANGUAGE C; + +CREATE FUNCTION diskquota.set_role_quota(text, text) +RETURNS void STRICT +AS 'MODULE_PATHNAME' +LANGUAGE C; + +CREATE OR REPLACE FUNCTION diskquota.set_schema_tablespace_quota(text, text, text) +RETURNS void STRICT +AS 'MODULE_PATHNAME' +LANGUAGE C; + +CREATE OR REPLACE FUNCTION diskquota.set_role_tablespace_quota(text, text, text) +RETURNS void STRICT +AS 'MODULE_PATHNAME' +LANGUAGE C; + + +CREATE FUNCTION diskquota.update_diskquota_db_list(oid, int4) +RETURNS void STRICT +AS 'MODULE_PATHNAME' +LANGUAGE C; + +CREATE TABLE diskquota.table_size (tableid oid, size bigint, PRIMARY KEY(tableid)); + +CREATE TABLE diskquota.state (state int, PRIMARY KEY(state)); + +INSERT INTO diskquota.state SELECT (count(relname) = 0)::int FROM pg_class AS c, pg_namespace AS n WHERE c.oid > 16384 and relnamespace = n.oid and nspname != 'diskquota'; + +CREATE FUNCTION diskquota.diskquota_start_worker() +RETURNS void STRICT +AS 'MODULE_PATHNAME' +LANGUAGE C; + +CREATE FUNCTION diskquota.init_table_size_table() +RETURNS void STRICT +AS 'MODULE_PATHNAME' +LANGUAGE C; + +CREATE VIEW diskquota.show_fast_schema_quota_view AS +select pgns.nspname as schema_name, pgc.relnamespace as schema_oid, qc.quotalimitMB as quota_in_mb, sum(ts.size) as nspsize_in_bytes +from diskquota.table_size as ts, + pg_class as pgc, + diskquota.quota_config as qc, + pg_namespace as pgns +where ts.tableid = pgc.oid and qc.targetoid = pgc.relnamespace and pgns.oid = pgc.relnamespace and qc.quotatype=0 +group by relnamespace, qc.quotalimitMB, pgns.nspname +order by pgns.nspname; + +CREATE VIEW diskquota.show_fast_role_quota_view AS +select pgr.rolname as role_name, pgc.relowner as role_oid, qc.quotalimitMB as quota_in_mb, sum(ts.size) as rolsize_in_bytes +from diskquota.table_size as ts, + pg_class as pgc, + diskquota.quota_config as qc, + pg_roles as pgr +WHERE pgc.relowner = qc.targetoid and pgc.relowner = pgr.oid and ts.tableid = pgc.oid and qc.quotatype=1 +GROUP BY pgc.relowner, pgr.rolname, qc.quotalimitMB; + +CREATE VIEW diskquota.show_fast_schema_tablespace_quota_view AS +select pgns.nspname as schema_name, pgc.relnamespace as schema_oid, pgsp.spcname as tablespace_name, pgc.reltablespace as tablespace_oid, qc.quotalimitMB as quota_in_mb, sum(ts.size) as nspsize_tablespcae_in_bytes +from diskquota.table_size as ts, + pg_class as pgc, + diskquota.quota_config as qc, + pg_namespace as pgns, + pg_tablespace as pgsp, + diskquota.target as t +where ts.tableid = pgc.oid and qc.targetoid = pgc.relnamespace and pgns.oid = pgc.relnamespace and pgsp.oid = pgc.reltablespace and qc.quotatype=2 and qc.targetoid=t.primaryoid and t.tablespaceoid=pgc.reltablespace +group by relnamespace, reltablespace, qc.quotalimitMB, pgns.nspname, pgsp.spcname +order by pgns.nspname, pgsp.spcname; + +CREATE VIEW diskquota.show_fast_role_tablespace_quota_view
AS +select pgr.rolname as role_name, pgc.relowner as role_oid, pgsp.spcname as tablespace_name, pgc.reltablespace as tablespace_oid, qc.quotalimitMB as quota_in_mb, sum(ts.size) as rolsize_tablespace_in_bytes +from diskquota.table_size as ts, + pg_class as pgc, + diskquota.quota_config as qc, + pg_roles as pgr, + pg_tablespace as pgsp, + diskquota.target as t +WHERE pgc.relowner = qc.targetoid and pgc.relowner = pgr.oid and ts.tableid = pgc.oid and pgsp.oid = pgc.reltablespace and qc.quotatype=3 and qc.targetoid=t.primaryoid and t.tablespaceoid=pgc.reltablespace +GROUP BY pgc.relowner, reltablespace, pgr.rolname, pgsp.spcname, qc.quotalimitMB; + +CREATE VIEW diskquota.show_fast_database_size_view AS +SELECT ((SELECT SUM(pg_relation_size(oid)) FROM pg_class WHERE oid <= 16384)+ (SELECT SUM(size) FROM diskquota.table_size)) AS dbsize; + +CREATE TYPE diskquota.diskquota_active_table_type AS ("TABLE_OID" oid, "TABLE_SIZE" int8); + +CREATE OR REPLACE FUNCTION diskquota.diskquota_fetch_table_stat(int4, oid[]) RETURNS setof diskquota.diskquota_active_table_type +AS 'MODULE_PATHNAME', 'diskquota_fetch_table_stat' +LANGUAGE C VOLATILE; + +SELECT diskquota.diskquota_start_worker(); +DROP FUNCTION diskquota.diskquota_start_worker(); diff --git a/diskquota.control b/diskquota.control index 6c25e7cd945..aa8c4b083bb 100644 --- a/diskquota.control +++ b/diskquota.control @@ -1,5 +1,5 @@ # diskquota extension comment = 'Disk Quota Main Program' -default_version = '1.0' +default_version = '2.0' module_pathname = '$libdir/diskquota' relocatable = true diff --git a/diskquota.h b/diskquota.h index 03bfd19dc6b..9cff8633ce8 100644 --- a/diskquota.h +++ b/diskquota.h @@ -8,8 +8,12 @@ typedef enum { - NAMESPACE_QUOTA, - ROLE_QUOTA + NAMESPACE_QUOTA = 0, + ROLE_QUOTA, + NAMESPACE_TABLESPACE_QUOTA, + ROLE_TABLESPACE_QUOTA, + + NUM_QUOTA_TYPES } QuotaType; typedef enum diff --git a/diskquota_schedule b/diskquota_schedule index 79f37f634b8..4c0903a4877 100644 --- a/diskquota_schedule +++ b/diskquota_schedule @@ -4,7 +4,7 @@ test: prepare # test: test_table_size test: test_fast_disk_check #test: test_insert_after_drop -test: test_role test_schema test_drop_table test_column test_copy test_update test_toast test_truncate test_reschema test_temp_role test_rename test_delete_quota test_mistake +test: test_role test_schema test_drop_table test_column test_copy test_update test_toast test_truncate test_reschema test_temp_role test_rename test_delete_quota test_mistake test_tablespace_role test_tablespace_schema test: test_truncate test: test_delete_quota test: test_partition diff --git a/diskquota_utility.c b/diskquota_utility.c index be165b0d929..6087a452b0f 100644 --- a/diskquota_utility.c +++ b/diskquota_utility.c @@ -28,6 +28,7 @@ #include "catalog/pg_type.h" #include "commands/dbcommands.h" #include "commands/extension.h" +#include "commands/tablespace.h" #include "executor/spi.h" #include "nodes/makefuncs.h" #include "storage/proc.h" @@ -50,6 +51,8 @@ PG_FUNCTION_INFO_V1(init_table_size_table); PG_FUNCTION_INFO_V1(diskquota_start_worker); PG_FUNCTION_INFO_V1(set_schema_quota); PG_FUNCTION_INFO_V1(set_role_quota); +PG_FUNCTION_INFO_V1(set_schema_tablespace_quota); +PG_FUNCTION_INFO_V1(set_role_tablespace_quota); PG_FUNCTION_INFO_V1(update_diskquota_db_list); /* timeout count to wait response from launcher process, in 1/10 sec */ @@ -61,7 +64,8 @@ static void dq_object_access_hook(ObjectAccessType access, Oid classId, Oid objectId, int subId, void *arg); static const char *ddl_err_code_to_err_message(MessageResult 
code); static int64 get_size_in_mb(char *str); -static void set_quota_internal(Oid targetoid, int64 quota_limit_mb, QuotaType type); +static void set_quota_config_internal(Oid targetoid, int64 quota_limit_mb, QuotaType type); +static void set_target_internal(Oid primaryoid, Oid spcoid, int64 quota_limit_mb, QuotaType type); /* ---- Help Functions to set quota limit. ---- */ /* @@ -400,7 +404,7 @@ set_role_quota(PG_FUNCTION_ARGS) sizestr = str_tolower(sizestr, strlen(sizestr), DEFAULT_COLLATION_OID); quota_limit_mb = get_size_in_mb(sizestr); - set_quota_internal(roleoid, quota_limit_mb, ROLE_QUOTA); + set_quota_config_internal(roleoid, quota_limit_mb, ROLE_QUOTA); PG_RETURN_VOID(); } @@ -430,16 +434,96 @@ set_schema_quota(PG_FUNCTION_ARGS) sizestr = str_tolower(sizestr, strlen(sizestr), DEFAULT_COLLATION_OID); quota_limit_mb = get_size_in_mb(sizestr); - set_quota_internal(namespaceoid, quota_limit_mb, NAMESPACE_QUOTA); + set_quota_config_internal(namespaceoid, quota_limit_mb, NAMESPACE_QUOTA); PG_RETURN_VOID(); } /* - * Write the quota limit info into quota_config table under + * Set disk quota limit for a role in a given tablespace. + */ +Datum +set_role_tablespace_quota(PG_FUNCTION_ARGS) +{ +/* + * Write the quota limit info into target and quota_config table under * 'diskquota' schema of the current database. */ + Oid spcoid; + char *spcname; + Oid roleoid; + char *rolname; + char *sizestr; + int64 quota_limit_mb; + + if (!superuser()) + { + ereport(ERROR, + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + errmsg("must be superuser to set disk quota limit"))); + } + + rolname = text_to_cstring(PG_GETARG_TEXT_PP(0)); + rolname = str_tolower(rolname, strlen(rolname), DEFAULT_COLLATION_OID); + roleoid = get_role_oid(rolname, false); + + spcname = text_to_cstring(PG_GETARG_TEXT_PP(1)); + spcname = str_tolower(spcname, strlen(spcname), DEFAULT_COLLATION_OID); + spcoid = get_tablespace_oid(spcname, false); + + + sizestr = text_to_cstring(PG_GETARG_TEXT_PP(2)); + sizestr = str_tolower(sizestr, strlen(sizestr), DEFAULT_COLLATION_OID); + quota_limit_mb = get_size_in_mb(sizestr); + + set_target_internal(roleoid, spcoid, quota_limit_mb, ROLE_TABLESPACE_QUOTA); + set_quota_config_internal(roleoid, quota_limit_mb, ROLE_TABLESPACE_QUOTA); + PG_RETURN_VOID(); +} + +/* + * Set disk quota limit for a schema in a given tablespace. + */ +Datum +set_schema_tablespace_quota(PG_FUNCTION_ARGS) +{ +/* + * Write the quota limit info into target and quota_config table under + * 'diskquota' schema of the current database.
+ */ + Oid spcoid; + char *spcname; + Oid namespaceoid; + char *nspname; + char *sizestr; + int64 quota_limit_mb; + + if (!superuser()) + { + ereport(ERROR, + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + errmsg("must be superuser to set disk quota limit"))); + } + + nspname = text_to_cstring(PG_GETARG_TEXT_PP(0)); + nspname = str_tolower(nspname, strlen(nspname), DEFAULT_COLLATION_OID); + namespaceoid = get_namespace_oid(nspname, false); + + spcname = text_to_cstring(PG_GETARG_TEXT_PP(1)); + spcname = str_tolower(spcname, strlen(spcname), DEFAULT_COLLATION_OID); + spcoid = get_tablespace_oid(spcname, false); + + + sizestr = text_to_cstring(PG_GETARG_TEXT_PP(2)); + sizestr = str_tolower(sizestr, strlen(sizestr), DEFAULT_COLLATION_OID); + quota_limit_mb = get_size_in_mb(sizestr); + + set_target_internal(namespaceoid, spcoid, quota_limit_mb, NAMESPACE_TABLESPACE_QUOTA); + set_quota_config_internal(namespaceoid, quota_limit_mb, NAMESPACE_TABLESPACE_QUOTA); + PG_RETURN_VOID(); +} + static void -set_quota_internal(Oid targetoid, int64 quota_limit_mb, QuotaType type) +set_quota_config_internal(Oid targetoid, int64 quota_limit_mb, QuotaType type) { int ret; StringInfoData buf; @@ -451,7 +535,7 @@ set_quota_internal(Oid targetoid, int64 quota_limit_mb, QuotaType type) targetoid, type); /* - * If error happens in set_quota_internal, just return error messages to + * If error happens in set_quota_config_internal, just return error messages to * the client side. So there is no need to catch the error. */ SPI_connect(); @@ -501,6 +585,62 @@ set_quota_internal(Oid targetoid, int64 quota_limit_mb, QuotaType type) return; } +static void +set_target_internal(Oid primaryoid, Oid spcoid, int64 quota_limit_mb, QuotaType type) +{ + int ret; + StringInfoData buf; + + initStringInfo(&buf); + appendStringInfo(&buf, + "select true from diskquota.quota_config as q, diskquota.target as t" + " where t.primaryOid = %u" + " and t.tablespaceOid=%u" + " and t.quotaType=%d" + " and t.quotaType=q.quotaType" + " and t.primaryOid=q.targetOid;", + primaryoid, spcoid, type); + + /* + * If error happens in set_target_internal, just return error messages to + * the client side. So there is no need to catch the error. + */ + SPI_connect(); + + ret = SPI_execute(buf.data, true, 0); + if (ret != SPI_OK_SELECT) + elog(ERROR, "cannot select target setting table: error code %d", ret); + + /* insert a target row when a new quota is set; delete it when the quota is removed */ + if (SPI_processed == 0 && quota_limit_mb > 0) + { + resetStringInfo(&buf); + appendStringInfo(&buf, + "insert into diskquota.target values(%d,%u,%u)", + type, primaryoid, spcoid); + ret = SPI_execute(buf.data, false, 0); + if (ret != SPI_OK_INSERT) + elog(ERROR, "cannot insert into target setting table, error code %d", ret); + } + else if (SPI_processed > 0 && quota_limit_mb <= 0) + { + resetStringInfo(&buf); + appendStringInfo(&buf, + "delete from diskquota.target where primaryOid=%u" + " and tablespaceOid=%u;", + primaryoid, spcoid); + ret = SPI_execute(buf.data, false, 0); + if (ret != SPI_OK_DELETE) + elog(ERROR, "cannot delete item from target setting table, error code %d", ret); + } + + /* + * And finish our transaction. + */ + SPI_finish(); + return; +} + /* * Convert a human-readable size to a size in MB.
*/ diff --git a/expected/prepare.out b/expected/prepare.out index d701b379ef3..7c3d5d2060f 100644 --- a/expected/prepare.out +++ b/expected/prepare.out @@ -35,6 +35,8 @@ SELECT diskquota.set_schema_quota('badquota', '1 MB'); (1 row) +DROP ROLE IF EXISTS testbody; +NOTICE: role "testbody" does not exist, skipping CREATE ROLE testbody; NOTICE: resource queue required -- using default resource queue "pg_default" CREATE TABLE badquota.t1(i INT); diff --git a/expected/test_tablespace_role.out b/expected/test_tablespace_role.out new file mode 100644 index 00000000000..5437ca48fe5 --- /dev/null +++ b/expected/test_tablespace_role.out @@ -0,0 +1,151 @@ +-- Test role quota +-- start_ignore +\! mkdir /tmp/rolespc +-- end_ignore +DROP TABLESPACE IF EXISTS rolespc; +NOTICE: tablespace "rolespc" does not exist, skipping +CREATE TABLESPACE rolespc LOCATION '/tmp/rolespc'; +CREATE SCHEMA rolespcrole; +SET search_path TO rolespcrole; +DROP ROLE IF EXISTS rolespcu1; +NOTICE: role "rolespcu1" does not exist, skipping +DROP ROLE IF EXISTS rolespcu2; +NOTICE: role "rolespcu2" does not exist, skipping +CREATE ROLE rolespcu1 NOLOGIN; +NOTICE: resource queue required -- using default resource queue "pg_default" +CREATE ROLE rolespcu2 NOLOGIN; +NOTICE: resource queue required -- using default resource queue "pg_default" +CREATE TABLE b (t TEXT) TABLESPACE rolespc; +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 't' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +ALTER TABLE b OWNER TO rolespcu1; +CREATE TABLE b2 (t TEXT) TABLESPACE rolespc; +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 't' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
+ALTER TABLE b2 OWNER TO rolespcu1; +SELECT diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '1 MB'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +INSERT INTO b SELECT generate_series(1,100); +-- expect insert success +INSERT INTO b SELECT generate_series(1,100000); +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); +ERROR: tablespace: rolespc role: rolespcu1 diskquota exceeded +-- expect insert fail +INSERT INTO b2 SELECT generate_series(1,100); +ERROR: tablespace: rolespc role: rolespcu1 diskquota exceeded +-- Test show_fast_schema_tablespace_quota_view +SELECT role_name, tablespace_name, quota_in_mb, rolsize_tablespace_in_bytes FROM diskquota.show_fast_role_tablespace_quota_view WHERE role_name = 'rolespcu1' and tablespace_name = 'rolespc'; + role_name | tablespace_name | quota_in_mb | rolsize_tablespace_in_bytes +-----------+-----------------+-------------+----------------------------- + rolespcu1 | rolespc | 1 | 4194304 +(1 row) + +-- Test alter owner +ALTER TABLE b OWNER TO rolespcu2; +SELECT pg_sleep(20); + pg_sleep +---------- + +(1 row) + +-- expect insert succeed +INSERT INTO b SELECT generate_series(1,100); +-- expect insert succeed +INSERT INTO b2 SELECT generate_series(1,100); +ALTER TABLE b OWNER TO rolespcu1; +SELECT pg_sleep(20); + pg_sleep +---------- + +(1 row) + +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); +ERROR: tablespace: rolespc role: rolespcu1 diskquota exceeded +-- Test alter tablespace +-- start_ignore +\! mkdir /tmp/rolespc2 +-- end_ignore +DROP TABLESPACE IF EXISTS rolespc2; +NOTICE: tablespace "rolespc2" does not exist, skipping +CREATE TABLESPACE rolespc2 LOCATION '/tmp/rolespc2'; +ALTER TABLE b SET TABLESPACE rolespc2; +SELECT pg_sleep(20); + pg_sleep +---------- + +(1 row) + +-- expect insert succeed +INSERT INTO b SELECT generate_series(1,100); +-- alter table b back to tablespace rolespc +ALTER TABLE b SET TABLESPACE rolespc; +SELECT pg_sleep(20); + pg_sleep +---------- + +(1 row) + +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); +ERROR: tablespace: rolespc role: rolespcu1 diskquota exceeded +-- Test update quota config +SELECT diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '10 MB'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +SELECT pg_sleep(20); + pg_sleep +---------- + +(1 row) + +-- expect insert success +INSERT INTO b SELECT generate_series(1,100); +-- expect insert success +INSERT INTO b SELECT generate_series(1,1000000); +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); +ERROR: tablespace: rolespc role: rolespcu1 diskquota exceeded +-- Test delete quota config +SELECT diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '0 MB'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- expect insert success +INSERT INTO b SELECT generate_series(1,100); +DROP TABLE b, b2; +DROP ROLE rolespcu1, rolespcu2; +RESET search_path; +DROP SCHEMA rolespcrole; +DROP TABLESPACE rolespc; +DROP TABLESPACE rolespc2; +\! rm -rf /tmp/rolespc; +\! rm -rf /tmp/rolespc2 diff --git a/expected/test_tablespace_schema.out b/expected/test_tablespace_schema.out new file mode 100644 index 00000000000..52fdd26a378 --- /dev/null +++ b/expected/test_tablespace_schema.out @@ -0,0 +1,139 @@ +-- Test schema +-- start_ignore +\! 
mkdir /tmp/schemaspc +-- end_ignore +CREATE SCHEMA spcs1; +DROP TABLESPACE IF EXISTS schemaspc; +NOTICE: tablespace "schemaspc" does not exist, skipping +CREATE TABLESPACE schemaspc LOCATION '/tmp/schemaspc'; +SELECT diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc','1 MB'); + set_schema_tablespace_quota +----------------------------- + +(1 row) + +SET search_path TO spcs1; +CREATE TABLE a(i int) TABLESPACE schemaspc; +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +INSERT INTO a SELECT generate_series(1,100); +-- expect insert fail +INSERT INTO a SELECT generate_series(1,100000); +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- expect insert fail +INSERT INTO a SELECT generate_series(1,100); +ERROR: tablespace:schemaspc schema:spcs1 diskquota exceeded +CREATE TABLE a2(i int) TABLESPACE schemaspc; +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +-- expect insert fail +INSERT INTO a2 SELECT generate_series(1,100); +ERROR: tablespace:schemaspc schema:spcs1 diskquota exceeded +-- Test alter table set schema +CREATE SCHEMA spcs2; +ALTER TABLE spcs1.a SET SCHEMA spcs2; +SELECT pg_sleep(20); + pg_sleep +---------- + +(1 row) + +-- expect insert succeed +INSERT INTO a2 SELECT generate_series(1,200); +-- expect insert succeed +INSERT INTO spcs2.a SELECT generate_series(1,200); +ALTER TABLE spcs2.a SET SCHEMA spcs1; +SELECT pg_sleep(10); + pg_sleep +---------- + +(1 row) + +-- expect insert fail +INSERT INTO a SELECT generate_series(1,200); +ERROR: tablespace:schemaspc schema:spcs1 diskquota exceeded +SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespcae_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name = 'spcs1' and tablespace_name ='schemaspc'; + schema_name | tablespace_name | quota_in_mb | nspsize_tablespcae_in_bytes +-------------+-----------------+-------------+----------------------------- + spcs1 | schemaspc | 1 | 4030464 +(1 row) + +-- Test alter tablespace +-- start_ignore +\! 
mkdir /tmp/schemaspc2 +-- end_ignore +DROP TABLESPACE IF EXISTS schemaspc2; +NOTICE: tablespace "schemaspc2" does not exist, skipping +CREATE TABLESPACE schemaspc2 LOCATION '/tmp/schemaspc2'; +ALTER TABLE a SET TABLESPACE schemaspc2; +SELECT pg_sleep(20); + pg_sleep +---------- + +(1 row) + +-- expect insert succeed +INSERT INTO a SELECT generate_series(1,200); +ALTER TABLE a SET TABLESPACE schemaspc; +SELECT pg_sleep(20); + pg_sleep +---------- + +(1 row) + +-- expect insert fail +INSERT INTO a SELECT generate_series(1,200); +ERROR: tablespace:schemaspc schema:spcs1 diskquota exceeded +-- Test update quota config +SELECT diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc', '10 MB'); + set_schema_tablespace_quota +----------------------------- + +(1 row) + +SELECT pg_sleep(20); + pg_sleep +---------- + +(1 row) + +-- expect insert success +INSERT INTO a SELECT generate_series(1,100); +-- expect insert success +INSERT INTO a SELECT generate_series(1,1000000); +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- expect insert fail +INSERT INTO a SELECT generate_series(1,100); +ERROR: tablespace:schemaspc schema:spcs1 diskquota exceeded +-- Test delete quota config +SELECT diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc', '0 MB'); + set_schema_tablespace_quota +----------------------------- + +(1 row) + +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- expect insert success +INSERT INTO a SELECT generate_series(1,100); +RESET search_path; +DROP TABLE spcs1.a2, spcs1.a; +DROP SCHEMA spcs1, spcs2; +DROP TABLESPACE schemaspc; +DROP TABLESPACE schemaspc2; +\! rm -rf /tmp/schemaspc +\! rm -rf /tmp/schemaspc2 diff --git a/quotamodel.c b/quotamodel.c index 70c7e811c1c..13a83000f4c 100644 --- a/quotamodel.c +++ b/quotamodel.c @@ -26,6 +26,7 @@ #include "catalog/pg_database.h" #include "catalog/pg_type.h" #include "commands/dbcommands.h" +#include "commands/tablespace.h" #include "executor/spi.h" #include "funcapi.h" #include "lib/stringinfo.h" @@ -55,6 +56,7 @@ #define INIT_DISK_QUOTA_BLACK_ENTRIES 8192 /* per database level max size of black list */ #define MAX_LOCAL_DISK_QUOTA_BLACK_ENTRIES 8192 +#define MAX_NUM_KEYS_QUOTA_MAP 8 typedef struct TableSizeEntry TableSizeEntry; typedef struct NamespaceSizeEntry NamespaceSizeEntry; @@ -68,9 +70,10 @@ typedef struct LocalBlackMapEntry LocalBlackMapEntry; */ struct TableSizeEntry { - Oid reloid; - Oid namespaceoid; - Oid owneroid; + Oid reloid; + Oid tablespace_oid; + Oid namespaceoid; + Oid owneroid; int64 totalsize; /* table size including fsm, visibility map * etc. 
*/ bool is_exist; /* flag used to check whether table is already @@ -78,48 +81,64 @@ struct TableSizeEntry bool need_flush; /* whether need to flush to table table_size */ }; -/* local cache of namespace disk size */ -struct NamespaceSizeEntry -{ - Oid namespaceoid; - int64 totalsize; +struct QuotaMapEntry { + Oid keys[MAX_NUM_KEYS_QUOTA_MAP]; + int64 size; + int64 limit; }; -/* local cache of role disk size */ -struct RoleSizeEntry -{ - Oid owneroid; - int64 totalsize; +struct QuotaInfo { + char *map_name; + unsigned int num_keys; + Oid *sys_cache; + HTAB *map; }; -/* local cache of disk quota limit */ -struct QuotaLimitEntry -{ - Oid targetoid; - int64 limitsize; +struct QuotaInfo quota_info[NUM_QUOTA_TYPES] = { + [NAMESPACE_QUOTA] = { + .map_name = "Namespace map", + .num_keys = 1, + .sys_cache = (Oid[]){ NAMESPACEOID }, + .map = NULL + }, + [ROLE_QUOTA] = { + .map_name = "Role map", + .num_keys = 1, + .sys_cache = (Oid[]){ AUTHOID }, + .map = NULL + }, + [NAMESPACE_TABLESPACE_QUOTA] = { + .map_name = "Tablespace-namespace map", + .num_keys = 2, + .sys_cache = (Oid[]){ NAMESPACEOID, TABLESPACEOID }, + .map = NULL + }, + [ROLE_TABLESPACE_QUOTA] = { + .map_name = "Tablespace-role map", + .num_keys = 2, + .sys_cache = (Oid[]){ AUTHOID, TABLESPACEOID }, + .map = NULL + } }; /* global blacklist for objects which exceed their quota limit */ struct BlackMapEntry { - Oid targetoid; - Oid databaseoid; + Oid targetoid; + Oid databaseoid; + Oid tablespace_oid; uint32 targettype; }; /* local blacklist for objects which exceed their quota limit */ struct LocalBlackMapEntry { - BlackMapEntry keyitem; + BlackMapEntry keyitem; bool isexceeded; }; /* use a hash table to support incremental updates to table size entries. */ static HTAB *table_size_map = NULL; -static HTAB *namespace_size_map = NULL; -static HTAB *role_size_map = NULL; -static HTAB *namespace_quota_limit_map = NULL; -static HTAB *role_quota_limit_map = NULL; /* black list for database objects which exceed their quota limit */ static HTAB *disk_quota_black_map = NULL; @@ -127,18 +146,22 @@ static HTAB *local_disk_quota_black_map = NULL; static shmem_startup_hook_type prev_shmem_startup_hook = NULL; +/* functions to maintain the quota maps */ +static void init_all_quota_maps(void); +static void update_size_for_quota(int64 size, QuotaType type, Oid* keys); +static void update_limit_for_quota(int64 limit, QuotaType type, Oid* keys); +static void remove_quota(QuotaType type, Oid* keys); +static void add_quota_to_blacklist(QuotaType type, Oid targetOid, Oid tablespace_oid); +static void check_quota_map(QuotaType type); +static void clear_all_quota_maps(void); +static void vacuum_all_quota_maps(void); +static void transfer_table_for_quota(int64 totalsize, QuotaType type, Oid* old_keys, Oid* new_keys); + /* functions to refresh disk quota model*/ static void refresh_disk_quota_usage(bool is_init); static void calculate_table_disk_usage(bool is_init); -static void calculate_schema_disk_usage(void); -static void calculate_role_disk_usage(void); static void flush_to_table_size(void); static void flush_local_black_map(void); -static void check_disk_quota_by_oid(Oid targetOid, int64 current_usage, QuotaType type); -static void update_namespace_map(Oid namespaceoid, int64 updatesize); -static void update_role_map(Oid owneroid, int64 updatesize); -static void remove_namespace_map(Oid namespaceoid); -static void remove_role_map(Oid owneroid); static bool load_quotas(void); static void do_load_quotas(void); static bool do_check_diskquota_state_is_ready(void); @@ -148,7
+171,182 @@ static void disk_quota_shmem_startup(void); static void init_lwlocks(void); static void truncateStringInfo(StringInfo str, int nchars); +static void export_exceeded_error(BlackMapEntry *blackentry); + +static void +init_all_quota_maps(void) +{ + HASHCTL hash_ctl = {0}; + hash_ctl.entrysize = sizeof(struct QuotaMapEntry); + hash_ctl.hcxt = TopMemoryContext; + for (QuotaType type = 0; type < NUM_QUOTA_TYPES; ++type) + { + hash_ctl.keysize = quota_info[type].num_keys * sizeof(Oid); + if (quota_info[type].num_keys == 1) + { + hash_ctl.hash = oid_hash; + } + else + { + hash_ctl.hash = tag_hash; + } + if (quota_info[type].map != NULL) + { + hash_destroy(quota_info[type].map); + } + quota_info[type].map = hash_create( + quota_info[type].map_name, 1024L, &hash_ctl, HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); + } +} + +/* add a new quota entry or update the size of an existing entry */ +static void +update_size_for_quota(int64 size, QuotaType type, Oid* keys) +{ + bool found; + struct QuotaMapEntry *entry = hash_search( + quota_info[type].map, keys, HASH_ENTER, &found); + if (!found) + { + entry->size = size; + entry->limit = -1; + memcpy(entry->keys, keys, quota_info[type].num_keys * sizeof(Oid)); + } + else + { + entry->size += size; + } +} + +/* add a new quota entry or update the limit of an existing entry */ +static void +update_limit_for_quota(int64 limit, QuotaType type, Oid* keys) +{ + bool found; + struct QuotaMapEntry *entry = hash_search( + quota_info[type].map, keys, HASH_ENTER, &found); + if (!found) + { + entry->size = 0; + memcpy(entry->keys, keys, quota_info[type].num_keys * sizeof(Oid)); + } + entry->limit = limit; +} + +/* remove a quota entry from the map */ +static void +remove_quota(QuotaType type, Oid* keys) +{ + hash_search(quota_info[type].map, keys, HASH_REMOVE, NULL); +} + +/* + * Compare the disk quota limit and current usage of a database object. + * Put them into local blacklist if quota limit is exceeded. + */ +static void +add_quota_to_blacklist(QuotaType type, Oid targetOid, Oid tablespace_oid) +{ + LocalBlackMapEntry *localblackentry; + BlackMapEntry keyitem = {0}; + + keyitem.targetoid = targetOid; + keyitem.databaseoid = MyDatabaseId; + keyitem.tablespace_oid = tablespace_oid; + keyitem.targettype = (uint32) type; + ereport(DEBUG1, (errmsg("[diskquota] Put object %u to blacklist", targetOid))); + localblackentry = (LocalBlackMapEntry *) hash_search(local_disk_quota_black_map, + &keyitem, + HASH_ENTER, NULL); + localblackentry->isexceeded = true; + +} + +/* + * Check the quota map: if an entry doesn't exist any more, + * remove it from the map. Otherwise, check whether it has hit + * the quota limit; if it has, add it to the black list. + */ +static void +check_quota_map(QuotaType type) +{ + HeapTuple tuple; + HASH_SEQ_STATUS iter; + struct QuotaMapEntry *entry; + + hash_seq_init(&iter, quota_info[type].map); + + while ((entry = hash_seq_search(&iter)) != NULL) + { + bool removed = false; + for (int i = 0; i < quota_info[type].num_keys; ++i) + { + tuple = SearchSysCache1(quota_info[type].sys_cache[i], ObjectIdGetDatum(entry->keys[i])); + if (!HeapTupleIsValid(tuple)) + { + remove_quota(type, entry->keys); + removed = true; + break; + } + ReleaseSysCache(tuple); + } + if (!removed) + { + if (entry->limit >= 0 && entry->size >= entry->limit) + { + Oid targetOid = entry->keys[0]; + Oid tablespace_oid = + (type == NAMESPACE_TABLESPACE_QUOTA) || (type == ROLE_TABLESPACE_QUOTA) ?
entry->keys[1] : InvalidOid; + /* when quota type is not NAMESPACE_TABLESPACE_QUOTA or ROLE_TABLESPACE_QUOTA, the tablespace_oid + * is set to be InvalidOid, so when we get it from map, also set it to be InvalidOid + */ + add_quota_to_blacklist(type, targetOid, tablespace_oid); + } + } + } +} + +/* transfer one table's size from one quota to another quota */ +static void +transfer_table_for_quota(int64 totalsize, QuotaType type, Oid* old_keys, Oid* new_keys) +{ + update_size_for_quota(-totalsize, type, old_keys); + update_size_for_quota(totalsize, type, new_keys); +} +static void +clear_all_quota_maps(void) +{ + for (QuotaType type = 0; type < NUM_QUOTA_TYPES; ++type) + { + HASH_SEQ_STATUS iter = {0}; + hash_seq_init(&iter, quota_info[type].map); + struct QuotaMapEntry *entry = NULL; + while ((entry = hash_seq_search(&iter)) != NULL) + { + entry->limit = -1; + } + } +} + +static void +vacuum_all_quota_maps(void) { + for (QuotaType type = 0; type < NUM_QUOTA_TYPES; ++type) + { + HASH_SEQ_STATUS iter = {0}; + hash_seq_init(&iter, quota_info[type].map); + struct QuotaMapEntry *entry = NULL; + while ((entry = hash_seq_search(&iter)) != NULL) + { + if (entry->limit == -1) + { + remove_quota(type, entry->keys); + } + } + + } + +} /* ---- Functions for disk quota shared memory ---- */ /* * DiskQuotaShmemInit @@ -286,44 +484,7 @@ init_disk_quota_model(void) &hash_ctl, HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); - memset(&hash_ctl, 0, sizeof(hash_ctl)); - hash_ctl.keysize = sizeof(Oid); - hash_ctl.entrysize = sizeof(NamespaceSizeEntry); - hash_ctl.hcxt = CurrentMemoryContext; - hash_ctl.hash = oid_hash; - - namespace_size_map = hash_create("NamespaceSizeEntry map", - 1024, - &hash_ctl, - HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); - - memset(&hash_ctl, 0, sizeof(hash_ctl)); - hash_ctl.keysize = sizeof(Oid); - hash_ctl.entrysize = sizeof(RoleSizeEntry); - hash_ctl.hcxt = CurrentMemoryContext; - hash_ctl.hash = oid_hash; - - role_size_map = hash_create("RoleSizeEntry map", - 1024, - &hash_ctl, - HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); - - /* initialize hash table for quota limit */ - memset(&hash_ctl, 0, sizeof(hash_ctl)); - hash_ctl.keysize = sizeof(Oid); - hash_ctl.entrysize = sizeof(QuotaLimitEntry); - hash_ctl.hcxt = CurrentMemoryContext; - hash_ctl.hash = oid_hash; - - namespace_quota_limit_map = hash_create("Namespace QuotaLimitEntry map", - 1024, - &hash_ctl, - HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); - - role_quota_limit_map = hash_create("Role QuotaLimitEntry map", - 1024, - &hash_ctl, - HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); + init_all_quota_maps(); /* * local diskquota black map is used to reduce the lock hold time of @@ -483,7 +644,7 @@ refresh_disk_quota_model(bool is_init) } /* - * Update the disk usage of namespace and role. + * Update the disk usage of namespace, role and tablespace. * Put the exceeded namespace and role into shared black map. * Parameter 'is_init' is true when it's the first time that worker * process is constructing quota model. 
@@ -495,6 +656,7 @@ refresh_disk_quota_usage(bool is_init) bool pushed_active_snap = false; bool ret = true; + elog(LOG, "refresh diskquota usage..."); StartTransactionCommand(); /* @@ -515,8 +677,9 @@ refresh_disk_quota_usage(bool is_init) pushed_active_snap = true; /* recalculate the disk usage of table, schema and role */ calculate_table_disk_usage(is_init); - calculate_schema_disk_usage(); - calculate_role_disk_usage(); + for (QuotaType type = 0; type < NUM_QUOTA_TYPES; ++type) { + check_quota_map(type); + } /* flush local table_size_map to user table table_size */ flush_to_table_size(); /* copy local black map back to shared black map */ @@ -554,6 +717,11 @@ refresh_disk_quota_usage(bool is_init) * Parameter 'is_init' set to true at initialization stage to fetch tables * size from table table_size */ + +/* FIXME: we should only care about tables whose role, schema, or tablespace + * has a quota; this may improve performance, especially when there are too + * many tables in the database + */ static void calculate_table_disk_usage(bool is_init) { @@ -614,8 +782,9 @@ calculate_table_disk_usage(bool is_init) { tsentry->reloid = relOid; tsentry->totalsize = 0; - tsentry->owneroid = 0; - tsentry->namespaceoid = 0; + tsentry->owneroid = InvalidOid; + tsentry->namespaceoid = InvalidOid; + tsentry->tablespace_oid = InvalidOid; tsentry->need_flush = true; } @@ -654,9 +823,11 @@ calculate_table_disk_usage(bool is_init) tsentry->totalsize = (int64) active_table_entry->tablesize; tsentry->need_flush = true; - /* update the disk usage of namespace and owner */ - update_namespace_map(tsentry->namespaceoid, updated_total_size); - update_role_map(tsentry->owneroid, updated_total_size); + /* update the disk usage; there may be entries in the map whose keys are InvalidOid when the tsentry does not exist in the table_size_map */ + update_size_for_quota(updated_total_size, NAMESPACE_QUOTA, (Oid[]){tsentry->namespaceoid}); + update_size_for_quota(updated_total_size, ROLE_QUOTA, (Oid[]){tsentry->owneroid}); + update_size_for_quota(updated_total_size, ROLE_TABLESPACE_QUOTA, (Oid[]){tsentry->owneroid, tsentry->tablespace_oid}); + update_size_for_quota(updated_total_size, NAMESPACE_TABLESPACE_QUOTA, (Oid[]){tsentry->namespaceoid, tsentry->tablespace_oid}); } /* table size info doesn't need to flush at init quota model stage */ @@ -668,16 +839,51 @@ calculate_table_disk_usage(bool is_init) /* if schema change, transfer the file size */ if (tsentry->namespaceoid != classForm->relnamespace) { - update_namespace_map(tsentry->namespaceoid, -1 * tsentry->totalsize); + transfer_table_for_quota( + tsentry->totalsize, + NAMESPACE_QUOTA, + (Oid[]){tsentry->namespaceoid}, + (Oid[]){classForm->relnamespace}); + transfer_table_for_quota( + tsentry->totalsize, + NAMESPACE_TABLESPACE_QUOTA, + (Oid[]){tsentry->namespaceoid, tsentry->tablespace_oid}, + (Oid[]){classForm->relnamespace, tsentry->tablespace_oid}); tsentry->namespaceoid = classForm->relnamespace; - update_namespace_map(tsentry->namespaceoid, tsentry->totalsize); } /* if owner change, transfer the file size */ if (tsentry->owneroid != classForm->relowner) { - update_role_map(tsentry->owneroid, -1 * tsentry->totalsize); + transfer_table_for_quota( + tsentry->totalsize, + ROLE_QUOTA, + (Oid[]){tsentry->owneroid}, + (Oid[]){classForm->relowner} + ); + transfer_table_for_quota( + tsentry->totalsize, + ROLE_TABLESPACE_QUOTA, + (Oid[]){tsentry->owneroid, tsentry->tablespace_oid}, + (Oid[]){classForm->relowner, tsentry->tablespace_oid} + ); tsentry->owneroid =
classForm->relowner; - update_role_map(tsentry->owneroid, tsentry->totalsize); + } + + if (tsentry->tablespace_oid != classForm->reltablespace) + { + transfer_table_for_quota( + tsentry->totalsize, + NAMESPACE_TABLESPACE_QUOTA, + (Oid[]){tsentry->namespaceoid, tsentry->tablespace_oid}, + (Oid[]){tsentry->namespaceoid, classForm->reltablespace} + ); + transfer_table_for_quota( + tsentry->totalsize, + ROLE_TABLESPACE_QUOTA, + (Oid[]){tsentry->owneroid, tsentry->tablespace_oid}, + (Oid[]){tsentry->owneroid, classForm->reltablespace} + ); + tsentry->tablespace_oid = classForm->reltablespace; } } @@ -694,66 +900,14 @@ calculate_table_disk_usage(bool is_init) { if (tsentry->is_exist == false) { - update_role_map(tsentry->owneroid, -1 * tsentry->totalsize); - update_namespace_map(tsentry->namespaceoid, -1 * tsentry->totalsize); + update_size_for_quota(-tsentry->totalsize, NAMESPACE_QUOTA, (Oid[]){tsentry->namespaceoid}); + update_size_for_quota(-tsentry->totalsize, ROLE_QUOTA, (Oid[]){tsentry->owneroid}); + update_size_for_quota(-tsentry->totalsize, ROLE_TABLESPACE_QUOTA, (Oid[]){tsentry->owneroid, tsentry->tablespace_oid}); + update_size_for_quota(-tsentry->totalsize, NAMESPACE_TABLESPACE_QUOTA, (Oid[]){tsentry->namespaceoid, tsentry->tablespace_oid}); } } } -/* - * Check the namespace quota limit and current usage - * Remove dropped namespace from namespace_size_map - */ -static void -calculate_schema_disk_usage(void) -{ - HeapTuple tuple; - HASH_SEQ_STATUS iter; - NamespaceSizeEntry *nsentry; - - hash_seq_init(&iter, namespace_size_map); - - while ((nsentry = hash_seq_search(&iter)) != NULL) - { - /* check if namespace is already be deleted */ - tuple = SearchSysCache1(NAMESPACEOID, ObjectIdGetDatum(nsentry->namespaceoid)); - if (!HeapTupleIsValid(tuple)) - { - remove_namespace_map(nsentry->namespaceoid); - continue; - } - ReleaseSysCache(tuple); - check_disk_quota_by_oid(nsentry->namespaceoid, nsentry->totalsize, NAMESPACE_QUOTA); - } -} - -/* - * Check the role quota limit and current usage - * Remove dropped role from roel_size_map - */ -static void -calculate_role_disk_usage(void) -{ - HeapTuple tuple; - HASH_SEQ_STATUS iter; - RoleSizeEntry *rolentry; - - hash_seq_init(&iter, role_size_map); - - while ((rolentry = hash_seq_search(&iter)) != NULL) - { - /* check if role is already be deleted */ - tuple = SearchSysCache1(AUTHOID, ObjectIdGetDatum(rolentry->owneroid)); - if (!HeapTupleIsValid(tuple)) - { - remove_role_map(rolentry->owneroid); - continue; - } - ReleaseSysCache(tuple); - check_disk_quota_by_oid(rolentry->owneroid, rolentry->totalsize, ROLE_QUOTA); - } -} - /* * Flush the table_size_map to user table diskquota.table_size * To improve update performance, we first delete all the need_to_flush @@ -825,7 +979,7 @@ flush_to_table_size(void) /* * Generate the new shared blacklist from the local_black_list which * exceed the quota limit. - * local_black_list is used to reduce the lock race. + * local_black_list is used to reduce the lock contention. */ static void flush_local_black_map(void) @@ -877,128 +1031,6 @@ flush_local_black_map(void) LWLockRelease(diskquota_locks.black_map_lock); } -/* - * Compare the disk quota limit and current usage of a database object. - * Put them into local blacklist if quota limit is exceeded. 
- */ -static void -check_disk_quota_by_oid(Oid targetOid, int64 current_usage, QuotaType type) -{ - bool found; - int32 quota_limit_mb; - int32 current_usage_mb; - LocalBlackMapEntry *localblackentry; - BlackMapEntry keyitem; - - QuotaLimitEntry *quota_entry; - - if (type == NAMESPACE_QUOTA) - { - quota_entry = (QuotaLimitEntry *) hash_search(namespace_quota_limit_map, - &targetOid, - HASH_FIND, &found); - } - else if (type == ROLE_QUOTA) - { - quota_entry = (QuotaLimitEntry *) hash_search(role_quota_limit_map, - &targetOid, - HASH_FIND, &found); - } - else - return; /* skip check if not namespace or role quota */ - - /* default no limit */ - if (!found) - return; - - quota_limit_mb = quota_entry->limitsize; - current_usage_mb = current_usage / (1024 * 1024); - if (current_usage_mb >= quota_limit_mb) - { - memset(&keyitem, 0, sizeof(BlackMapEntry)); - keyitem.targetoid = targetOid; - keyitem.databaseoid = MyDatabaseId; - keyitem.targettype = (uint32) type; - ereport(DEBUG1, (errmsg("[diskquota] Put object %u to blacklist with quota limit:%d, current usage:%d", - targetOid, quota_limit_mb, current_usage_mb))); - localblackentry = (LocalBlackMapEntry *) hash_search(local_disk_quota_black_map, - &keyitem, - HASH_ENTER, &found); - localblackentry->isexceeded = true; - } - -} - -/* - * Remove a namespace from local namespace_size_map - */ -static void -remove_namespace_map(Oid namespaceoid) -{ - hash_search(namespace_size_map, - &namespaceoid, - HASH_REMOVE, NULL); -} - -/* - * Update the current disk usage of a namespace in namespace_size_map. - */ -static void -update_namespace_map(Oid namespaceoid, int64 updatesize) -{ - bool found; - NamespaceSizeEntry *nsentry; - - nsentry = (NamespaceSizeEntry *) hash_search(namespace_size_map, - &namespaceoid, - HASH_ENTER, &found); - if (!found) - { - nsentry->namespaceoid = namespaceoid; - nsentry->totalsize = updatesize; - } - else - { - nsentry->totalsize += updatesize; - } - -} - -/* - * Remove a namespace from local role_size_map - */ -static void -remove_role_map(Oid owneroid) -{ - hash_search(role_size_map, - &owneroid, - HASH_REMOVE, NULL); -} - -/* - * Update the current disk usage of a namespace in role_size_map. - */ -static void -update_role_map(Oid owneroid, int64 updatesize) -{ - bool found; - RoleSizeEntry *rolentry; - - rolentry = (RoleSizeEntry *) hash_search(role_size_map, - &owneroid, - HASH_ENTER, &found); - if (!found) - { - rolentry->owneroid = owneroid; - rolentry->totalsize = updatesize; - } - else - { - rolentry->totalsize += updatesize; - } - -} - /* * Make sure a StringInfo's string is no longer than 'nchars' characters. */ @@ -1074,115 +1106,130 @@ load_quotas(void) static void do_load_quotas(void) { - int ret; + int ret; TupleDesc tupdesc; - int i; - bool found; - QuotaLimitEntry *quota_entry; - HASH_SEQ_STATUS iter; + int i; /* * TODO: we should skip to reload quota config when there is no change in * quota.config. A flag in shared memory could be used to detect the quota * config change. 
*/ - /* clear entries in quota limit map */ - hash_seq_init(&iter, namespace_quota_limit_map); - while ((quota_entry = hash_seq_search(&iter)) != NULL) + clear_all_quota_maps(); + const unsigned int NUM_ATTRIBUTES = 4; + + /* + * read quotas from diskquota.quota_config and target table + */ + + Oid nsoid = get_namespace_oid("diskquota", false); + if (nsoid == InvalidOid) + ereport(ERROR, + (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("[diskquota] diskquota schema doesn't exist in database \"%s\"," + " please recreate diskquota extension", + get_database_name(MyDatabaseId)))); + Oid targetTableOid = get_relname_relid("target", nsoid); + /* + * For diskquota 1.0, there is no target table in the diskquota schema. + * Why do we need this? + * When we upgrade the diskquota extension from 1.0 to another version, + * we must first load the new diskquota.so and then execute the + * upgrade SQL. Between the two steps, the new diskquota.so + * needs to work with the old version's SQL file, otherwise + * the init work will fail and diskquota cannot work correctly. + * This may not be the best solution, only a workaround; optimizing + * the init procedure is a better solution. + */ + if (targetTableOid == InvalidOid) { - (void) hash_search(namespace_quota_limit_map, - (void *) &quota_entry->targetoid, - HASH_REMOVE, NULL); + ret = SPI_execute("select targetoid, quotatype, quotalimitMB, 0 as tablespaceoid from diskquota.quota_config", true, 0); } - - hash_seq_init(&iter, role_quota_limit_map); - while ((quota_entry = hash_seq_search(&iter)) != NULL) + else { - (void) hash_search(role_quota_limit_map, - (void *) &quota_entry->targetoid, - HASH_REMOVE, NULL); + ret = SPI_execute( + "SELECT targetOid, c.quotaType, quotalimitMB, COALESCE(tablespaceoid, 0)" + "FROM diskquota.quota_config c LEFT OUTER JOIN diskquota.target t " + "ON c.targetOid = t.primaryOid and c.quotatype = t.quotatype", true, 0); } - - /* - * read quotas from diskquota.quota_config - */ - ret = SPI_execute("select targetoid, quotatype, quotalimitMB from diskquota.quota_config", true, 0); if (ret != SPI_OK_SELECT) - ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("[diskquota] load_quotas SPI_execute failed: error code %d", ret))); + ereport(ERROR, + (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("[diskquota] load_quotas SPI_execute failed: error code %d", ret))); tupdesc = SPI_tuptable->tupdesc; - if (tupdesc->natts != 3 || - ((tupdesc)->attrs[0])->atttypid != OIDOID || - ((tupdesc)->attrs[1])->atttypid != INT4OID || - ((tupdesc)->attrs[2])->atttypid != INT8OID) + if (tupdesc->natts != NUM_ATTRIBUTES || + ((tupdesc)->attrs[0])->atttypid != OIDOID || + ((tupdesc)->attrs[1])->atttypid != INT4OID || + ((tupdesc)->attrs[2])->atttypid != INT8OID) { - ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("[diskquota] configuration table \"quota_config\" is corrupted in database \"%s\"," - " please recreate diskquota extension", - get_database_name(MyDatabaseId)))); + ereport(ERROR, + (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("[diskquota] configuration table is corrupted in database \"%s\"," + " please recreate diskquota extension", + get_database_name(MyDatabaseId)))); } for (i = 0; i < SPI_processed; i++) { HeapTuple tup = SPI_tuptable->vals[i]; - Datum dat; - Oid targetOid; - int64 quota_limit_mb; - QuotaType quotatype; - bool isnull; + Datum vals[NUM_ATTRIBUTES]; + bool isnull[NUM_ATTRIBUTES]; - dat = SPI_getbinval(tup, tupdesc, 1, &isnull); - if (isnull) - continue; - targetOid = DatumGetObjectId(dat); - - dat =
SPI_getbinval(tup, tupdesc, 2, &isnull); - if (isnull) - continue; - quotatype = (QuotaType) DatumGetInt32(dat); + for (int i = 0; i < NUM_ATTRIBUTES; ++i) + { + vals[i] = SPI_getbinval(tup, tupdesc, i + 1, &(isnull[i])); + if (i <= 2 && isnull[i]) + { + ereport(ERROR, + (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("[diskquota] attributes in configuration table MUST NOT be NULL"))); + } + } - dat = SPI_getbinval(tup, tupdesc, 3, &isnull); - if (isnull) - continue; - quota_limit_mb = DatumGetInt64(dat); + Oid targetOid = DatumGetObjectId(vals[0]); + int quotaType = (QuotaType) DatumGetInt32(vals[1]); + int64 quota_limit_mb = DatumGetInt64(vals[2]); + Oid spcOid = DatumGetObjectId(vals[3]); - if (quotatype == NAMESPACE_QUOTA) + if (spcOid == InvalidOid) { - quota_entry = (QuotaLimitEntry *) hash_search(namespace_quota_limit_map, - &targetOid, - HASH_ENTER, &found); - quota_entry->limitsize = quota_limit_mb; + if (quota_info[quotaType].num_keys != 1) { + ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("[diskquota] tablespace Oid MUST NOT be NULL for quota type: %d", quotaType))); + } + update_limit_for_quota(quota_limit_mb * (1 << 20), quotaType, (Oid[]){targetOid}); } - else if (quotatype == ROLE_QUOTA) + else { - quota_entry = (QuotaLimitEntry *) hash_search(role_quota_limit_map, - &targetOid, - HASH_ENTER, &found); - quota_entry->limitsize = quota_limit_mb; + update_limit_for_quota(quota_limit_mb * (1 << 20), quotaType, (Oid[]){targetOid, spcOid}); } } + + vacuum_all_quota_maps(); return; } /* * Given table oid, search for namespace and owner. */ -static void -get_rel_owner_schema(Oid relid, Oid *ownerOid, Oid *nsOid) +static bool +get_rel_owner_schema_tablespace(Oid relid, Oid *ownerOid, Oid *nsOid, Oid *tablespace_oid) { HeapTuple tp; tp = SearchSysCache1(RELOID, ObjectIdGetDatum(relid)); + bool found = HeapTupleIsValid(tp); if (HeapTupleIsValid(tp)) { Form_pg_class reltup = (Form_pg_class) GETSTRUCT(tp); *ownerOid = reltup->relowner; *nsOid = reltup->relnamespace; + *tablespace_oid = reltup->reltablespace; ReleaseSysCache(tp); } - return; + return found; } /* @@ -1195,6 +1242,7 @@ quota_check_common(Oid reloid) { Oid ownerOid = InvalidOid; Oid nsOid = InvalidOid; + Oid tablespace_oid = InvalidOid; bool found; BlackMapEntry keyitem; @@ -1202,45 +1250,47 @@ quota_check_common(Oid reloid) { return true; } - memset(&keyitem, 0, sizeof(BlackMapEntry)); - get_rel_owner_schema(reloid, &ownerOid, &nsOid); + + bool found_rel = get_rel_owner_schema_tablespace(reloid, &ownerOid, &nsOid, &tablespace_oid); + if (!found_rel) + { + return true; + } LWLockAcquire(diskquota_locks.black_map_lock, LW_SHARED); - - /* check schema quota */ - if (nsOid != InvalidOid) + for (QuotaType type = 0; type < NUM_QUOTA_TYPES; ++type) { - keyitem.targetoid = nsOid; - keyitem.databaseoid = MyDatabaseId; - keyitem.targettype = NAMESPACE_QUOTA; - hash_search(disk_quota_black_map, - &keyitem, - HASH_FIND, &found); - if (found) + if (type == ROLE_QUOTA || type == ROLE_TABLESPACE_QUOTA) + { + keyitem.targetoid = ownerOid; + } + else if (type == NAMESPACE_QUOTA || type == NAMESPACE_TABLESPACE_QUOTA) + { + keyitem.targetoid = nsOid; + } + else { - LWLockRelease(diskquota_locks.black_map_lock); ereport(ERROR, - (errcode(ERRCODE_DISK_FULL), - errmsg("schema's disk space quota exceeded with name:%s", get_namespace_name(nsOid)))); - return false; + (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("[diskquota] unknown quota type: %d", type))); + } + if (type == ROLE_TABLESPACE_QUOTA || type == NAMESPACE_TABLESPACE_QUOTA) + {
keyitem.tablespace_oid = tablespace_oid; + } + else + { + /* refer to add_quota_to_blacklist */ + keyitem.tablespace_oid = InvalidOid; } - - } - - /* check role quota */ - if (ownerOid != InvalidOid) - { - keyitem.targetoid = ownerOid; keyitem.databaseoid = MyDatabaseId; - keyitem.targettype = ROLE_QUOTA; + keyitem.targettype = type; hash_search(disk_quota_black_map, &keyitem, HASH_FIND, &found); if (found) { LWLockRelease(diskquota_locks.black_map_lock); - ereport(ERROR, - (errcode(ERRCODE_DISK_FULL), - errmsg("role's disk space quota exceeded with name:%s", GetUserNameFromId(ownerOid)))); + export_exceeded_error(&keyitem); return false; } } @@ -1268,3 +1318,36 @@ invalidate_database_blackmap(Oid dbid) } LWLockRelease(diskquota_locks.black_map_lock); } + +static void +export_exceeded_error(BlackMapEntry *blackentry) +{ + switch(blackentry->targettype) + { + case NAMESPACE_QUOTA: + ereport(ERROR, + (errcode(ERRCODE_DISK_FULL), + errmsg("schema's disk space quota exceeded with name:%s", get_namespace_name(blackentry->targetoid)))); + break; + case ROLE_QUOTA: + ereport(ERROR, + (errcode(ERRCODE_DISK_FULL), + errmsg("role's disk space quota exceeded with name:%s", GetUserNameFromId(blackentry->targetoid)))); + break; + case NAMESPACE_TABLESPACE_QUOTA: + ereport(ERROR, + (errcode(ERRCODE_DISK_FULL), + errmsg("tablespace:%s schema:%s diskquota exceeded", get_tablespace_name(blackentry->tablespace_oid), get_namespace_name(blackentry->targetoid)))); + break; + case ROLE_TABLESPACE_QUOTA: + ereport(ERROR, + (errcode(ERRCODE_DISK_FULL), + errmsg("tablespace: %s role: %s diskquota exceeded", get_tablespace_name(blackentry->tablespace_oid), GetUserNameFromId(blackentry->targetoid)))); + break; + default : + ereport(ERROR, + (errcode(ERRCODE_DISK_FULL), + errmsg("diskquota exceeded, unknown quota type"))); + } + +} diff --git a/sql/prepare.sql b/sql/prepare.sql index 63b7c268c34..2ce7c902eea 100644 --- a/sql/prepare.sql +++ b/sql/prepare.sql @@ -10,6 +10,7 @@ SELECT pg_sleep(15); -- prepare a schema that has reached quota limit CREATE SCHEMA badquota; SELECT diskquota.set_schema_quota('badquota', '1 MB'); +DROP ROLE IF EXISTS testbody; CREATE ROLE testbody; CREATE TABLE badquota.t1(i INT); ALTER TABLE badquota.t1 OWNER TO testbody; diff --git a/sql/test_tablespace_role.sql b/sql/test_tablespace_role.sql new file mode 100644 index 00000000000..1094f000322 --- /dev/null +++ b/sql/test_tablespace_role.sql @@ -0,0 +1,85 @@ +-- Test role quota +-- start_ignore +\! 
mkdir /tmp/rolespc +-- end_ignore +DROP TABLESPACE IF EXISTS rolespc; +CREATE TABLESPACE rolespc LOCATION '/tmp/rolespc'; +CREATE SCHEMA rolespcrole; +SET search_path TO rolespcrole; + +DROP ROLE IF EXISTS rolespcu1; +DROP ROLE IF EXISTS rolespcu2; +CREATE ROLE rolespcu1 NOLOGIN; +CREATE ROLE rolespcu2 NOLOGIN; +CREATE TABLE b (t TEXT) TABLESPACE rolespc; +ALTER TABLE b OWNER TO rolespcu1; +CREATE TABLE b2 (t TEXT) TABLESPACE rolespc; +ALTER TABLE b2 OWNER TO rolespcu1; + +SELECT diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '1 MB'); + +INSERT INTO b SELECT generate_series(1,100); +-- expect insert success +INSERT INTO b SELECT generate_series(1,100000); +SELECT pg_sleep(5); +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); +-- expect insert fail +INSERT INTO b2 SELECT generate_series(1,100); + +-- Test show_fast_role_tablespace_quota_view +SELECT role_name, tablespace_name, quota_in_mb, rolsize_tablespace_in_bytes FROM diskquota.show_fast_role_tablespace_quota_view WHERE role_name = 'rolespcu1' and tablespace_name = 'rolespc'; + +-- Test alter owner +ALTER TABLE b OWNER TO rolespcu2; +SELECT pg_sleep(20); +-- expect insert succeed +INSERT INTO b SELECT generate_series(1,100); +-- expect insert succeed +INSERT INTO b2 SELECT generate_series(1,100); +ALTER TABLE b OWNER TO rolespcu1; +SELECT pg_sleep(20); +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); + +-- Test alter tablespace +-- start_ignore +\! mkdir /tmp/rolespc2 +-- end_ignore +DROP TABLESPACE IF EXISTS rolespc2; +CREATE TABLESPACE rolespc2 LOCATION '/tmp/rolespc2'; +ALTER TABLE b SET TABLESPACE rolespc2; +SELECT pg_sleep(20); +-- expect insert succeed +INSERT INTO b SELECT generate_series(1,100); +-- alter table b back to tablespace rolespc +ALTER TABLE b SET TABLESPACE rolespc; +SELECT pg_sleep(20); +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); + +-- Test update quota config +SELECT diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '10 MB'); +SELECT pg_sleep(20); +-- expect insert success +INSERT INTO b SELECT generate_series(1,100); +-- expect insert success +INSERT INTO b SELECT generate_series(1,1000000); +SELECT pg_sleep(5); +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); + +-- Test delete quota config +SELECT diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '0 MB'); +SELECT pg_sleep(5); +-- expect insert success +INSERT INTO b SELECT generate_series(1,100); + +DROP TABLE b, b2; +DROP ROLE rolespcu1, rolespcu2; +RESET search_path; +DROP SCHEMA rolespcrole; +DROP TABLESPACE rolespc; +DROP TABLESPACE rolespc2; +\! rm -rf /tmp/rolespc; +\! rm -rf /tmp/rolespc2 diff --git a/sql/test_tablespace_schema.sql b/sql/test_tablespace_schema.sql new file mode 100644 index 00000000000..0b541d00618 --- /dev/null +++ b/sql/test_tablespace_schema.sql @@ -0,0 +1,75 @@ +-- Test schema +-- start_ignore +\!
mkdir /tmp/schemaspc +-- end_ignore +CREATE SCHEMA spcs1; +DROP TABLESPACE IF EXISTS schemaspc; +CREATE TABLESPACE schemaspc LOCATION '/tmp/schemaspc'; +SELECT diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc','1 MB'); +SET search_path TO spcs1; + +CREATE TABLE a(i int) TABLESPACE schemaspc; +INSERT INTO a SELECT generate_series(1,100); +-- expect insert success +INSERT INTO a SELECT generate_series(1,100000); +SELECT pg_sleep(5); +-- expect insert fail +INSERT INTO a SELECT generate_series(1,100); +CREATE TABLE a2(i int) TABLESPACE schemaspc; +-- expect insert fail +INSERT INTO a2 SELECT generate_series(1,100); + +-- Test alter table set schema +CREATE SCHEMA spcs2; +ALTER TABLE spcs1.a SET SCHEMA spcs2; +SELECT pg_sleep(20); +-- expect insert succeed +INSERT INTO a2 SELECT generate_series(1,200); +-- expect insert succeed +INSERT INTO spcs2.a SELECT generate_series(1,200); +ALTER TABLE spcs2.a SET SCHEMA spcs1; +SELECT pg_sleep(10); +-- expect insert fail +INSERT INTO a SELECT generate_series(1,200); +SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespcae_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name = 'spcs1' and tablespace_name ='schemaspc'; + +-- Test alter tablespace +-- start_ignore +\! mkdir /tmp/schemaspc2 +-- end_ignore +DROP TABLESPACE IF EXISTS schemaspc2; +CREATE TABLESPACE schemaspc2 LOCATION '/tmp/schemaspc2'; +ALTER TABLE a SET TABLESPACE schemaspc2; +SELECT pg_sleep(20); +-- expect insert succeed +INSERT INTO a SELECT generate_series(1,200); +ALTER TABLE a SET TABLESPACE schemaspc; +SELECT pg_sleep(20); +-- expect insert fail +INSERT INTO a SELECT generate_series(1,200); + +-- Test update quota config +SELECT diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc', '10 MB'); +SELECT pg_sleep(20); +-- expect insert success +INSERT INTO a SELECT generate_series(1,100); +-- expect insert success +INSERT INTO a SELECT generate_series(1,1000000); +SELECT pg_sleep(5); +-- expect insert fail +INSERT INTO a SELECT generate_series(1,100); + +-- Test delete quota config +SELECT diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc', '0 MB'); +SELECT pg_sleep(5); +-- expect insert success +INSERT INTO a SELECT generate_series(1,100); + +RESET search_path; +DROP TABLE spcs1.a2, spcs1.a; +DROP SCHEMA spcs1, spcs2; +DROP TABLESPACE schemaspc; +DROP TABLESPACE schemaspc2; +\! rm -rf /tmp/schemaspc +\! rm -rf /tmp/schemaspc2 + From 97f2d8bbe0da4ae9755b76b7331f8fa0904a44db Mon Sep 17 00:00:00 2001 From: Xiaoran Wang Date: Tue, 31 Aug 2021 17:34:13 +0800 Subject: [PATCH 069/330] Support tablespace per segment ratio (#62) * Support setting a tablespace per-segment ratio to limit disk usage on each segment. SELECT diskquota.set_per_segment_quota('%tablespace_name%', ratio) 1) Add one column "segid" to table_size, and change the primary key. CREATE TABLE diskquota.table_size (tableid oid, size bigint, segid smallint, PRIMARY KEY(tableid, segid)); when segid is -1, the size is the sum of the table's size across master and segments; when segid is a specific segment id, the size is the table's size on that segment. Modify the init_table_size_table, load_table_size and flush_to_table_size functions. 2) Add segid in DiskQuotaActiveTableEntry, changing the map key to segid + relOid. In function pull_active_table_size_from_seg, we put the active tables of the master and all segments, together with their table sizes, into the active table map. Change diskquota.diskquota_active_table_type, adding the attribute "GP_SEGMENT_ID" to it. 3) Add segid in QuotaMapEntry. When segid is -1, we set the limit to be the quota limit and the size to be the sum of sizes from master and segments. Otherwise, we set the limit to be the per-segment quota limit, and the size is the size on that segment. When the size exceeds the limit, the query will be rejected. About segid: the segid is the same as the content id in gp_segment_configuration; since content ids are continuous, it's safe to use SEGCOUNT (the total number of segments) to derive the segid.
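(Editorial aside, not part of the commit: a quick illustration of the table_size layout described above; the table name t is hypothetical.)

-- cluster-wide total size of table t (the segid = -1 row)
SELECT size FROM diskquota.table_size WHERE tableid = 't'::regclass AND segid = -1;
-- size of table t on segment 0 only
SELECT size FROM diskquota.table_size WHERE tableid = 't'::regclass AND segid = 0;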
* update upgrade_test: support testing old-version extension scripts with the new-version diskquota.so. When upgrading the diskquota extension from 1.0 to 2.0, before executing the SQL "alter extension diskquota update to '2.0'", diskquota.so 2.0 needs to work with diskquota--1.0.sql. So many places in the code use the extversion variable to handle this. For example, table_size's schema differs between 1.0 and 2.0, so we check extversion before using it. * Report error when the quota is 0 or the segratio is 0 --- concourse/scripts/test_diskquota.sh | 2 +- concourse/scripts/upgrade_extension.sh | 13 +- diskquota--1.0--2.0.sql | 26 +- diskquota--2.0.sql | 22 +- diskquota.h | 3 + diskquota_schedule | 2 +- diskquota_utility.c | 255 ++++++++- expected/test_mistake.out | 27 + expected/test_tablespace_role.out | 2 +- expected/test_tablespace_role_perseg.out | 197 +++++++ expected/test_tablespace_schema.out | 6 +- expected/test_tablespace_schema_perseg.out | 198 +++++++ gp_activetable.c | 154 +++-- gp_activetable.h | 9 +- init_file | 2 + quotamodel.c | 526 +++++++++++------- sql/test_mistake.sql | 22 + sql/test_tablespace_role.sql | 2 +- sql/test_tablespace_role_perseg.sql | 95 ++++ sql/test_tablespace_schema.sql | 4 +- sql/test_tablespace_schema_perseg.sql | 89 +++ upgrade_test/diskquota_schedule_upgrade | 14 +- upgrade_test/expected/clean.out | 46 +- upgrade_test/expected/install_old_version.out | 2 + upgrade_test/expected/prepare.out | 28 + upgrade_test/expected/test_role.out | 4 + .../expected/test_tablespace_role.out | 151 +++++ .../expected/test_tablespace_role_perseg.out | 197 +++++++ .../expected/test_tablespace_schema.out | 139 +++++ .../test_tablespace_schema_perseg.out | 198 +++++++ upgrade_test/sql/clean.sql | 29 +- upgrade_test/sql/install_old_version.sql | 2 + upgrade_test/sql/prepare.sql | 13 + upgrade_test/sql/test_role.sql | 4 + upgrade_test/sql/test_tablespace_role.sql | 85 +++ .../sql/test_tablespace_role_perseg.sql | 95 ++++ upgrade_test/sql/test_tablespace_schema.sql | 75 +++ .../sql/test_tablespace_schema_perseg.sql | 89 +++ 38 files changed, 2520 insertions(+), 307 deletions(-) create mode 100644 expected/test_tablespace_role_perseg.out create mode 100644 expected/test_tablespace_schema_perseg.out create mode 100644 sql/test_tablespace_role_perseg.sql create mode 100644 sql/test_tablespace_schema_perseg.sql create mode 100644 upgrade_test/expected/install_old_version.out create mode 100644 upgrade_test/expected/test_tablespace_role.out create mode 100644 upgrade_test/expected/test_tablespace_role_perseg.out create mode 100644 upgrade_test/expected/test_tablespace_schema.out create mode 100644 upgrade_test/expected/test_tablespace_schema_perseg.out create mode 100644 upgrade_test/sql/install_old_version.sql create mode 100644 upgrade_test/sql/test_tablespace_role.sql create mode 100644 upgrade_test/sql/test_tablespace_role_perseg.sql create mode 100644 upgrade_test/sql/test_tablespace_schema.sql create mode 100644 upgrade_test/sql/test_tablespace_schema_perseg.sql
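(Editorial aside, not from the commit: a minimal usage sketch of the new per-segment API under the rules above; schema s1, tablespace spc1, and the ratio value are hypothetical. Per the commit, set_per_segment_quota errors out unless some role or schema quota already references the tablespace, and the per-segment limit is then derived from the tablespace quota and this ratio.)

-- a tablespace-bound quota must exist first
SELECT diskquota.set_schema_tablespace_quota('s1', 'spc1', '10 MB');
-- then scale each segment's allowance by the ratio
SELECT diskquota.set_per_segment_quota('spc1', 2.0);

diff --git a/concourse/scripts/test_diskquota.sh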
b/concourse/scripts/test_diskquota.sh index 69d7e943261..709b71b9fac 100755 --- a/concourse/scripts/test_diskquota.sh +++ b/concourse/scripts/test_diskquota.sh @@ -15,7 +15,7 @@ function _main() { time make_cluster time install_diskquota - if [ "${DISKQUOTA_OS}" == "ubuntu18.04" ]; then + if [ "${DISKQUOTA_OS}" == "ubuntu18.04" -o "${DISKQUOTA_OS}" == "rhel6" ]; then CUT_NUMBER=6 fi diff --git a/concourse/scripts/upgrade_extension.sh b/concourse/scripts/upgrade_extension.sh index c1523f01caa..a867819563d 100755 --- a/concourse/scripts/upgrade_extension.sh +++ b/concourse/scripts/upgrade_extension.sh @@ -10,11 +10,11 @@ CUT_NUMBER=5 source "${GPDB_CONCOURSE_DIR}/common.bash" source "${TOP_DIR}/diskquota_src/concourse/scripts/test_common.sh" +# these two functions are called by upgrade_test function install_old_version_diskquota() { - tar -xzf bin_diskquota_old/*.tar.gz -C /usr/local/greenplum-db-devel + tar -xzf ../../bin_diskquota_old/*.tar.gz -C /usr/local/greenplum-db-devel } -# this function is called by upgrade_test/sql/upgrade_extension.sql function install_new_version_diskquota() { # the current dir is upgrade_test tar -xzf ../../bin_diskquota_new/*.tar.gz -C /usr/local/greenplum-db-devel @@ -28,11 +28,10 @@ function _main() { if [ "${DISKQUOTA_OS}" == "ubuntu18.04" ]; then CUT_NUMBER=6 fi - - # install old_version diskquota - time install_old_version_diskquota - # export install_new_version_diskquota function, becuase it will - # be called by upgrade_test/sql/upgrade_extension.sql + # first install an old version of diskquota to start diskquota + tar -xzf bin_diskquota_old/*.tar.gz -C /usr/local/greenplum-db-devel + # export the install_old_version_diskquota and install_new_version_diskquota functions, because they will be called by upgrade_test + export -f install_old_version_diskquota + export -f install_new_version_diskquota time test ${TOP_DIR}/diskquota_src/upgrade_test false } diff --git a/diskquota--1.0--2.0.sql b/diskquota--1.0--2.0.sql index 3816b3bf4be..8021f216b50 100644 --- a/diskquota--1.0--2.0.sql +++ b/diskquota--1.0--2.0.sql @@ -1,3 +1,5 @@ +ALTER TABLE diskquota.quota_config ADD COLUMN segratio float4 DEFAULT -1; + CREATE TABLE diskquota.target ( quotatype int, --REFERENCES disquota.quota_config.quotatype, primaryOid oid, @@ -15,13 +17,22 @@ RETURNS void STRICT AS 'MODULE_PATHNAME' LANGUAGE C; +CREATE OR REPLACE FUNCTION diskquota.set_per_segment_quota(text, float4) +RETURNS void STRICT +AS 'MODULE_PATHNAME' +LANGUAGE C; + +ALTER TABLE diskquota.table_size ADD COLUMN segid smallint DEFAULT -1; +ALTER TABLE diskquota.table_size DROP CONSTRAINT table_size_pkey; +ALTER TABLE diskquota.table_size ADD PRIMARY KEY (tableid,segid); + CREATE OR REPLACE VIEW diskquota.show_fast_schema_quota_view AS select pgns.nspname as schema_name, pgc.relnamespace as schema_oid, qc.quotalimitMB as quota_in_mb, sum(ts.size) as nspsize_in_bytes from diskquota.table_size as ts, pg_class as pgc, diskquota.quota_config as qc, pg_namespace as pgns -where ts.tableid = pgc.oid and qc.targetoid = pgc.relnamespace and pgns.oid = pgc.relnamespace and qc.quotatype=0 +where ts.tableid = pgc.oid and qc.targetoid = pgc.relnamespace and pgns.oid = pgc.relnamespace and qc.quotatype=0 and ts.segid=-1 group by relnamespace, qc.quotalimitMB, pgns.nspname order by pgns.nspname; @@ -31,18 +42,18 @@ from diskquota.table_size as ts, pg_class as pgc, diskquota.quota_config as qc, pg_roles as pgr -WHERE pgc.relowner =
qc.targetoid and pgc.relowner = pgr.oid and ts.tableid = pgc.oid and qc.quotatype=1 and ts.segid=-1 GROUP BY pgc.relowner, pgr.rolname, qc.quotalimitMB; CREATE OR REPLACE VIEW diskquota.show_fast_schema_tablespace_quota_view AS -select pgns.nspname as schema_name, pgc.relnamespace as schema_oid, pgsp.spcname as tablespace_name, pgc.reltablespace as tablespace_oid, qc.quotalimitMB as quota_in_mb, sum(ts.size) as nspsize_tablespcae_in_bytes +select pgns.nspname as schema_name, pgc.relnamespace as schema_oid, pgsp.spcname as tablespace_name, pgc.reltablespace as tablespace_oid, qc.quotalimitMB as quota_in_mb, sum(ts.size) as nspsize_tablespace_in_bytes from diskquota.table_size as ts, pg_class as pgc, diskquota.quota_config as qc, pg_namespace as pgns, pg_tablespace as pgsp, diskquota.target as t -where ts.tableid = pgc.oid and qc.targetoid = pgc.relnamespace and pgns.oid = pgc.relnamespace and pgsp.oid = pgc.reltablespace and qc.quotatype=2 and qc.targetoid=t.primaryoid and t.tablespaceoid=pgc.reltablespace +where ts.tableid = pgc.oid and qc.targetoid = pgc.relnamespace and pgns.oid = pgc.relnamespace and pgsp.oid = pgc.reltablespace and qc.quotatype=2 and qc.targetoid=t.primaryoid and t.tablespaceoid=pgc.reltablespace and ts.segid=-1 group by relnamespace, reltablespace, qc.quotalimitMB, pgns.nspname, pgsp.spcname order by pgns.nspname, pgsp.spcname; @@ -54,5 +65,10 @@ from diskquota.table_size as ts, pg_roles as pgr, pg_tablespace as pgsp, diskquota.target as t -WHERE pgc.relowner = qc.targetoid and pgc.relowner = pgr.oid and ts.tableid = pgc.oid and pgsp.oid = pgc.reltablespace and qc.quotatype=3 and qc.targetoid=t.primaryoid and t.tablespaceoid=pgc.reltablespace +WHERE pgc.relowner = qc.targetoid and pgc.relowner = pgr.oid and ts.tableid = pgc.oid and pgsp.oid = pgc.reltablespace and qc.quotatype=3 and qc.targetoid=t.primaryoid and t.tablespaceoid=pgc.reltablespace and ts.segid=-1 GROUP BY pgc.relowner, reltablespace, pgr.rolname, pgsp.spcname, qc.quotalimitMB; + +CREATE OR REPLACE VIEW diskquota.show_fast_database_size_view AS +SELECT ((SELECT SUM(pg_relation_size(oid)) FROM pg_class WHERE oid <= 16384)+ (SELECT SUM(size) FROM diskquota.table_size WHERE segid = -1)) AS dbsize; + +ALTER TYPE diskquota.diskquota_active_table_type ADD ATTRIBUTE "GP_SEGMENT_ID" smallint; diff --git a/diskquota--2.0.sql b/diskquota--2.0.sql index 2a5c8b58f31..4a4c4b0f700 100644 --- a/diskquota--2.0.sql +++ b/diskquota--2.0.sql @@ -6,7 +6,7 @@ CREATE SCHEMA diskquota; -- Configuration table -CREATE TABLE diskquota.quota_config (targetOid oid, quotatype int, quotalimitMB int8, PRIMARY KEY(targetOid, quotatype)); +CREATE TABLE diskquota.quota_config (targetOid oid, quotatype int, quotalimitMB int8, segratio float4 DEFAULT -1, PRIMARY KEY(targetOid, quotatype)); CREATE TABLE diskquota.target ( quotatype int, --REFERENCES disquota.quota_config.quotatype, @@ -38,13 +38,17 @@ RETURNS void STRICT AS 'MODULE_PATHNAME' LANGUAGE C; +CREATE OR REPLACE FUNCTION diskquota.set_per_segment_quota(text, float4) +RETURNS void STRICT +AS 'MODULE_PATHNAME' +LANGUAGE C; CREATE FUNCTION diskquota.update_diskquota_db_list(oid, int4) RETURNS void STRICT AS 'MODULE_PATHNAME' LANGUAGE C; -CREATE TABLE diskquota.table_size (tableid oid, size bigint, PRIMARY KEY(tableid)); +CREATE TABLE diskquota.table_size (tableid oid, size bigint, segid smallint, PRIMARY KEY(tableid, segid)); CREATE TABLE diskquota.state (state int, PRIMARY KEY(state)); @@ -66,7 +70,7 @@ from diskquota.table_size as ts, pg_class as pgc, diskquota.quota_config as qc, 
pg_namespace as pgns -where ts.tableid = pgc.oid and qc.targetoid = pgc.relnamespace and pgns.oid = pgc.relnamespace and qc.quotatype=0 +where ts.tableid = pgc.oid and qc.targetoid = pgc.relnamespace and pgns.oid = pgc.relnamespace and qc.quotatype=0 and ts.segid=-1 group by relnamespace, qc.quotalimitMB, pgns.nspname order by pgns.nspname; @@ -76,18 +80,18 @@ from diskquota.table_size as ts, pg_class as pgc, diskquota.quota_config as qc, pg_roles as pgr -WHERE pgc.relowner = qc.targetoid and pgc.relowner = pgr.oid and ts.tableid = pgc.oid and qc.quotatype=1 +WHERE pgc.relowner = qc.targetoid and pgc.relowner = pgr.oid and ts.tableid = pgc.oid and qc.quotatype=1 and ts.segid=-1 GROUP BY pgc.relowner, pgr.rolname, qc.quotalimitMB; CREATE VIEW diskquota.show_fast_schema_tablespace_quota_view AS -select pgns.nspname as schema_name, pgc.relnamespace as schema_oid, pgsp.spcname as tablespace_name, pgc.reltablespace as tablespace_oid, qc.quotalimitMB as quota_in_mb, sum(ts.size) as nspsize_tablespcae_in_bytes +select pgns.nspname as schema_name, pgc.relnamespace as schema_oid, pgsp.spcname as tablespace_name, pgc.reltablespace as tablespace_oid, qc.quotalimitMB as quota_in_mb, sum(ts.size) as nspsize_tablespace_in_bytes from diskquota.table_size as ts, pg_class as pgc, diskquota.quota_config as qc, pg_namespace as pgns, pg_tablespace as pgsp, diskquota.target as t -where ts.tableid = pgc.oid and qc.targetoid = pgc.relnamespace and pgns.oid = pgc.relnamespace and pgsp.oid = pgc.reltablespace and qc.quotatype=2 and qc.targetoid=t.primaryoid and t.tablespaceoid=pgc.reltablespace +where ts.tableid = pgc.oid and qc.targetoid = pgc.relnamespace and pgns.oid = pgc.relnamespace and pgsp.oid = pgc.reltablespace and qc.quotatype=2 and qc.targetoid=t.primaryoid and t.tablespaceoid=pgc.reltablespace and ts.segid=-1 group by relnamespace, reltablespace, qc.quotalimitMB, pgns.nspname, pgsp.spcname order by pgns.nspname, pgsp.spcname; @@ -99,13 +103,13 @@ from diskquota.table_size as ts, pg_roles as pgr, pg_tablespace as pgsp, diskquota.target as t -WHERE pgc.relowner = qc.targetoid and pgc.relowner = pgr.oid and ts.tableid = pgc.oid and pgsp.oid = pgc.reltablespace and qc.quotatype=3 and qc.targetoid=t.primaryoid and t.tablespaceoid=pgc.reltablespace +WHERE pgc.relowner = qc.targetoid and pgc.relowner = pgr.oid and ts.tableid = pgc.oid and pgsp.oid = pgc.reltablespace and qc.quotatype=3 and qc.targetoid=t.primaryoid and t.tablespaceoid=pgc.reltablespace and ts.segid=-1 GROUP BY pgc.relowner, reltablespace, pgr.rolname, pgsp.spcname, qc.quotalimitMB; CREATE VIEW diskquota.show_fast_database_size_view AS -SELECT ((SELECT SUM(pg_relation_size(oid)) FROM pg_class WHERE oid <= 16384)+ (SELECT SUM(size) FROM diskquota.table_size)) AS dbsize; +SELECT ((SELECT SUM(pg_relation_size(oid)) FROM pg_class WHERE oid <= 16384)+ (SELECT SUM(size) FROM diskquota.table_size WHERE segid = -1)) AS dbsize; -CREATE TYPE diskquota.diskquota_active_table_type AS ("TABLE_OID" oid, "TABLE_SIZE" int8); +CREATE TYPE diskquota.diskquota_active_table_type AS ("TABLE_OID" oid, "TABLE_SIZE" int8, "GP_SEGMENT_ID" smallint); CREATE OR REPLACE FUNCTION diskquota.diskquota_fetch_table_stat(int4, oid[]) RETURNS setof diskquota.diskquota_active_table_type AS 'MODULE_PATHNAME', 'diskquota_fetch_table_stat' diff --git a/diskquota.h b/diskquota.h index 9cff8633ce8..f07b96ba388 100644 --- a/diskquota.h +++ b/diskquota.h @@ -112,4 +112,7 @@ extern Datum diskquota_fetch_table_stat(PG_FUNCTION_ARGS); extern int diskquota_naptime; extern int 
diskquota_max_active_tables; +extern int SEGCOUNT; +extern int get_ext_major_version(void); +extern void truncateStringInfo(StringInfo str, int nchars); #endif diff --git a/diskquota_schedule b/diskquota_schedule index 4c0903a4877..edbe3bd9810 100644 --- a/diskquota_schedule +++ b/diskquota_schedule @@ -4,7 +4,7 @@ test: prepare # test: test_table_size test: test_fast_disk_check #test: test_insert_after_drop -test: test_role test_schema test_drop_table test_column test_copy test_update test_toast test_truncate test_reschema test_temp_role test_rename test_delete_quota test_mistake test_tablespace_role test_tablespace_schema +test: test_role test_schema test_drop_table test_column test_copy test_update test_toast test_truncate test_reschema test_temp_role test_rename test_delete_quota test_mistake test_tablespace_role test_tablespace_schema test_tablespace_role_perseg test_tablespace_schema_perseg test: test_truncate test: test_delete_quota test: test_partition diff --git a/diskquota_utility.c b/diskquota_utility.c index 6087a452b0f..eb700472632 100644 --- a/diskquota_utility.c +++ b/diskquota_utility.c @@ -39,8 +39,10 @@ #include "utils/formatting.h" #include "utils/memutils.h" #include "utils/numeric.h" +#include "utils/snapmgr.h" #include +#include #include "diskquota.h" #include "gp_activetable.h" @@ -54,6 +56,7 @@ PG_FUNCTION_INFO_V1(set_role_quota); PG_FUNCTION_INFO_V1(set_schema_tablespace_quota); PG_FUNCTION_INFO_V1(set_role_tablespace_quota); PG_FUNCTION_INFO_V1(update_diskquota_db_list); +PG_FUNCTION_INFO_V1(set_per_segment_quota); /* timeout count to wait response from launcher process, in 1/10 sec */ #define WAIT_TIME_COUNT 1200 @@ -66,6 +69,9 @@ static const char *ddl_err_code_to_err_message(MessageResult code); static int64 get_size_in_mb(char *str); static void set_quota_config_internal(Oid targetoid, int64 quota_limit_mb, QuotaType type); static void set_target_internal(Oid primaryoid, Oid spcoid, int64 quota_limit_mb, QuotaType type); +static bool generate_insert_table_size_sql(StringInfoData *buf, int extMajorVersion); + +int get_ext_major_version(void); /* ---- Help Functions to set quota limit. ---- */ /* @@ -77,12 +83,14 @@ Datum init_table_size_table(PG_FUNCTION_ARGS) { - int ret; - StringInfoData buf; + int ret; + StringInfoData buf; + StringInfoData insert_buf; - RangeVar *rv; + RangeVar *rv; Relation rel; - + int extMajorVersion; + bool insert_flag; /* * If error happens in init_table_size_table, just return error messages * to the client side. So there is no need to catch the error. */ @@ -100,19 +108,33 @@ init_table_size_table(PG_FUNCTION_ARGS) } heap_close(rel, NoLock); + /* + * Why don't we use insert into diskquota.table_size select from pg_total_relation_size here? + * + * insert into foo select oid, pg_total_relation_size(oid), -1 from pg_class where + * oid >= 16384 and (relkind='r' or relkind='m'); + * ERROR: This query is not currently supported by GPDB. (entry db 127.0.0.1:6000 pid=61114) + * + * Some functions, such as pg_total_relation_size, are peculiar in that + * they do their own dispatching. + * They do not work on the entry db since we do not support dispatching + * from the entry db currently. + */ SPI_connect(); + extMajorVersion = get_ext_major_version(); /* delete all the table size info in table_size if any exists.
*/ initStringInfo(&buf); + initStringInfo(&insert_buf); appendStringInfo(&buf, "delete from diskquota.table_size;"); ret = SPI_execute(buf.data, false, 0); if (ret != SPI_OK_DELETE) elog(ERROR, "cannot delete table_size table: error code %d", ret); - /* fetch table size */ + /* fetch table size for master */ resetStringInfo(&buf); appendStringInfo(&buf, - "select oid, pg_total_relation_size(oid)" + "select oid, pg_total_relation_size(oid), -1" " from pg_class" " where oid >= %u and (relkind='r' or relkind='m')", FirstNormalObjectId); @@ -120,31 +142,31 @@ init_table_size_table(PG_FUNCTION_ARGS) if (ret != SPI_OK_SELECT) elog(ERROR, "cannot fetch in pg_total_relation_size. error code %d", ret); - /* fill table_size table with table oid and size info. */ + /* fill table_size table with table oid and size info for master. */ + appendStringInfo(&insert_buf, + "insert into diskquota.table_size values"); + insert_flag = generate_insert_table_size_sql(&insert_buf, extMajorVersion); + /* fetch table size on segments */ resetStringInfo(&buf); appendStringInfo(&buf, - "insert into diskquota.table_size values"); - TupleDesc tupdesc = SPI_tuptable->tupdesc; - for(int i = 0; i < SPI_processed; i++) - { - HeapTuple tup; - bool isnull; - Oid oid; - int64 sz; - - tup = SPI_tuptable->vals[i]; - oid = SPI_getbinval(tup,tupdesc, 1, &isnull); - sz = SPI_getbinval(tup,tupdesc, 2, &isnull); + "select oid, pg_total_relation_size(oid), gp_segment_id" + " from gp_dist_random('pg_class')" + " where oid >= %u and (relkind='r' or relkind='m')", + FirstNormalObjectId); + ret = SPI_execute(buf.data, false, 0); + if (ret != SPI_OK_SELECT) + elog(ERROR, "cannot fetch in pg_total_relation_size. error code %d", ret); - appendStringInfo(&buf, " ( %u, %ld)", oid, sz); - if(i + 1 < SPI_processed) - appendStringInfoChar(&buf, ','); + /* fill table_size table with table oid and size info for segments. */ + insert_flag = insert_flag | generate_insert_table_size_sql(&insert_buf, extMajorVersion); + if (insert_flag) + { + truncateStringInfo(&insert_buf, insert_buf.len - strlen(",")); + appendStringInfo(&insert_buf, ";"); + ret = SPI_execute(insert_buf.data, false, 0); + if (ret != SPI_OK_INSERT) + elog(ERROR, "cannot insert into table_size table: error code %d", ret); } - appendStringInfo(&buf, ";"); - - ret = SPI_execute(buf.data, false, 0); - if (ret != SPI_OK_INSERT) - elog(ERROR, "cannot insert table_size table: error code %d", ret); /* set diskquota state to ready.
*/ resetStringInfo(&buf); @@ -159,6 +181,47 @@ init_table_size_table(PG_FUNCTION_ARGS) PG_RETURN_VOID(); } +/* returns true if at least one set of values was appended for insertion into table_size */ +static bool +generate_insert_table_size_sql(StringInfoData *insert_buf, int extMajorVersion) +{ + TupleDesc tupdesc = SPI_tuptable->tupdesc; + bool insert_flag = false; + for(int i = 0; i < SPI_processed; i++) + { + HeapTuple tup; + bool isnull; + Oid oid; + int64 sz; + int16 segid; + + tup = SPI_tuptable->vals[i]; + oid = SPI_getbinval(tup,tupdesc, 1, &isnull); + sz = SPI_getbinval(tup,tupdesc, 2, &isnull); + segid = SPI_getbinval(tup,tupdesc, 3, &isnull); + switch (extMajorVersion) + { + case 1: + /* for version 1.0, only insert the values from master */ + if (segid == -1) + { + appendStringInfo(insert_buf, " ( %u, %ld),", oid, sz); + insert_flag = true; + } + break; + case 2: + appendStringInfo(insert_buf, " ( %u, %ld, %d),", oid, sz, segid); + insert_flag = true; + break; + default: + ereport(ERROR, + (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("[diskquota] unknown diskquota extension version: %d", extMajorVersion))); + + } + } + return insert_flag; +} /* * Trigger to start diskquota worker when create extension diskquota. * This function is called at backend side, and will send message to @@ -404,6 +467,12 @@ set_role_quota(PG_FUNCTION_ARGS) sizestr = str_tolower(sizestr, strlen(sizestr), DEFAULT_COLLATION_OID); quota_limit_mb = get_size_in_mb(sizestr); + if (quota_limit_mb == 0) + { + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("disk quota can not be set to 0 MB"))); + } set_quota_config_internal(roleoid, quota_limit_mb, ROLE_QUOTA); PG_RETURN_VOID(); } @@ -434,6 +503,12 @@ set_schema_quota(PG_FUNCTION_ARGS) sizestr = str_tolower(sizestr, strlen(sizestr), DEFAULT_COLLATION_OID); quota_limit_mb = get_size_in_mb(sizestr); + if (quota_limit_mb == 0) + { + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("disk quota can not be set to 0 MB"))); + } set_quota_config_internal(namespaceoid, quota_limit_mb, NAMESPACE_QUOTA); PG_RETURN_VOID(); } @@ -474,6 +549,12 @@ set_role_tablespace_quota(PG_FUNCTION_ARGS) sizestr = text_to_cstring(PG_GETARG_TEXT_PP(2)); sizestr = str_tolower(sizestr, strlen(sizestr), DEFAULT_COLLATION_OID); quota_limit_mb = get_size_in_mb(sizestr); + if (quota_limit_mb == 0) + { + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("disk quota can not be set to 0 MB"))); + } set_target_internal(roleoid, spcoid, quota_limit_mb, ROLE_TABLESPACE_QUOTA); set_quota_config_internal(roleoid, quota_limit_mb, ROLE_TABLESPACE_QUOTA); @@ -516,6 +597,12 @@ set_schema_tablespace_quota(PG_FUNCTION_ARGS) sizestr = text_to_cstring(PG_GETARG_TEXT_PP(2)); sizestr = str_tolower(sizestr, strlen(sizestr), DEFAULT_COLLATION_OID); quota_limit_mb = get_size_in_mb(sizestr); + if (quota_limit_mb == 0) + { + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("disk quota can not be set to 0 MB"))); + } set_target_internal(namespaceoid, spcoid, quota_limit_mb, NAMESPACE_TABLESPACE_QUOTA); set_quota_config_internal(namespaceoid, quota_limit_mb, NAMESPACE_TABLESPACE_QUOTA); @@ -555,7 +642,7 @@ set_quota_config_internal(Oid targetoid, int64 quota_limit_mb, QuotaType type) if (ret != SPI_OK_INSERT) elog(ERROR, "cannot insert into quota setting table, error code %d", ret); } - else if (SPI_processed > 0 && quota_limit_mb <= 0) + else if (SPI_processed > 0 && quota_limit_mb < 0) { resetStringInfo(&buf); appendStringInfo(&buf, @@
-622,7 +709,7 @@ set_target_internal(Oid primaryoid, Oid spcoid, int64 quota_limit_mb, QuotaType if (ret != SPI_OK_INSERT) elog(ERROR, "cannot insert into quota setting table, error code %d", ret); } - else if (SPI_processed > 0 && quota_limit_mb <= 0) + else if (SPI_processed > 0 && quota_limit_mb < 0) { resetStringInfo(&buf); appendStringInfo(&buf, @@ -827,3 +914,113 @@ update_diskquota_db_list(PG_FUNCTION_ARGS) PG_RETURN_VOID(); } + +/* + * Function to set the per-segment disk quota ratio for a tablespace + */ +Datum +set_per_segment_quota(PG_FUNCTION_ARGS) +{ + int ret; + Oid spcoid; + char *spcname; + float4 ratio; + if (!superuser()) + { + ereport(ERROR, + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + errmsg("must be superuser to set disk quota limit"))); + } + + spcname = text_to_cstring(PG_GETARG_TEXT_PP(0)); + spcname = str_tolower(spcname, strlen(spcname), DEFAULT_COLLATION_OID); + spcoid = get_tablespace_oid(spcname, false); + + ratio = PG_GETARG_FLOAT4(1); + + if (ratio == 0) + { + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("per segment quota ratio can not be set to 0"))); + } + StringInfoData buf; + + if (SPI_OK_CONNECT != SPI_connect()) + { + ereport(ERROR, + (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("unable to connect to execute internal query"))); + } + + /* Verify that at least one role or schema quota is configured on this tablespace */ + initStringInfo(&buf); + appendStringInfo(&buf, + "SELECT true FROM diskquota.target as t, diskquota.quota_config as q WHERE tablespaceOid = %u AND (t.quotaType = %d OR t.quotaType = %d) AND t.primaryOid = q.targetOid AND t.quotaType = q.quotaType", spcoid, NAMESPACE_TABLESPACE_QUOTA, ROLE_TABLESPACE_QUOTA); + + ret = SPI_execute(buf.data, true, 0); + if (ret != SPI_OK_SELECT) + elog(ERROR, "cannot select target and quota setting table: error code %d", ret); + if (SPI_processed <= 0) + { + ereport(ERROR, + (errmsg("there are no roles or schema quota configured for this tablespace: %s, can't config per segment ratio for it", spcname))); + } + resetStringInfo(&buf); + appendStringInfo(&buf, + "UPDATE diskquota.quota_config AS q set segratio = %f FROM diskquota.target AS t WHERE q.targetOid = t.primaryOid AND (t.quotaType = %d OR t.quotaType = %d) AND t.quotaType = q.quotaType AND t.tablespaceOid = %u", ratio, NAMESPACE_TABLESPACE_QUOTA, ROLE_TABLESPACE_QUOTA, spcoid); + /* + * Update the segratio config of NAMESPACE_TABLESPACE_QUOTA and ROLE_TABLESPACE_QUOTA entries for this tablespace + */ + ret = SPI_execute(buf.data, false, 0); + if (ret != SPI_OK_UPDATE) + elog(ERROR, "cannot update item from quota setting table, error code %d", ret); + /* + * And finish our transaction. + */ + SPI_finish(); + PG_RETURN_VOID(); +} + +/* + * Get the major version from extversion and convert it to an int. + * 0 means an invalid major version.
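+ * For example, extversion '1.0' yields 1 and '2.1' yields 2; an empty or
+ * non-numeric extversion yields 0.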
+ */ +int +get_ext_major_version(void) +{ + int ret; + TupleDesc tupdesc; + HeapTuple tup; + Datum dat; + bool isnull; + char *extversion; + + ret = SPI_execute("select COALESCE(extversion,'') from pg_extension where extname = 'diskquota'", true, 0); + if (ret != SPI_OK_SELECT) + ereport(ERROR, + (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("[diskquota] check diskquota state SPI_execute failed: error code %d", ret))); + + tupdesc = SPI_tuptable->tupdesc; + if (tupdesc->natts != 1 || + ((tupdesc)->attrs[0])->atttypid != TEXTOID || SPI_processed != 1) + { + ereport(ERROR, + (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("[diskquota] can not get diskquota extension version"))); + } + + tup = SPI_tuptable->vals[0]; + dat = SPI_getbinval(tup, tupdesc, 1, &isnull); + if (isnull) + ereport(ERROR, + (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("[diskquota] can not get diskquota extension version"))); + extversion = TextDatumGetCString(dat); + if (extversion) + { + return (int)strtol(extversion, (char **) NULL, 10); + } + return 0; +} diff --git a/expected/test_mistake.out b/expected/test_mistake.out index e8d3dd7cacf..3eeb578e730 100644 --- a/expected/test_mistake.out +++ b/expected/test_mistake.out @@ -6,3 +6,30 @@ select nspname from pg_namespace where nspname = 'notfoundns'; select diskquota.set_schema_quota('notfoundns', '1 MB'); ERROR: schema "notfoundns" does not exist +DROP SCHEMA IF EXISTS nmistake; +NOTICE: schema "nmistake" does not exist, skipping +CREATE SCHEMA nmistake; +select diskquota.set_schema_quota('nmistake', '0 MB'); +ERROR: disk quota can not be set to 0 MB +DROP ROLE IF EXISTS rmistake; +NOTICE: role "rmistake" does not exist, skipping +CREATE ROLE rmistake; +NOTICE: resource queue required -- using default resource queue "pg_default" +select diskquota.set_role_quota('rmistake', '0 MB'); +ERROR: disk quota can not be set to 0 MB +-- start_ignore +\! mkdir /tmp/spcmistake +-- end_ignore +DROP TABLESPACE IF EXISTS spcmistake; +NOTICE: tablespace "spcmistake" does not exist, skipping +CREATE TABLESPACE spcmistake LOCATION '/tmp/spcmistake'; +SELECT diskquota.set_schema_tablespace_quota('nmistake', 'spcmistake','0 MB'); +ERROR: disk quota can not be set to 0 MB +SELECT diskquota.set_role_tablespace_quota('rmistake', 'spcmistake','0 MB'); +ERROR: disk quota can not be set to 0 MB +SELECT diskquota.set_per_segment_quota('spcmistake', 0); +ERROR: per segment quota ratio can not be set to 0 +DROP SCHEMA nmistake; +DROP ROLE rmistake; +DROP TABLESPACE spcmistake; +\! rm -rf /tmp/spcmistake diff --git a/expected/test_tablespace_role.out b/expected/test_tablespace_role.out index 5437ca48fe5..0e61c01c350 100644 --- a/expected/test_tablespace_role.out +++ b/expected/test_tablespace_role.out @@ -127,7 +127,7 @@ SELECT pg_sleep(5); INSERT INTO b SELECT generate_series(1,100); ERROR: tablespace: rolespc role: rolespcu1 diskquota exceeded -- Test delete quota config -SELECT diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '0 MB'); +SELECT diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '-1 MB'); set_role_tablespace_quota --------------------------- diff --git a/expected/test_tablespace_role_perseg.out b/expected/test_tablespace_role_perseg.out new file mode 100644 index 00000000000..9440989eb93 --- /dev/null +++ b/expected/test_tablespace_role_perseg.out @@ -0,0 +1,197 @@ +-- Test role quota +-- start_ignore +\!
mkdir /tmp/rolespc_perseg +-- end_ignore +DROP TABLESPACE IF EXISTS rolespc_perseg; +NOTICE: tablespace "rolespc_perseg" does not exist, skipping +CREATE TABLESPACE rolespc_perseg LOCATION '/tmp/rolespc_perseg'; +CREATE SCHEMA rolespc_persegrole; +SET search_path TO rolespc_persegrole; +DROP ROLE IF EXISTS rolespc_persegu1; +NOTICE: role "rolespc_persegu1" does not exist, skipping +DROP ROLE IF EXISTS rolespc_persegu2; +NOTICE: role "rolespc_persegu2" does not exist, skipping +CREATE ROLE rolespc_persegu1 NOLOGIN; +NOTICE: resource queue required -- using default resource queue "pg_default" +CREATE ROLE rolespc_persegu2 NOLOGIN; +NOTICE: resource queue required -- using default resource queue "pg_default" +CREATE TABLE b (t TEXT) TABLESPACE rolespc_perseg; +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 't' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +ALTER TABLE b OWNER TO rolespc_persegu1; +SELECT diskquota.set_role_tablespace_quota('rolespc_persegu1', 'rolespc_perseg', '1 MB'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +INSERT INTO b SELECT generate_series(1,100); +-- expect insert success +INSERT INTO b SELECT generate_series(1,100000); +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); +ERROR: tablespace: rolespc_perseg role: rolespc_persegu1 diskquota exceeded +-- change tablespace role quota +SELECT diskquota.set_role_tablespace_quota('rolespc_persegu1', 'rolespc_perseg', '10 MB'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- expect insert success +INSERT INTO b SELECT generate_series(1,100); +-- Test show_fast_schema_tablespace_quota_view +SELECT role_name, tablespace_name, quota_in_mb, rolsize_tablespace_in_bytes FROM diskquota.show_fast_role_tablespace_quota_view WHERE role_name = 'rolespc_persegu1' and tablespace_name = 'rolespc_perseg'; + role_name | tablespace_name | quota_in_mb | rolsize_tablespace_in_bytes +------------------+-----------------+-------------+----------------------------- + rolespc_persegu1 | rolespc_perseg | 10 | 4063232 +(1 row) + +SELECT diskquota.set_per_segment_quota('rolespc_perseg', '0.1'); + set_per_segment_quota +----------------------- + +(1 row) + +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +---- expect insert fail by tablespace schema perseg quota +INSERT INTO b SELECT generate_series(1,100); +ERROR: tablespace: rolespc_perseg role: rolespc_persegu1 diskquota exceeded per segment quota +-- Test alter owner +ALTER TABLE b OWNER TO rolespc_persegu2; +SELECT pg_sleep(20); + pg_sleep +---------- + +(1 row) + +-- expect insert succeed +INSERT INTO b SELECT generate_series(1,100); +ALTER TABLE b OWNER TO rolespc_persegu1; +SELECT pg_sleep(20); + pg_sleep +---------- + +(1 row) + +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); +ERROR: tablespace: rolespc_perseg role: rolespc_persegu1 diskquota exceeded per segment quota +-- Test alter tablespace +-- start_ignore +\! 
mkdir /tmp/rolespc_perseg2 +-- end_ignore +DROP TABLESPACE IF EXISTS rolespc_perseg2; +NOTICE: tablespace "rolespc_perseg2" does not exist, skipping +CREATE TABLESPACE rolespc_perseg2 LOCATION '/tmp/rolespc_perseg2'; +ALTER TABLE b SET TABLESPACE rolespc_perseg2; +SELECT pg_sleep(20); + pg_sleep +---------- + +(1 row) + +-- expect insert succeed +INSERT INTO b SELECT generate_series(1,100); +-- alter table b back to tablespace rolespc_perseg +ALTER TABLE b SET TABLESPACE rolespc_perseg; +SELECT pg_sleep(20); + pg_sleep +---------- + +(1 row) + +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); +ERROR: tablespace: rolespc_perseg role: rolespc_persegu1 diskquota exceeded per segment quota +-- Test update per segment ratio +SELECT diskquota.set_per_segment_quota('rolespc_perseg', 3.1); + set_per_segment_quota +----------------------- + +(1 row) + +SELECT pg_sleep(20); + pg_sleep +---------- + +(1 row) + +SELECT role_name, tablespace_name, quota_in_mb, rolsize_tablespace_in_bytes FROM diskquota.show_fast_role_tablespace_quota_view WHERE role_name = 'rolespc_persegu1' and tablespace_name = 'rolespc_perseg'; + role_name | tablespace_name | quota_in_mb | rolsize_tablespace_in_bytes +------------------+-----------------+-------------+----------------------------- + rolespc_persegu1 | rolespc_perseg | 10 | 4063232 +(1 row) + +-- expect insert success +INSERT INTO b SELECT generate_series(1,100); +SELECT diskquota.set_per_segment_quota('rolespc_perseg', 0.11); + set_per_segment_quota +----------------------- + +(1 row) + +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); +ERROR: tablespace: rolespc_perseg role: rolespc_persegu1 diskquota exceeded per segment quota +-- Test delete per segment ratio +SELECT diskquota.set_per_segment_quota('rolespc_perseg', -1); + set_per_segment_quota +----------------------- + +(1 row) + +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- expect insert success +INSERT INTO b SELECT generate_series(1,100); +-- Test delete quota config +SELECT diskquota.set_role_tablespace_quota('rolespc_persegu1', 'rolespc_perseg', '-1 MB'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- expect insert success +INSERT INTO b SELECT generate_series(1,100); +DROP table b; +DROP ROLE rolespc_persegu1, rolespc_persegu2; +RESET search_path; +DROP SCHEMA rolespc_persegrole; +DROP TABLESPACE rolespc_perseg; +DROP TABLESPACE rolespc_perseg2; +\! rm -rf /tmp/rolespc_perseg; +\! 
rm -rf /tmp/rolespc_perseg2 diff --git a/expected/test_tablespace_schema.out b/expected/test_tablespace_schema.out index 52fdd26a378..132ae4b4edf 100644 --- a/expected/test_tablespace_schema.out +++ b/expected/test_tablespace_schema.out @@ -57,8 +57,8 @@ SELECT pg_sleep(10); -- expect insert fail INSERT INTO a SELECT generate_series(1,200); ERROR: tablespace:schemaspc schema:spcs1 diskquota exceeded -SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespcae_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name = 'spcs1' and tablespace_name ='schemaspc'; - schema_name | tablespace_name | quota_in_mb | nspsize_tablespcae_in_bytes +SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name = 'spcs1' and tablespace_name ='schemaspc'; + schema_name | tablespace_name | quota_in_mb | nspsize_tablespace_in_bytes -------------+-----------------+-------------+----------------------------- spcs1 | schemaspc | 1 | 4030464 (1 row) @@ -116,7 +116,7 @@ SELECT pg_sleep(5); INSERT INTO a SELECT generate_series(1,100); ERROR: tablespace:schemaspc schema:spcs1 diskquota exceeded -- Test delete quota config -SELECT diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc', '0 MB'); +SELECT diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc', '-1 MB'); set_schema_tablespace_quota ----------------------------- diff --git a/expected/test_tablespace_schema_perseg.out b/expected/test_tablespace_schema_perseg.out new file mode 100644 index 00000000000..cffe25ba4a3 --- /dev/null +++ b/expected/test_tablespace_schema_perseg.out @@ -0,0 +1,198 @@ +-- Test schema +-- start_ignore +\! mkdir /tmp/schemaspc_perseg +-- end_ignore +-- Test tablespace quota perseg +CREATE SCHEMA spcs1_perseg; +DROP TABLESPACE IF EXISTS schemaspc_perseg; +NOTICE: tablespace "schemaspc_perseg" does not exist, skipping +CREATE TABLESPACE schemaspc_perseg LOCATION '/tmp/schemaspc_perseg'; +SELECT diskquota.set_schema_tablespace_quota('spcs1_perseg', 'schemaspc_perseg','1 MB'); + set_schema_tablespace_quota +----------------------------- + +(1 row) + +SET search_path TO spcs1_perseg; +CREATE TABLE a(i int) TABLESPACE schemaspc_perseg; +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
+INSERT INTO a SELECT generate_series(1,100); +-- expect insert success +INSERT INTO a SELECT generate_series(1,100000); +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- expect insert fail by tablespace schema diskquota +INSERT INTO a SELECT generate_series(1,100); +ERROR: tablespace:schemaspc_perseg schema:spcs1_perseg diskquota exceeded +-- change tablespace schema quota +SELECT diskquota.set_schema_tablespace_quota('spcs1_perseg', 'schemaspc_perseg', '10 MB'); + set_schema_tablespace_quota +----------------------------- + +(1 row) + +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- expect insert success +INSERT INTO a SELECT generate_series(1,100); +SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name = 'spcs1_perseg' and tablespace_name ='schemaspc_perseg'; + schema_name | tablespace_name | quota_in_mb | nspsize_tablespace_in_bytes +--------------+------------------+-------------+----------------------------- + spcs1_perseg | schemaspc_perseg | 10 | 3932160 +(1 row) + +SELECT diskquota.set_per_segment_quota('schemaspc_perseg', 0.1); + set_per_segment_quota +----------------------- + +(1 row) + +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +---- expect insert fail by tablespace schema perseg quota +INSERT INTO a SELECT generate_series(1,100); +ERROR: tablespace:schemaspc_perseg schema:spcs1_perseg diskquota exceeded per segment quota +-- Test alter table set schema +CREATE SCHEMA spcs2_perseg; +ALTER TABLE spcs1_perseg.a SET SCHEMA spcs2_perseg; +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- expect insert succeed +INSERT INTO spcs2_perseg.a SELECT generate_series(1,200); +ALTER TABLE spcs2_perseg.a SET SCHEMA spcs1_perseg; +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- expect insert fail +INSERT INTO a SELECT generate_series(1,200); +ERROR: tablespace:schemaspc_perseg schema:spcs1_perseg diskquota exceeded per segment quota +SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name = 'spcs1_perseg' and tablespace_name ='schemaspc_perseg'; + schema_name | tablespace_name | quota_in_mb | nspsize_tablespace_in_bytes +--------------+------------------+-------------+----------------------------- + spcs1_perseg | schemaspc_perseg | 10 | 3932160 +(1 row) + +-- Test alter tablespace +-- start_ignore +\! 
mkdir /tmp/schemaspc_perseg2 +-- end_ignore +DROP TABLESPACE IF EXISTS schemaspc_perseg2; +NOTICE: tablespace "schemaspc_perseg2" does not exist, skipping +CREATE TABLESPACE schemaspc_perseg2 LOCATION '/tmp/schemaspc_perseg2'; +ALTER TABLE a SET TABLESPACE schemaspc_perseg2; +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- expect insert succeed +INSERT INTO a SELECT generate_series(1,200); +ALTER TABLE a SET TABLESPACE schemaspc_perseg; +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- expect insert fail +INSERT INTO a SELECT generate_series(1,200); +ERROR: tablespace:schemaspc_perseg schema:spcs1_perseg diskquota exceeded per segment quota +-- Test update per segment ratio +SELECT diskquota.set_per_segment_quota('schemaspc_perseg', 3.1); + set_per_segment_quota +----------------------- + +(1 row) + +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- expect insert success +INSERT INTO a SELECT generate_series(1,100); +SELECT diskquota.set_per_segment_quota('schemaspc_perseg', 0.123); + set_per_segment_quota +----------------------- + +(1 row) + +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +---- expect insert fail +INSERT INTO a SELECT generate_series(1,100); +ERROR: tablespace:schemaspc_perseg schema:spcs1_perseg diskquota exceeded per segment quota +-- Test delete per segment ratio +SELECT diskquota.set_per_segment_quota('schemaspc_perseg', -1); + set_per_segment_quota +----------------------- + +(1 row) + +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- expect insert success +INSERT INTO a SELECT generate_series(1,100); +-- Test delete tablespace schema quota +SELECT diskquota.set_per_segment_quota('schemaspc_perseg', 2); + set_per_segment_quota +----------------------- + +(1 row) + +SELECT diskquota.set_schema_tablespace_quota('spcs1_perseg', 'schemaspc_perseg','-1 MB'); + set_schema_tablespace_quota +----------------------------- + +(1 row) + +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- expect insert success +INSERT INTO a SELECT generate_series(1,100); +SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name = 'spcs1_perseg' and tablespace_name ='schemaspc_perseg'; + schema_name | tablespace_name | quota_in_mb | nspsize_tablespace_in_bytes +-------------+-----------------+-------------+----------------------------- +(0 rows) + +RESET search_path; +DROP TABLE spcs1_perseg.a; +DROP SCHEMA spcs1_perseg; +DROP TABLESPACE schemaspc_perseg; +DROP TABLESPACE schemaspc_perseg2; +\! rm -rf /tmp/schemaspc_perseg +\! 
rm -rf /tmp/schemaspc_perseg2 diff --git a/gp_activetable.c b/gp_activetable.c index 1ebf8f3e475..b67e7960f08 100644 --- a/gp_activetable.c +++ b/gp_activetable.c @@ -222,10 +222,10 @@ gp_fetch_active_tables(bool is_init) Assert(Gp_role == GP_ROLE_DISPATCH); memset(&ctl, 0, sizeof(ctl)); - ctl.keysize = sizeof(Oid); + ctl.keysize = sizeof(TableEntryKey); ctl.entrysize = sizeof(DiskQuotaActiveTableEntry); ctl.hcxt = CurrentMemoryContext; - ctl.hash = oid_hash; + ctl.hash = tag_hash; local_table_stats_map = hash_create("local active table map with relfilenode info", 1024, @@ -276,6 +276,15 @@ diskquota_fetch_table_stat(PG_FUNCTION_ARGS) { MemoryContext oldcontext; TupleDesc tupdesc; + int extMajorVersion; + if (SPI_OK_CONNECT != SPI_connect()) + { + ereport(ERROR, + (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("unable to connect to execute internal query"))); + } + extMajorVersion = get_ext_major_version(); + SPI_finish(); /* create a function context for cross-call persistence */ funcctx = SRF_FIRSTCALL_INIT(); @@ -311,8 +320,21 @@ diskquota_fetch_table_stat(PG_FUNCTION_ARGS) /* * prepare attribute metadata for next calls that generate the tuple */ - - tupdesc = CreateTemplateTupleDesc(2, false); + switch (extMajorVersion) + { + case 1: + tupdesc = CreateTemplateTupleDesc(2, false); + break; + case 2: + tupdesc = CreateTemplateTupleDesc(3, false); + TupleDescInitEntry(tupdesc, (AttrNumber) 3, "GP_SEGMENT_ID", + INT2OID, -1, 0); + break; + default: + ereport(ERROR, + (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("[diskquota] unknown diskquota extension version: %d", extMajorVersion))); + } TupleDescInitEntry(tupdesc, (AttrNumber) 1, "TABLE_OID", OIDOID, -1, 0); TupleDescInitEntry(tupdesc, (AttrNumber) 2, "TABLE_SIZE", @@ -348,15 +370,16 @@ diskquota_fetch_table_stat(PG_FUNCTION_ARGS) while ((results_entry = (DiskQuotaActiveTableEntry *) hash_seq_search(&(cache->pos))) != NULL) { Datum result; - Datum values[2]; - bool nulls[2]; + Datum values[3]; + bool nulls[3]; HeapTuple tuple; memset(values, 0, sizeof(values)); memset(nulls, false, sizeof(nulls)); - values[0] = ObjectIdGetDatum(results_entry->tableoid); + values[0] = ObjectIdGetDatum(results_entry->reloid); values[1] = Int64GetDatum(results_entry->tablesize); + values[2] = Int16GetDatum(results_entry->segid); tuple = heap_form_tuple(funcctx->attinmeta->tupdesc, values, nulls); @@ -389,8 +412,10 @@ get_active_tables_stats(ArrayType *array) int bitmask; int i; Oid relOid; + int segId; HTAB *local_table = NULL; HASHCTL ctl; + TableEntryKey key; DiskQuotaActiveTableEntry *entry; Assert(ARR_ELEMTYPE(array) == OIDOID); @@ -406,10 +431,10 @@ get_active_tables_stats(ArrayType *array) bitmask = 1; memset(&ctl, 0, sizeof(ctl)); - ctl.keysize = sizeof(Oid); + ctl.keysize = sizeof(TableEntryKey); ctl.entrysize = sizeof(DiskQuotaActiveTableEntry); ctl.hcxt = CurrentMemoryContext; - ctl.hash = oid_hash; + ctl.hash = tag_hash; local_table = hash_create("local table map", 1024, @@ -429,9 +454,13 @@ get_active_tables_stats(ArrayType *array) else { relOid = DatumGetObjectId(fetch_att(ptr, typbyval, typlen)); + segId = GpIdentity.segindex; + key.reloid = relOid; + key.segid = segId; - entry = (DiskQuotaActiveTableEntry *) hash_search(local_table, &relOid, HASH_ENTER, NULL); - entry->tableoid = relOid; + entry = (DiskQuotaActiveTableEntry *) hash_search(local_table, &key, HASH_ENTER, NULL); + entry->reloid = relOid; + entry->segid = segId; /* * avoid to generate ERROR if relOid is not existed (i.e. 
table @@ -525,6 +554,7 @@ get_active_tables_oid(void) LWLockRelease(diskquota_locks.active_table_lock); memset(&ctl, 0, sizeof(ctl)); + /* only use Oid as key here, segid is not needed */ ctl.keysize = sizeof(Oid); ctl.entrysize = sizeof(DiskQuotaActiveTableEntry); ctl.hcxt = CurrentMemoryContext; @@ -551,8 +581,10 @@ get_active_tables_oid(void) active_table_entry = hash_search(local_active_table_stats_map, &relOid, HASH_ENTER, &found); if (active_table_entry) { - active_table_entry->tableoid = relOid; + active_table_entry->reloid = relOid; + /* we don't care segid and tablesize here */ active_table_entry->tablesize = 0; + active_table_entry->segid = -1; } hash_search(local_active_table_file_map, active_table_file_entry, HASH_REMOVE, NULL); } @@ -593,16 +625,31 @@ load_table_size(HTAB *local_table_stats_map) TupleDesc tupdesc; int i; bool found; + TableEntryKey key; DiskQuotaActiveTableEntry *quota_entry; + int extMajorVersion = get_ext_major_version(); + switch (extMajorVersion) + { + case 1: + ret = SPI_execute("select tableid, size, CAST(-1 AS smallint) from diskquota.table_size", true, 0); + break; + case 2: + ret = SPI_execute("select tableid, size, segid from diskquota.table_size", true, 0); + break; + default: + ereport(ERROR, + (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("[diskquota] unknown diskquota extension version: %d", extMajorVersion))); + } - ret = SPI_execute("select tableid, size from diskquota.table_size", true, 0); if (ret != SPI_OK_SELECT) ereport(ERROR, (errmsg("[diskquota] load_table_size SPI_execute failed: error code %d", errno))); tupdesc = SPI_tuptable->tupdesc; - if (tupdesc->natts != 2 || + if (tupdesc->natts != 3 || ((tupdesc)->attrs[0])->atttypid != OIDOID || - ((tupdesc)->attrs[1])->atttypid != INT8OID) + ((tupdesc)->attrs[1])->atttypid != INT8OID || + ((tupdesc)->attrs[2])->atttypid != INT2OID) { ereport(ERROR, (errmsg("[diskquota] table \"table_size\" is corrupted in database \"%s\"," " please recreate diskquota extension", @@ -614,27 +661,35 @@ load_table_size(HTAB *local_table_stats_map) { HeapTuple tup = SPI_tuptable->vals[i]; Datum dat; - Oid tableOid; + Oid reloid; int64 size; + int16 segid; bool isnull; dat = SPI_getbinval(tup, tupdesc, 1, &isnull); if (isnull) continue; - tableOid = DatumGetObjectId(dat); + reloid = DatumGetObjectId(dat); dat = SPI_getbinval(tup, tupdesc, 2, &isnull); if (isnull) continue; size = DatumGetInt64(dat); + dat = SPI_getbinval(tup, tupdesc, 3, &isnull); + if (isnull) + continue; + segid = DatumGetInt16(dat); + key.reloid = reloid; + key.segid = segid; quota_entry = (DiskQuotaActiveTableEntry *) hash_search( - local_table_stats_map, - &tableOid, - HASH_ENTER, &found); - quota_entry->tableoid = tableOid; + local_table_stats_map, + &key, + HASH_ENTER, &found); + quota_entry->reloid = reloid; quota_entry->tablesize = size; + quota_entry->segid = segid; } return; } @@ -663,11 +718,11 @@ convert_map_to_string(HTAB *local_active_table_oid_maps) count++; if (count != nitems) { - appendStringInfo(&buffer, "%d,", entry->tableoid); + appendStringInfo(&buffer, "%d,", entry->reloid); } else { - appendStringInfo(&buffer, "%d", entry->tableoid); + appendStringInfo(&buffer, "%d", entry->reloid); } } appendStringInfo(&buffer, "}"); @@ -710,13 +765,12 @@ pull_active_list_from_seg(void) /* any errors will be catch in upper level */ CdbDispatchCommand(sql, DF_NONE, &cdb_pgresults); - for (i = 0; i < cdb_pgresults.numResults; i++) { - Oid tableOid; + Oid reloid; bool found; - struct pg_result *pgresult = cdb_pgresults.pg_results[i]; + PGresult 
*pgresult = cdb_pgresults.pg_results[i]; if (PQresultStatus(pgresult) != PGRES_TUPLES_OK) { @@ -729,14 +783,15 @@ pull_active_list_from_seg(void) /* push the active table oid into local_active_table_oid_map */ for (j = 0; j < PQntuples(pgresult); j++) { - tableOid = atooid(PQgetvalue(pgresult, j, 0)); + reloid = atooid(PQgetvalue(pgresult, j, 0)); - entry = (DiskQuotaActiveTableEntry *) hash_search(local_active_table_oid_map, &tableOid, HASH_ENTER, &found); + entry = (DiskQuotaActiveTableEntry *) hash_search(local_active_table_oid_map, &reloid, HASH_ENTER, &found); if (!found) { - entry->tableoid = tableOid; + entry->reloid = reloid; entry->tablesize = 0; + entry->segid = -1; } } } @@ -768,16 +823,25 @@ pull_active_table_size_from_seg(HTAB *local_table_stats_map, char *active_oid_ar CdbDispatchCommand(sql_command.data, DF_NONE, &cdb_pgresults); pfree(sql_command.data); + SEGCOUNT = cdb_pgresults.numResults; + if (SEGCOUNT <= 0 ) + { + ereport(ERROR, + (errmsg("[diskquota] there is no active segment, SEGCOUNT is %d", SEGCOUNT))); + } + /* sum table size from each segment into local_table_stats_map */ for (i = 0; i < cdb_pgresults.numResults; i++) { Size tableSize; bool found; - Oid tableOid; + Oid reloid; + int segId; + TableEntryKey key; DiskQuotaActiveTableEntry *entry; - struct pg_result *pgresult = cdb_pgresults.pg_results[i]; + PGresult *pgresult = cdb_pgresults.pg_results[i]; if (PQresultStatus(pgresult) != PGRES_TUPLES_OK) { @@ -789,17 +853,39 @@ pull_active_table_size_from_seg(HTAB *local_table_stats_map, char *active_oid_ar for (j = 0; j < PQntuples(pgresult); j++) { - tableOid = atooid(PQgetvalue(pgresult, j, 0)); + reloid = atooid(PQgetvalue(pgresult, j, 0)); tableSize = (Size) atoll(PQgetvalue(pgresult, j, 1)); + key.reloid = reloid; + /* for diskquota extension version is 1.0, pgresult doesn't contain segid */ + if (PQnfields(pgresult) == 3) + { + /* get the segid, tablesize for each table */ + segId = atoi(PQgetvalue(pgresult, j, 2)); + key.segid = segId; + + entry = (DiskQuotaActiveTableEntry *) hash_search( + local_table_stats_map, &key, HASH_ENTER, &found); + + if (!found) + { + /* receive table size info from the first segment */ + entry->reloid = reloid; + entry->segid = segId; + } + entry->tablesize = tableSize; + } + /* when segid is -1, the tablesize is the sum of tablesize of master and all segments */ + key.segid = -1; entry = (DiskQuotaActiveTableEntry *) hash_search( - local_table_stats_map, &tableOid, HASH_ENTER, &found); + local_table_stats_map, &key, HASH_ENTER, &found); if (!found) { /* receive table size info from the first segment */ - entry->tableoid = tableOid; + entry->reloid = reloid; entry->tablesize = tableSize; + entry->segid = -1; } else { diff --git a/gp_activetable.h b/gp_activetable.h index 44a54f5f128..09bdebf250e 100644 --- a/gp_activetable.h +++ b/gp_activetable.h @@ -12,9 +12,16 @@ typedef struct DiskQuotaActiveTableFileEntry Oid tablespaceoid; } DiskQuotaActiveTableFileEntry; +typedef struct TableEntryKey +{ + Oid reloid; + int segid; +} TableEntryKey; + typedef struct DiskQuotaActiveTableEntry { - Oid tableoid; + Oid reloid; + int segid; Size tablesize; } DiskQuotaActiveTableEntry; diff --git a/init_file b/init_file index 5261e4efb5d..4f7aa9851f6 100644 --- a/init_file +++ b/init_file @@ -9,4 +9,6 @@ m/diskquota.c:\d+\)/ s/diskquota.c:\d+\)/diskquota.c:xxx/ m/diskquota_utility.c:\d+\)/ s/diskquota_utility.c:\d+\)/diskquota_utility.c:xxx/ +m/^CONTEXT:*/ +s/^CONTEXT:/DETAIL:/ -- end_matchsubs diff --git a/quotamodel.c b/quotamodel.c index 
13a83000f4c..75790cabf77 100644 --- a/quotamodel.c +++ b/quotamodel.c @@ -43,9 +43,11 @@ #include "utils/syscache.h" #include +#include <math.h> #include "cdb/cdbvars.h" #include "cdb/cdbdisp_query.h" #include "cdb/cdbdispatchresult.h" +#include "cdb/cdbutil.h" #include "gp_activetable.h" #include "diskquota.h" @@ -63,15 +65,19 @@ typedef struct NamespaceSizeEntry NamespaceSizeEntry; typedef struct RoleSizeEntry RoleSizeEntry; typedef struct QuotaLimitEntry QuotaLimitEntry; typedef struct BlackMapEntry BlackMapEntry; +typedef struct GlobalBlackMapEntry GlobalBlackMapEntry; typedef struct LocalBlackMapEntry LocalBlackMapEntry; + +int SEGCOUNT = 0; /* * local cache of table disk size and corresponding schema and owner */ struct TableSizeEntry { - Oid reloid; - Oid tablespace_oid; + Oid reloid; + int16 segid; + Oid tablespaceoid; Oid namespaceoid; Oid owneroid; int64 totalsize; /* table size including fsm, visibility map @@ -81,8 +87,14 @@ struct TableSizeEntry bool need_flush; /* whether need to flush to table table_size */ }; +struct QuotaMapEntryKey { + Oid keys[MAX_NUM_KEYS_QUOTA_MAP]; + int16 segid; +}; + struct QuotaMapEntry { Oid keys[MAX_NUM_KEYS_QUOTA_MAP]; + int16 segid; int64 size; int64 limit; }; @@ -126,15 +138,22 @@ struct BlackMapEntry { Oid targetoid; Oid databaseoid; - Oid tablespace_oid; + Oid tablespaceoid; uint32 targettype; }; +struct GlobalBlackMapEntry +{ + BlackMapEntry keyitem; + bool segexceeded; +}; + /* local blacklist for which exceed their quota limit */ struct LocalBlackMapEntry { BlackMapEntry keyitem; bool isexceeded; + bool segexceeded; }; /* using hash table to support incremental update the table size entry.*/ @@ -148,14 +167,14 @@ static shmem_startup_hook_type prev_shmem_startup_hook = NULL; /* functions to maintain the quota maps */ static void init_all_quota_maps(void); -static void update_size_for_quota(int64 size, QuotaType type, Oid* keys); -static void update_limit_for_quota(int64 limit, QuotaType type, Oid* keys); -static void remove_quota(QuotaType type, Oid* keys); -static void add_quota_to_blacklist(QuotaType type, Oid targetOid, Oid tablespace_oid); +static void update_size_for_quota(int64 size, QuotaType type, Oid* keys, int16 segid); +static void update_limit_for_quota(int64 limit, float segratio, QuotaType type, Oid* keys); +static void remove_quota(QuotaType type, Oid* keys, int16 segid); +static void add_quota_to_blacklist(QuotaType type, Oid targetOid, Oid tablespaceoid, bool segexceeded); static void check_quota_map(QuotaType type); static void clear_all_quota_maps(void); static void vacuum_all_quota_maps(void); -static void transfer_table_for_quota(int64 totalsize, QuotaType type, Oid* old_keys, Oid* new_keys); +static void transfer_table_for_quota(int64 totalsize, QuotaType type, Oid* old_keys, Oid* new_keys, int16 segid); /* functions to refresh disk quota model*/ static void refresh_disk_quota_usage(bool is_init); @@ -170,8 +189,8 @@ static Size DiskQuotaShmemSize(void); static void disk_quota_shmem_startup(void); static void init_lwlocks(void); -static void truncateStringInfo(StringInfo str, int nchars); -static void export_exceeded_error(BlackMapEntry *blackentry); +static void export_exceeded_error(GlobalBlackMapEntry *entry); +void truncateStringInfo(StringInfo str, int nchars); static void init_all_quota_maps(void) @@ -181,15 +200,8 @@ init_all_quota_maps(void) hash_ctl.hcxt = TopMemoryContext; for (QuotaType type = 0; type < NUM_QUOTA_TYPES; ++type) { - hash_ctl.keysize = quota_info[type].num_keys * sizeof(Oid); - if
(quota_info[type].num_keys == 1) - { - hash_ctl.hash = oid_hash; - } - else - { - hash_ctl.hash = tag_hash; - } + hash_ctl.keysize = sizeof(struct QuotaMapEntryKey); + hash_ctl.hash = tag_hash; if (quota_info[type].map != NULL) { hash_destroy(quota_info[type].map); @@ -201,16 +213,20 @@ init_all_quota_maps(void) /* add a new entry quota or update the old entry quota */ static void -update_size_for_quota(int64 size, QuotaType type, Oid* keys) +update_size_for_quota(int64 size, QuotaType type, Oid* keys, int16 segid) { bool found; + struct QuotaMapEntryKey key = {0}; + memcpy(key.keys, keys, quota_info[type].num_keys * sizeof(Oid)); + key.segid = segid; struct QuotaMapEntry *entry = hash_search( - quota_info[type].map, keys, HASH_ENTER, &found); + quota_info[type].map, &key, HASH_ENTER, &found); if (!found) { entry->size = size; entry->limit = -1; memcpy(entry->keys, keys, quota_info[type].num_keys * sizeof(Oid)); + entry->segid = key.segid; } else { @@ -220,24 +236,41 @@ update_size_for_quota(int64 size, QuotaType type, Oid* keys) /* add a new entry quota or update the old entry limit */ static void -update_limit_for_quota(int64 limit, QuotaType type, Oid* keys) +update_limit_for_quota(int64 limit, float segratio, QuotaType type, Oid* keys) { bool found; - struct QuotaMapEntry *entry = hash_search( - quota_info[type].map, keys, HASH_ENTER, &found); - if (!found) + for ( int i = -1; i < SEGCOUNT ; i++) { - entry->size = 0; - memcpy(entry->keys, keys, quota_info[type].num_keys * sizeof(Oid)); + struct QuotaMapEntryKey key = {0}; + memcpy(key.keys, keys, quota_info[type].num_keys * sizeof(Oid)); + key.segid = i; + struct QuotaMapEntry *entry = hash_search( + quota_info[type].map, &key, HASH_ENTER, &found); + if (!found) + { + entry->size = 0; + memcpy(entry->keys, keys, quota_info[type].num_keys * sizeof(Oid)); + entry->segid = key.segid; + } + if (key.segid == -1) + { + entry->limit = limit; + } + else + { + entry->limit = round((limit / SEGCOUNT) * segratio); + } } - entry->limit = limit; } /* remove a entry quota from the map */ static void -remove_quota(QuotaType type, Oid* keys) +remove_quota(QuotaType type, Oid* keys, int16 segid) { - hash_search(quota_info[type].map, keys, HASH_REMOVE, NULL); + struct QuotaMapEntryKey key = {0}; + memcpy(key.keys, keys, quota_info[type].num_keys * sizeof(Oid)); + key.segid = segid; + hash_search(quota_info[type].map, &key, HASH_REMOVE, NULL); } /* @@ -245,20 +278,22 @@ remove_quota(QuotaType type, Oid* keys) * Put them into local blacklist if quota limit is exceeded. 
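 * The shared blacklist key is the tuple (targetoid, databaseoid,
 * tablespaceoid, targettype); for plain NAMESPACE_QUOTA and ROLE_QUOTA
 * entries the tablespaceoid slot is InvalidOid, and quota_check_common
 * probes the map with the same convention.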
*/ static void -add_quota_to_blacklist(QuotaType type, Oid targetOid, Oid tablespace_oid) +add_quota_to_blacklist(QuotaType type, Oid targetOid, Oid tablespaceoid, bool segexceeded) { + LocalBlackMapEntry *localblackentry; BlackMapEntry keyitem = {0}; keyitem.targetoid = targetOid; keyitem.databaseoid = MyDatabaseId; - keyitem.tablespace_oid = tablespace_oid; + keyitem.tablespaceoid = tablespaceoid; keyitem.targettype = (uint32) type; ereport(DEBUG1, (errmsg("[diskquota] Put object %u to blacklist", targetOid))); localblackentry = (LocalBlackMapEntry *) hash_search(local_disk_quota_black_map, - &keyitem, - HASH_ENTER, NULL); + &keyitem, + HASH_ENTER, NULL); localblackentry->isexceeded = true; + localblackentry->segexceeded = segexceeded; } @@ -284,23 +319,24 @@ check_quota_map(QuotaType type) tuple = SearchSysCache1(quota_info[type].sys_cache[i], ObjectIdGetDatum(entry->keys[i])); if (!HeapTupleIsValid(tuple)) { - remove_quota(type, entry->keys); + remove_quota(type, entry->keys, entry->segid); removed = true; break; } ReleaseSysCache(tuple); } - if (!removed) + if (!removed && entry->limit > 0) { - if (entry->limit >= 0 && entry->size >= entry->limit) + if (entry->size >= entry->limit) { Oid targetOid = entry->keys[0]; - Oid tablespace_oid = + Oid tablespaceoid = (type == NAMESPACE_TABLESPACE_QUOTA) || (type == ROLE_TABLESPACE_QUOTA) ? entry->keys[1] : InvalidOid; - /* when quota type is not NAMESPACE_TABLESPACE_QUOTA or ROLE_TABLESPACE_QUOTA, the tablespace_oid + /* when quota type is not NAMESPACE_TABLESPACE_QUOTA or ROLE_TABLESPACE_QUOTA, the tablespaceoid * is set to be InvalidOid, so when we get it from map, also set it to be InvalidOid */ - add_quota_to_blacklist(type, targetOid, tablespace_oid); + bool segmentExceeded = entry->segid == -1 ? false : true; + add_quota_to_blacklist(type, targetOid, tablespaceoid, segmentExceeded); } } } @@ -308,10 +344,10 @@ check_quota_map(QuotaType type) /* transfer one table's size from one quota to another quota */ static void -transfer_table_for_quota(int64 totalsize, QuotaType type, Oid* old_keys, Oid* new_keys) +transfer_table_for_quota(int64 totalsize, QuotaType type, Oid* old_keys, Oid* new_keys, int16 segid) { - update_size_for_quota(-totalsize, type, old_keys); - update_size_for_quota(totalsize, type, new_keys); + update_size_for_quota(-totalsize, type, old_keys, segid); + update_size_for_quota(totalsize, type, new_keys, segid); } static void @@ -340,7 +376,7 @@ vacuum_all_quota_maps(void) { { if (entry->limit == -1) { - remove_quota(type, entry->keys); + remove_quota(type, entry->keys, entry->segid); } } @@ -401,7 +437,7 @@ disk_quota_shmem_startup(void) memset(&hash_ctl, 0, sizeof(hash_ctl)); hash_ctl.keysize = sizeof(BlackMapEntry); - hash_ctl.entrysize = sizeof(BlackMapEntry); + hash_ctl.entrysize = sizeof(GlobalBlackMapEntry); hash_ctl.hash = tag_hash; disk_quota_black_map = ShmemInitHash("blackmap whose quota limitation is reached", @@ -456,7 +492,7 @@ DiskQuotaShmemSize(void) Size size; size = sizeof(ExtensionDDLMessage); - size = add_size(size, hash_estimate_size(MAX_DISK_QUOTA_BLACK_ENTRIES, sizeof(BlackMapEntry))); + size = add_size(size, hash_estimate_size(MAX_DISK_QUOTA_BLACK_ENTRIES, sizeof(GlobalBlackMapEntry))); size = add_size(size, hash_estimate_size(diskquota_max_active_tables, sizeof(DiskQuotaActiveTableEntry))); size = add_size(size, hash_estimate_size(MAX_NUM_MONITORED_DB, sizeof(Oid))); return size; @@ -474,10 +510,10 @@ init_disk_quota_model(void) /* initialize hash table for table/schema/role etc. 
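 * The table_size_map key is now the (reloid, segid) pair from TableEntryKey,
 * so each relation keeps one entry per segment plus a segid = -1 entry for
 * the cluster-wide total; this is also why oid_hash is replaced by tag_hash.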
*/ memset(&hash_ctl, 0, sizeof(hash_ctl)); - hash_ctl.keysize = sizeof(Oid); + hash_ctl.keysize = sizeof(TableEntryKey); hash_ctl.entrysize = sizeof(TableSizeEntry); hash_ctl.hcxt = CurrentMemoryContext; - hash_ctl.hash = oid_hash; + hash_ctl.hash = tag_hash; table_size_map = hash_create("TableSizeEntry map", 1024 * 8, @@ -718,10 +754,6 @@ refresh_disk_quota_usage(bool is_init) * size from table table_size */ -/* FIXME: we should only care about the tables whose role, schema, or tablespace - * has quota, this may improve the performance especially when too many tables - * in the database - */ static void calculate_table_disk_usage(bool is_init) { @@ -736,6 +768,7 @@ calculate_table_disk_usage(bool is_init) HASH_SEQ_STATUS iter; HTAB *local_active_table_stat_map; DiskQuotaActiveTableEntry *active_table_entry; + TableEntryKey key; classRel = heap_open(RelationRelationId, AccessShareLock); relScan = heap_beginscan_catalog(classRel, 0, NULL); @@ -774,116 +807,135 @@ calculate_table_disk_usage(bool is_init) if (relOid < FirstNormalObjectId) continue; - tsentry = (TableSizeEntry *) hash_search(table_size_map, - &relOid, - HASH_ENTER, &table_size_map_found); - - if (!table_size_map_found) - { - tsentry->reloid = relOid; - tsentry->totalsize = 0; - tsentry->owneroid = InvalidOid; - tsentry->namespaceoid = InvalidOid; - tsentry->tablespace_oid = InvalidOid; - tsentry->need_flush = true; - } - - /* mark tsentry is_exist */ - if (tsentry) - tsentry->is_exist = true; - - active_table_entry = (DiskQuotaActiveTableEntry *) hash_search(local_active_table_stat_map, &relOid, HASH_FIND, &active_tbl_found); - - /* skip to recalculate the tables which are not in active list */ - if (active_tbl_found) + /* + * The segid is the same as the content id in gp_segment_configuration + * and the content id is continuous, so it's safe to use SEGCOUNT + * to get segid. 
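+ * For example, with SEGCOUNT = 3 the loop below visits segid -1 (the
+ * cluster-wide entry) and then segids 0, 1 and 2, maintaining one
+ * TableSizeEntry per (reloid, segid) pair.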
+ */ + for (int i = -1; i < SEGCOUNT; i++) { - /* pretend process as utility mode, and append the table size on master */ - Gp_role = GP_ROLE_UTILITY; + key.segid = i; + key.reloid = relOid; + tsentry = (TableSizeEntry *) hash_search(table_size_map, + &key, + HASH_ENTER, &table_size_map_found); - /* DirectFunctionCall1 may fail, since table maybe dropped by other backend */ - PG_TRY(); - { - /* call pg_total_relation_size to get the active table size */ - active_table_entry->tablesize += (Size) DatumGetInt64(DirectFunctionCall1(pg_total_relation_size, ObjectIdGetDatum(relOid))); - } - PG_CATCH(); + if (!table_size_map_found) { - HOLD_INTERRUPTS(); - FlushErrorState(); - RESUME_INTERRUPTS(); + tsentry->reloid = relOid; + tsentry->segid = key.segid; + tsentry->totalsize = 0; + tsentry->owneroid = InvalidOid; + tsentry->namespaceoid = InvalidOid; + tsentry->tablespaceoid = InvalidOid; + tsentry->need_flush = true; } - PG_END_TRY(); - Gp_role = GP_ROLE_DISPATCH; + /* mark tsentry is_exist */ + if (tsentry) + tsentry->is_exist = true; + active_table_entry = (DiskQuotaActiveTableEntry *) hash_search(local_active_table_stat_map, &key, HASH_FIND, &active_tbl_found); - /* firstly calculate the updated total size of a table */ - updated_total_size = active_table_entry->tablesize - tsentry->totalsize; + /* skip to recalculate the tables which are not in active list */ + if (active_tbl_found) + { + if (key.segid == -1) + { + /* pretend process as utility mode, and append the table size on master */ + Gp_role = GP_ROLE_UTILITY; + + /* DirectFunctionCall1 may fail, since table maybe dropped by other backend */ + PG_TRY(); + { + /* call pg_total_relation_size to get the active table size */ + active_table_entry->tablesize += (Size) DatumGetInt64(DirectFunctionCall1(pg_total_relation_size, ObjectIdGetDatum(relOid))); + } + PG_CATCH(); + { + HOLD_INTERRUPTS(); + FlushErrorState(); + RESUME_INTERRUPTS(); + } + PG_END_TRY(); + + Gp_role = GP_ROLE_DISPATCH; - /* update the table_size entry */ - tsentry->totalsize = (int64) active_table_entry->tablesize; - tsentry->need_flush = true; + } + /* firstly calculate the updated total size of a table */ + updated_total_size = active_table_entry->tablesize - tsentry->totalsize; - /* update the disk usage, there may be entries in the map whose keys are InvlidOid as the tsentry does not exist in the table_size_map */ - update_size_for_quota(updated_total_size, NAMESPACE_QUOTA, (Oid[]){tsentry->namespaceoid}); - update_size_for_quota(updated_total_size, ROLE_QUOTA, (Oid[]){tsentry->owneroid}); - update_size_for_quota(updated_total_size, ROLE_TABLESPACE_QUOTA, (Oid[]){tsentry->owneroid, tsentry->tablespace_oid}); - update_size_for_quota(updated_total_size, NAMESPACE_TABLESPACE_QUOTA, (Oid[]){tsentry->namespaceoid, tsentry->tablespace_oid}); - } + /* update the table_size entry */ + tsentry->totalsize = (int64) active_table_entry->tablesize; + tsentry->need_flush = true; - /* table size info doesn't need to flush at init quota model stage */ - if (is_init) - { - tsentry->need_flush = false; - } + /* update the disk usage, there may be entries in the map whose keys are InvlidOid as the tsentry does not exist in the table_size_map */ + update_size_for_quota(updated_total_size, NAMESPACE_QUOTA, (Oid[]){tsentry->namespaceoid}, key.segid); + update_size_for_quota(updated_total_size, ROLE_QUOTA, (Oid[]){tsentry->owneroid}, key.segid); + update_size_for_quota(updated_total_size, ROLE_TABLESPACE_QUOTA, (Oid[]){tsentry->owneroid, tsentry->tablespaceoid}, key.segid); + 
update_size_for_quota(updated_total_size, NAMESPACE_TABLESPACE_QUOTA, (Oid[]){tsentry->namespaceoid, tsentry->tablespaceoid}, key.segid); - /* if schema change, transfer the file size */ - if (tsentry->namespaceoid != classForm->relnamespace) - { - transfer_table_for_quota( - tsentry->totalsize, - NAMESPACE_QUOTA, - (Oid[]){tsentry->namespaceoid}, - (Oid[]){classForm->relnamespace}); - transfer_table_for_quota( - tsentry->totalsize, - NAMESPACE_TABLESPACE_QUOTA, - (Oid[]){tsentry->namespaceoid, tsentry->tablespace_oid}, - (Oid[]){classForm->relnamespace, tsentry->tablespace_oid}); - tsentry->namespaceoid = classForm->relnamespace; - } - /* if owner change, transfer the file size */ - if (tsentry->owneroid != classForm->relowner) - { - transfer_table_for_quota( - tsentry->totalsize, - ROLE_QUOTA, - (Oid[]){tsentry->owneroid}, - (Oid[]){classForm->relowner} - ); - transfer_table_for_quota( - tsentry->totalsize, - ROLE_TABLESPACE_QUOTA, - (Oid[]){tsentry->owneroid, tsentry->tablespace_oid}, - (Oid[]){classForm->relowner, tsentry->tablespace_oid} - ); - tsentry->owneroid = classForm->relowner; - } + } + /* table size info doesn't need to flush at init quota model stage */ + if (is_init) + { + tsentry->need_flush = false; + } - if (tsentry->tablespace_oid != classForm->reltablespace) - { - transfer_table_for_quota( - tsentry->totalsize, - NAMESPACE_TABLESPACE_QUOTA, - (Oid[]){tsentry->namespaceoid, tsentry->tablespace_oid}, - (Oid[]){tsentry->namespaceoid, classForm->reltablespace} - ); - transfer_table_for_quota( - tsentry->totalsize, - ROLE_TABLESPACE_QUOTA, - (Oid[]){tsentry->owneroid, tsentry->tablespace_oid}, - (Oid[]){tsentry->owneroid, classForm->reltablespace} - ); - tsentry->tablespace_oid = classForm->reltablespace; + /* if schema change, transfer the file size */ + if (tsentry->namespaceoid != classForm->relnamespace) + { + transfer_table_for_quota( + tsentry->totalsize, + NAMESPACE_QUOTA, + (Oid[]){tsentry->namespaceoid}, + (Oid[]){classForm->relnamespace}, + key.segid); + transfer_table_for_quota( + tsentry->totalsize, + NAMESPACE_TABLESPACE_QUOTA, + (Oid[]){tsentry->namespaceoid, tsentry->tablespaceoid}, + (Oid[]){classForm->relnamespace, tsentry->tablespaceoid}, + key.segid); + tsentry->namespaceoid = classForm->relnamespace; + } + /* if owner change, transfer the file size */ + if (tsentry->owneroid != classForm->relowner) + { + transfer_table_for_quota( + tsentry->totalsize, + ROLE_QUOTA, + (Oid[]){tsentry->owneroid}, + (Oid[]){classForm->relowner}, + key.segid + ); + transfer_table_for_quota( + tsentry->totalsize, + ROLE_TABLESPACE_QUOTA, + (Oid[]){tsentry->owneroid, tsentry->tablespaceoid}, + (Oid[]){classForm->relowner, tsentry->tablespaceoid}, + key.segid + ); + tsentry->owneroid = classForm->relowner; + } + + if (tsentry->tablespaceoid != classForm->reltablespace) + { + transfer_table_for_quota( + tsentry->totalsize, + NAMESPACE_TABLESPACE_QUOTA, + (Oid[]){tsentry->namespaceoid, tsentry->tablespaceoid}, + (Oid[]){tsentry->namespaceoid, classForm->reltablespace}, + key.segid + ); + transfer_table_for_quota( + tsentry->totalsize, + ROLE_TABLESPACE_QUOTA, + (Oid[]){tsentry->owneroid, tsentry->tablespaceoid}, + (Oid[]){tsentry->owneroid, classForm->reltablespace}, + key.segid + ); + tsentry->tablespaceoid = classForm->reltablespace; + } } } @@ -900,10 +952,10 @@ calculate_table_disk_usage(bool is_init) { if (tsentry->is_exist == false) { - update_size_for_quota(-tsentry->totalsize, NAMESPACE_QUOTA, (Oid[]){tsentry->namespaceoid}); - update_size_for_quota(-tsentry->totalsize, 
ROLE_QUOTA, (Oid[]){tsentry->owneroid}); - update_size_for_quota(-tsentry->totalsize, ROLE_TABLESPACE_QUOTA, (Oid[]){tsentry->owneroid, tsentry->tablespace_oid}); - update_size_for_quota(-tsentry->totalsize, NAMESPACE_TABLESPACE_QUOTA, (Oid[]){tsentry->namespaceoid, tsentry->tablespace_oid}); + update_size_for_quota(-tsentry->totalsize, NAMESPACE_QUOTA, (Oid[]){tsentry->namespaceoid}, tsentry->segid); + update_size_for_quota(-tsentry->totalsize, ROLE_QUOTA, (Oid[]){tsentry->owneroid}, tsentry->segid); + update_size_for_quota(-tsentry->totalsize, ROLE_TABLESPACE_QUOTA, (Oid[]){tsentry->owneroid, tsentry->tablespaceoid}, tsentry->segid); + update_size_for_quota(-tsentry->totalsize, NAMESPACE_TABLESPACE_QUOTA, (Oid[]){tsentry->namespaceoid, tsentry->tablespaceoid}, tsentry->segid); } } } @@ -923,13 +975,27 @@ flush_to_table_size(void) StringInfoData insert_statement; bool delete_statement_flag = false; bool insert_statement_flag = false; - int ret; + int ret; + int extMajorVersion= get_ext_major_version(); /* TODO: Add flush_size_interval to avoid flushing size info in every loop */ /* concatenate all the need_to_flush table to SQL string */ initStringInfo(&delete_statement); - appendStringInfo(&delete_statement, "delete from diskquota.table_size where tableid in ("); + switch (extMajorVersion) + { + case 1: + appendStringInfo(&delete_statement, "delete from diskquota.table_size where tableid in ( "); + break; + case 2: + appendStringInfo(&delete_statement, "delete from diskquota.table_size where (tableid, segid) in ( "); + break; + default: + ereport(ERROR, + (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("[diskquota] unknown diskquota extension version: %d", extMajorVersion))); + } + initStringInfo(&insert_statement); appendStringInfo(&insert_statement, "insert into diskquota.table_size values "); hash_seq_init(&iter, table_size_map); @@ -938,7 +1004,19 @@ flush_to_table_size(void) /* delete dropped table from both table_size_map and table table_size */ if (tsentry->is_exist == false) { - appendStringInfo(&delete_statement, "%u, ", tsentry->reloid); + switch (extMajorVersion) + { + case 1: + appendStringInfo(&delete_statement, "%u, ", tsentry->reloid); + break; + case 2: + appendStringInfo(&delete_statement, "(%u,%d), ", tsentry->reloid, tsentry->segid); + break; + default: + ereport(ERROR, + (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("[diskquota] unknown diskquota extension version: %d", extMajorVersion))); + } delete_statement_flag = true; hash_search(table_size_map, @@ -949,10 +1027,28 @@ flush_to_table_size(void) else if (tsentry->need_flush == true) { tsentry->need_flush = false; - appendStringInfo(&delete_statement, "%u, ", tsentry->reloid); - appendStringInfo(&insert_statement, "(%u,%ld), ", tsentry->reloid, tsentry->totalsize); - delete_statement_flag = true; - insert_statement_flag = true; + switch (extMajorVersion) + { + case 1: + if (tsentry->segid == -1) + { + appendStringInfo(&delete_statement, "%u, ", tsentry->reloid); + appendStringInfo(&insert_statement, "(%u,%ld), ", tsentry->reloid, tsentry->totalsize); + delete_statement_flag = true; + insert_statement_flag = true; + } + break; + case 2: + appendStringInfo(&delete_statement, "(%u,%d), ", tsentry->reloid, tsentry->segid); + appendStringInfo(&insert_statement, "(%u,%ld,%d), ", tsentry->reloid, tsentry->totalsize, tsentry->segid); + delete_statement_flag = true; + insert_statement_flag = true; + break; + default: + ereport(ERROR, + (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("[diskquota] unknown diskquota extension version: %d", 
extMajorVersion))); + } } } truncateStringInfo(&delete_statement, delete_statement.len - strlen(", ")); @@ -986,7 +1082,7 @@ flush_local_black_map(void) { HASH_SEQ_STATUS iter; LocalBlackMapEntry *localblackentry; - BlackMapEntry *blackentry; + GlobalBlackMapEntry *blackentry; bool found; LWLockAcquire(diskquota_locks.black_map_lock, LW_EXCLUSIVE); @@ -996,7 +1092,7 @@ flush_local_black_map(void) { if (localblackentry->isexceeded) { - blackentry = (BlackMapEntry *) hash_search(disk_quota_black_map, + blackentry = (GlobalBlackMapEntry *) hash_search(disk_quota_black_map, (void *) &localblackentry->keyitem, HASH_ENTER_NULL, &found); if (blackentry == NULL) @@ -1010,12 +1106,16 @@ flush_local_black_map(void) /* new db objects which exceed quota limit */ if (!found) { - blackentry->targetoid = localblackentry->keyitem.targetoid; - blackentry->databaseoid = MyDatabaseId; - blackentry->targettype = localblackentry->keyitem.targettype; + blackentry->keyitem.targetoid = localblackentry->keyitem.targetoid; + blackentry->keyitem.databaseoid = MyDatabaseId; + blackentry->keyitem.targettype = localblackentry->keyitem.targettype; + blackentry->keyitem.tablespaceoid = localblackentry->keyitem.tablespaceoid; + blackentry->segexceeded = localblackentry->segexceeded; } } + blackentry->segexceeded = localblackentry->segexceeded; localblackentry->isexceeded = false; + localblackentry->segexceeded = false; } else { @@ -1034,7 +1134,7 @@ flush_local_black_map(void) /* * Make sure a StringInfo's string is no longer than 'nchars' characters. */ -static void +void truncateStringInfo(StringInfo str, int nchars) { if (str && @@ -1109,6 +1209,7 @@ do_load_quotas(void) int ret; TupleDesc tupdesc; int i; + int extMajorVersion; /* * TODO: we should skip to reload quota config when there is no change in @@ -1116,24 +1217,17 @@ do_load_quotas(void) * config change. */ clear_all_quota_maps(); - const unsigned int NUM_ATTRIBUTES = 4; + const unsigned int NUM_ATTRIBUTES = 5; + extMajorVersion = get_ext_major_version(); /* * read quotas from diskquota.quota_config and target table */ - Oid nsoid = get_namespace_oid("diskquota", false); - if (nsoid == InvalidOid) - ereport(ERROR, - (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("[diskquota] diskquota schema doesn't exist in database \"%s\"," - " please recreate diskquota extension", - get_database_name(MyDatabaseId)))); - Oid targetTableOid = get_relname_relid("target", nsoid); /* - * For diskquota 1.0, there is no target table in diskquota schema. + * We need to check the extension version. * Why do we need this? - * As when we upgrade diskquota extension from 1.0 to another version, + * As when we upgrade diskquota extension from an old to a new version, * we need firstly reload the new diskquota.so and then execute the * upgrade SQL. However, between the 2 steps, the new diskquota.so * needs to work with the old version diskquota sql file, otherwise, @@ -1141,16 +1235,21 @@ do_load_quotas(void) * Maybe this is not the best sulotion, only a work arround. Optimizing * the init procedure is a better solution. 
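+ * The segratio column loaded here feeds update_limit_for_quota, which
+ * derives each per-segment limit as round((limit / SEGCOUNT) * segratio);
+ * for example, a 10 MB quota on a 4-segment cluster with ratio 2.0 allows
+ * round((10 MB / 4) * 2.0) = 5 MB on each segment.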
*/ - if (targetTableOid == InvalidOid) - { - ret = SPI_execute("select targetoid, quotatype, quotalimitMB, 0 as tablespaceoid from diskquota.quota_config", true, 0); - } - else + switch (extMajorVersion) { - ret = SPI_execute( - "SELECT targetOid, c.quotaType, quotalimitMB, COALESCE(tablespaceoid, 0)" - "FROM diskquota.quota_config c LEFT OUTER JOIN diskquota.target t " - "ON c.targetOid = t.primaryOid and c.quotatype = t.quotatype", true, 0); + case 1: + ret = SPI_execute("select targetoid, quotatype, quotalimitMB, 0 as segratio, 0 as tablespaceoid from diskquota.quota_config", true, 0); + break; + case 2: + ret = SPI_execute( + "SELECT c.targetOid, c.quotaType, c.quotalimitMB, COALESCE(c.segratio, 0) AS segratio, COALESCE(t.tablespaceoid, 0) AS tablespaceoid " + "FROM diskquota.quota_config AS c LEFT OUTER JOIN diskquota.target AS t " + "ON c.targetOid = t.primaryOid and c.quotaType = t.quotaType", true, 0); + break; + default: + ereport(ERROR, + (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("[diskquota] unknown diskquota extension version: %d", extMajorVersion))); } if (ret != SPI_OK_SELECT) ereport(ERROR, @@ -1190,7 +1289,8 @@ do_load_quotas(void) Oid targetOid = DatumGetObjectId(vals[0]); int quotaType = (QuotaType) DatumGetInt32(vals[1]); int64 quota_limit_mb = DatumGetInt64(vals[2]); - Oid spcOid = DatumGetObjectId(vals[3]); + float segratio = DatumGetFloat4(vals[3]); + Oid spcOid = DatumGetObjectId(vals[4]); if (spcOid == InvalidOid) { @@ -1198,11 +1298,11 @@ do_load_quotas(void) ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), errmsg("[diskquota] tablespace Oid MUST NOT be NULL for quota type: %d", quotaType))); } - update_limit_for_quota(quota_limit_mb * (1 << 20), quotaType, (Oid[]){targetOid}); + update_limit_for_quota(quota_limit_mb * (1 << 20), segratio, quotaType, (Oid[]){targetOid}); } else { - update_limit_for_quota(quota_limit_mb * (1 << 20), quotaType, (Oid[]){targetOid, spcOid}); + update_limit_for_quota(quota_limit_mb * (1 << 20), segratio, quotaType, (Oid[]){targetOid, spcOid}); } } @@ -1214,7 +1314,7 @@ do_load_quotas(void) * Given table oid, search for namespace and owner. 
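 * Returns false when relid has no pg_class entry (for example when the
 * table has been dropped concurrently); quota_check_common treats that as
 * nothing to check and lets the query proceed.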
*/ static bool -get_rel_owner_schema_tablespace(Oid relid, Oid *ownerOid, Oid *nsOid, Oid *tablespace_oid) +get_rel_owner_schema_tablespace(Oid relid, Oid *ownerOid, Oid *nsOid, Oid *tablespaceoid) { HeapTuple tp; @@ -1226,7 +1326,7 @@ get_rel_owner_schema_tablespace(Oid relid, Oid *ownerOid, Oid *nsOid, Oid *table *ownerOid = reltup->relowner; *nsOid = reltup->relnamespace; - *tablespace_oid = reltup->reltablespace; + *tablespaceoid = reltup->reltablespace; ReleaseSysCache(tp); } return found; } @@ -1242,16 +1342,17 @@ quota_check_common(Oid reloid) { Oid ownerOid = InvalidOid; Oid nsOid = InvalidOid; - Oid tablespace_oid = InvalidOid; + Oid tablespaceoid = InvalidOid; bool found; BlackMapEntry keyitem; + GlobalBlackMapEntry *entry; if (!IsTransactionState()) { return true; } - bool found_rel = get_rel_owner_schema_tablespace(reloid, &ownerOid, &nsOid, &tablespace_oid); + bool found_rel = get_rel_owner_schema_tablespace(reloid, &ownerOid, &nsOid, &tablespaceoid); if (!found_rel) { return true; } @@ -1275,22 +1376,22 @@ quota_check_common(Oid reloid) } if (type == ROLE_TABLESPACE_QUOTA || type == NAMESPACE_TABLESPACE_QUOTA) { - keyitem.tablespace_oid = tablespace_oid; + keyitem.tablespaceoid = tablespaceoid; } else { /* refer to add_quota_to_blacklist */ - keyitem.tablespace_oid = InvalidOid; + keyitem.tablespaceoid = InvalidOid; } keyitem.databaseoid = MyDatabaseId; keyitem.targettype = type; - hash_search(disk_quota_black_map, + entry = hash_search(disk_quota_black_map, &keyitem, HASH_FIND, &found); if (found) { LWLockRelease(diskquota_locks.black_map_lock); - export_exceeded_error(&keyitem); + export_exceeded_error(entry); return false; } } @@ -1320,8 +1421,9 @@ invalidate_database_blackmap(Oid dbid) } static void -export_exceeded_error(BlackMapEntry *blackentry) +export_exceeded_error(GlobalBlackMapEntry *entry) { + BlackMapEntry *blackentry = &entry->keyitem; switch(blackentry->targettype) { case NAMESPACE_QUOTA: @@ -1335,14 +1437,26 @@ export_exceeded_error(BlackMapEntry *blackentry) errmsg("role's disk space quota exceeded with name:%s", GetUserNameFromId(blackentry->targetoid)))); break; case NAMESPACE_TABLESPACE_QUOTA: - ereport(ERROR, - (errcode(ERRCODE_DISK_FULL), - errmsg("tablespace:%s schema:%s diskquota exceeded", get_tablespace_name(blackentry->tablespace_oid), get_namespace_name(blackentry->targetoid)))); + if (entry->segexceeded) + ereport(ERROR, + (errcode(ERRCODE_DISK_FULL), + errmsg("tablespace:%s schema:%s diskquota exceeded per segment quota", get_tablespace_name(blackentry->tablespaceoid), get_namespace_name(blackentry->targetoid)))); + else + ereport(ERROR, + (errcode(ERRCODE_DISK_FULL), + errmsg("tablespace:%s schema:%s diskquota exceeded", get_tablespace_name(blackentry->tablespaceoid), get_namespace_name(blackentry->targetoid)))); break; case ROLE_TABLESPACE_QUOTA: - ereport(ERROR, - (errcode(ERRCODE_DISK_FULL), - errmsg("tablespace: %s role: %s diskquota exceeded", get_tablespace_name(blackentry->tablespace_oid), GetUserNameFromId(blackentry->targetoid)))); + if (entry->segexceeded) + ereport(ERROR, + (errcode(ERRCODE_DISK_FULL), + errmsg("tablespace: %s role: %s diskquota exceeded per segment quota", get_tablespace_name(blackentry->tablespaceoid), GetUserNameFromId(blackentry->targetoid)))); + else + ereport(ERROR, + (errcode(ERRCODE_DISK_FULL), + errmsg("tablespace: %s role: %s diskquota exceeded", get_tablespace_name(blackentry->tablespaceoid), GetUserNameFromId(blackentry->targetoid)))); break; default: ereport(ERROR, diff --git a/sql/test_mistake.sql
b/sql/test_mistake.sql index 55fbe322d27..9a1cbf20f58 100644 --- a/sql/test_mistake.sql +++ b/sql/test_mistake.sql @@ -1,3 +1,25 @@ -- to make sure that the schema 'notfoundns' is really not found select nspname from pg_namespace where nspname = 'notfoundns'; select diskquota.set_schema_quota('notfoundns', '1 MB'); + +DROP SCHEMA IF EXISTS nmistake; +CREATE SCHEMA nmistake; +select diskquota.set_schema_quota('nmistake', '0 MB'); + +DROP ROLE IF EXISTS rmistake; +CREATE ROLE rmistake; +select diskquota.set_role_quota('rmistake', '0 MB'); + +-- start_ignore +\! mkdir /tmp/spcmistake +-- end_ignore +DROP TABLESPACE IF EXISTS spcmistake; +CREATE TABLESPACE spcmistake LOCATION '/tmp/spcmistake'; +SELECT diskquota.set_schema_tablespace_quota('nmistake', 'spcmistake','0 MB'); +SELECT diskquota.set_role_tablespace_quota('rmistake', 'spcmistake','0 MB'); +SELECT diskquota.set_per_segment_quota('spcmistake', 0); + +DROP SCHEMA nmistake; +DROP ROLE rmistake; +DROP TABLESPACE spcmistake; +\! rm -rf /tmp/spcmistake diff --git a/sql/test_tablespace_role.sql b/sql/test_tablespace_role.sql index 1094f000322..31d36f79309 100644 --- a/sql/test_tablespace_role.sql +++ b/sql/test_tablespace_role.sql @@ -70,7 +70,7 @@ SELECT pg_sleep(5); INSERT INTO b SELECT generate_series(1,100); -- Test delete quota config -SELECT diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '0 MB'); +SELECT diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '-1 MB'); SELECT pg_sleep(5); -- expect insert success INSERT INTO b SELECT generate_series(1,100); diff --git a/sql/test_tablespace_role_perseg.sql b/sql/test_tablespace_role_perseg.sql new file mode 100644 index 00000000000..68a7f426c99 --- /dev/null +++ b/sql/test_tablespace_role_perseg.sql @@ -0,0 +1,95 @@ +-- Test role quota +-- start_ignore +\! 
mkdir /tmp/rolespc_perseg +-- end_ignore +DROP TABLESPACE IF EXISTS rolespc_perseg; +CREATE TABLESPACE rolespc_perseg LOCATION '/tmp/rolespc_perseg'; +CREATE SCHEMA rolespc_persegrole; +SET search_path TO rolespc_persegrole; + +DROP ROLE IF EXISTS rolespc_persegu1; +DROP ROLE IF EXISTS rolespc_persegu2; +CREATE ROLE rolespc_persegu1 NOLOGIN; +CREATE ROLE rolespc_persegu2 NOLOGIN; +CREATE TABLE b (t TEXT) TABLESPACE rolespc_perseg; +ALTER TABLE b OWNER TO rolespc_persegu1; + +SELECT diskquota.set_role_tablespace_quota('rolespc_persegu1', 'rolespc_perseg', '1 MB'); + +INSERT INTO b SELECT generate_series(1,100); +-- expect insert success +INSERT INTO b SELECT generate_series(1,100000); +SELECT pg_sleep(5); +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); +-- change tablespace role quota +SELECT diskquota.set_role_tablespace_quota('rolespc_persegu1', 'rolespc_perseg', '10 MB'); +SELECT pg_sleep(5); +-- expect insert success +INSERT INTO b SELECT generate_series(1,100); + +-- Test show_fast_schema_tablespace_quota_view +SELECT role_name, tablespace_name, quota_in_mb, rolsize_tablespace_in_bytes FROM diskquota.show_fast_role_tablespace_quota_view WHERE role_name = 'rolespc_persegu1' and tablespace_name = 'rolespc_perseg'; + +SELECT diskquota.set_per_segment_quota('rolespc_perseg', '0.1'); +SELECT pg_sleep(5); +---- expect insert fail by tablespace schema perseg quota +INSERT INTO b SELECT generate_series(1,100); +-- Test alter owner +ALTER TABLE b OWNER TO rolespc_persegu2; +SELECT pg_sleep(20); +-- expect insert succeed +INSERT INTO b SELECT generate_series(1,100); +ALTER TABLE b OWNER TO rolespc_persegu1; +SELECT pg_sleep(20); +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); + +-- Test alter tablespace +-- start_ignore +\! mkdir /tmp/rolespc_perseg2 +-- end_ignore +DROP TABLESPACE IF EXISTS rolespc_perseg2; +CREATE TABLESPACE rolespc_perseg2 LOCATION '/tmp/rolespc_perseg2'; +ALTER TABLE b SET TABLESPACE rolespc_perseg2; +SELECT pg_sleep(20); +-- expect insert succeed +INSERT INTO b SELECT generate_series(1,100); +-- alter table b back to tablespace rolespc_perseg +ALTER TABLE b SET TABLESPACE rolespc_perseg; +SELECT pg_sleep(20); +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); + +-- Test update per segment ratio +SELECT diskquota.set_per_segment_quota('rolespc_perseg', 3.1); +SELECT pg_sleep(20); +SELECT role_name, tablespace_name, quota_in_mb, rolsize_tablespace_in_bytes FROM diskquota.show_fast_role_tablespace_quota_view WHERE role_name = 'rolespc_persegu1' and tablespace_name = 'rolespc_perseg'; + +-- expect insert success +INSERT INTO b SELECT generate_series(1,100); +SELECT diskquota.set_per_segment_quota('rolespc_perseg', 0.11); +SELECT pg_sleep(5); +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); + +-- Test delete per segment ratio +SELECT diskquota.set_per_segment_quota('rolespc_perseg', -1); +SELECT pg_sleep(5); +-- expect insert success +INSERT INTO b SELECT generate_series(1,100); + +-- Test delete quota config +SELECT diskquota.set_role_tablespace_quota('rolespc_persegu1', 'rolespc_perseg', '-1 MB'); +SELECT pg_sleep(5); +-- expect insert success +INSERT INTO b SELECT generate_series(1,100); + +DROP table b; +DROP ROLE rolespc_persegu1, rolespc_persegu2; +RESET search_path; +DROP SCHEMA rolespc_persegrole; +DROP TABLESPACE rolespc_perseg; +DROP TABLESPACE rolespc_perseg2; +\! rm -rf /tmp/rolespc_perseg; +\! 
rm -rf /tmp/rolespc_perseg2 diff --git a/sql/test_tablespace_schema.sql b/sql/test_tablespace_schema.sql index 0b541d00618..717550bca62 100644 --- a/sql/test_tablespace_schema.sql +++ b/sql/test_tablespace_schema.sql @@ -31,7 +31,7 @@ ALTER TABLE spcs2.a SET SCHEMA spcs1; SELECT pg_sleep(10); -- expect insert fail INSERT INTO a SELECT generate_series(1,200); -SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespcae_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name = 'spcs1' and tablespace_name ='schemaspc'; +SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name = 'spcs1' and tablespace_name ='schemaspc'; -- Test alter tablespace -- start_ignore @@ -60,7 +60,7 @@ SELECT pg_sleep(5); INSERT INTO a SELECT generate_series(1,100); -- Test delete quota config -SELECT diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc', '0 MB'); +SELECT diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc', '-1 MB'); SELECT pg_sleep(5); -- expect insert success INSERT INTO a SELECT generate_series(1,100); diff --git a/sql/test_tablespace_schema_perseg.sql b/sql/test_tablespace_schema_perseg.sql new file mode 100644 index 00000000000..8e40445e5a6 --- /dev/null +++ b/sql/test_tablespace_schema_perseg.sql @@ -0,0 +1,89 @@ +-- Test schema +-- start_ignore +\! mkdir /tmp/schemaspc_perseg +-- end_ignore +-- Test tablespace quota perseg +CREATE SCHEMA spcs1_perseg; +DROP TABLESPACE IF EXISTS schemaspc_perseg; +CREATE TABLESPACE schemaspc_perseg LOCATION '/tmp/schemaspc_perseg'; +SELECT diskquota.set_schema_tablespace_quota('spcs1_perseg', 'schemaspc_perseg','1 MB'); +SET search_path TO spcs1_perseg; + +CREATE TABLE a(i int) TABLESPACE schemaspc_perseg; +INSERT INTO a SELECT generate_series(1,100); +-- expect insert success +INSERT INTO a SELECT generate_series(1,100000); +SELECT pg_sleep(5); +-- expect insert fail by tablespace schema diskquota +INSERT INTO a SELECT generate_series(1,100); +-- change tablespace schema quota +SELECT diskquota.set_schema_tablespace_quota('spcs1_perseg', 'schemaspc_perseg', '10 MB'); +SELECT pg_sleep(5); +-- expect insert success +INSERT INTO a SELECT generate_series(1,100); +SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name = 'spcs1_perseg' and tablespace_name ='schemaspc_perseg'; + +SELECT diskquota.set_per_segment_quota('schemaspc_perseg', 0.1); +SELECT pg_sleep(5); +---- expect insert fail by tablespace schema perseg quota +INSERT INTO a SELECT generate_series(1,100); + +-- Test alter table set schema +CREATE SCHEMA spcs2_perseg; +ALTER TABLE spcs1_perseg.a SET SCHEMA spcs2_perseg; +SELECT pg_sleep(5); +-- expect insert succeed +INSERT INTO spcs2_perseg.a SELECT generate_series(1,200); +ALTER TABLE spcs2_perseg.a SET SCHEMA spcs1_perseg; +SELECT pg_sleep(5); +-- expect insert fail +INSERT INTO a SELECT generate_series(1,200); +SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name = 'spcs1_perseg' and tablespace_name ='schemaspc_perseg'; + +-- Test alter tablespace +-- start_ignore +\! 
mkdir /tmp/schemaspc_perseg2 +-- end_ignore +DROP TABLESPACE IF EXISTS schemaspc_perseg2; +CREATE TABLESPACE schemaspc_perseg2 LOCATION '/tmp/schemaspc_perseg2'; +ALTER TABLE a SET TABLESPACE schemaspc_perseg2; +SELECT pg_sleep(5); +-- expect insert succeed +INSERT INTO a SELECT generate_series(1,200); +ALTER TABLE a SET TABLESPACE schemaspc_perseg; +SELECT pg_sleep(5); +-- expect insert fail +INSERT INTO a SELECT generate_series(1,200); + +-- Test update per segment ratio +SELECT diskquota.set_per_segment_quota('schemaspc_perseg', 3.1); +SELECT pg_sleep(5); +-- expect insert success +INSERT INTO a SELECT generate_series(1,100); +SELECT diskquota.set_per_segment_quota('schemaspc_perseg', 0.123); +SELECT pg_sleep(5); +---- expect insert fail +INSERT INTO a SELECT generate_series(1,100); + +-- Test delete per segment ratio +SELECT diskquota.set_per_segment_quota('schemaspc_perseg', -1); +SELECT pg_sleep(5); +-- expect insert success +INSERT INTO a SELECT generate_series(1,100); + +-- Test delete tablespace schema quota +SELECT diskquota.set_per_segment_quota('schemaspc_perseg', 2); +SELECT diskquota.set_schema_tablespace_quota('spcs1_perseg', 'schemaspc_perseg','-1 MB'); +SELECT pg_sleep(5); +-- expect insert success +INSERT INTO a SELECT generate_series(1,100); +SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name = 'spcs1_perseg' and tablespace_name ='schemaspc_perseg'; + +RESET search_path; +DROP TABLE spcs1_perseg.a; +DROP SCHEMA spcs1_perseg; +DROP TABLESPACE schemaspc_perseg; +DROP TABLESPACE schemaspc_perseg2; +\! rm -rf /tmp/schemaspc_perseg +\! rm -rf /tmp/schemaspc_perseg2 + diff --git a/upgrade_test/diskquota_schedule_upgrade b/upgrade_test/diskquota_schedule_upgrade index fe44b7a10f7..c650f177a4f 100644 --- a/upgrade_test/diskquota_schedule_upgrade +++ b/upgrade_test/diskquota_schedule_upgrade @@ -1,4 +1,16 @@ +# Test new version diskquota with old extension script +test: install_old_version +test: init +test: prepare +test: set_config +# upgrade diskquota.so to new version +test: install_new_version +test: test_role test_schema test_reschema test_temp_role test_rename test_delete_quota +test: clean + +# Test upgrade extension to new version # run by old version diskquota +test: install_old_version test: init test: prepare test: set_config @@ -6,5 +18,5 @@ test: set_config test: install_new_version test: upgrade_extension # run by new version diskquota -test: test_role test_schema test_reschema test_temp_role test_rename test_delete_quota +test: test_role test_schema test_reschema test_temp_role test_rename test_delete_quota test_tablespace_schema test_tablespace_role test_tablespace_schema_perseg test_tablespace_role_perseg test: clean diff --git a/upgrade_test/expected/clean.out b/upgrade_test/expected/clean.out index 4712add5c30..1090a18ac34 100644 --- a/upgrade_test/expected/clean.out +++ b/upgrade_test/expected/clean.out @@ -1,4 +1,44 @@ -DROP TABLE badquota.t1; -DROP ROLE testbody; -DROP SCHEMA badquota; +DROP TABLE IF EXISTS badquota.t1; +DROP TABLE IF EXISTS badbody_schema.t2; +DROP ROLE IF EXISTS testbody; +DROP SCHEMA IF EXISTS badquota; +DROP ROLE IF EXISTS badbody; +DROP SCHEMA IF EXISTS badbody_scehma; +NOTICE: schema "badbody_scehma" does not exist, skipping +DROP SCHEMA IF EXISTS deleteschema; +NOTICE: schema "deleteschema" does not exist, skipping +DROP SCHEMA IF EXISTS srs1; +NOTICE: schema "srs1" does not exist, skipping +DROP SCHEMA IF EXISTS srr1; +NOTICE: schema 
"srr1" does not exist, skipping +DROP SCHEMA IF EXISTS srE; +NOTICE: schema "sre" does not exist, skipping +DROP SCHEMA IF EXISTS s1; +NOTICE: schema "s1" does not exist, skipping +DROP SCHEMA IF EXISTS s2; +NOTICE: schema "s2" does not exist, skipping +DROP SCHEMA IF EXISTS s3; +NOTICE: schema "s3" does not exist, skipping +DROP SCHEMA IF EXISTS s4; +NOTICE: schema "s4" does not exist, skipping +DROP SCHEMA IF EXISTS s5; +DROP SCHEMA IF EXISTS s6; +NOTICE: schema "s6" does not exist, skipping +DROP SCHEMA IF EXISTS s7; +DROP TABLE IF EXISTS b; +NOTICE: table "b" does not exist, skipping +DROP TABLE IF EXISTS b2; +NOTICE: table "b2" does not exist, skipping +DROP ROLE IF EXISTS srerole; +NOTICE: role "srerole" does not exist, skipping +DROP ROLE IF EXISTS srole; +NOTICE: role "srole" does not exist, skipping +DROP ROLE IF EXISTS strole; +NOTICE: role "strole" does not exist, skipping +DROP ROLE IF EXISTS u1; +NOTICE: role "u1" does not exist, skipping +DROP ROLE IF EXISTS u2; +NOTICE: role "u2" does not exist, skipping +DROP ROLE IF EXISTS u3temp; +NOTICE: role "u3temp" does not exist, skipping DROP EXTENSION diskquota; diff --git a/upgrade_test/expected/install_old_version.out b/upgrade_test/expected/install_old_version.out new file mode 100644 index 00000000000..1622491202c --- /dev/null +++ b/upgrade_test/expected/install_old_version.out @@ -0,0 +1,2 @@ +\! install_old_version_diskquota +\! gpstop -raf > /dev/null diff --git a/upgrade_test/expected/prepare.out b/upgrade_test/expected/prepare.out index 80f84f0c281..25b295b46f6 100644 --- a/upgrade_test/expected/prepare.out +++ b/upgrade_test/expected/prepare.out @@ -46,3 +46,31 @@ SELECT pg_sleep(10); -- expect fail INSERT INTO badquota.t1 SELECT generate_series(0, 10); ERROR: schema's disk space quota exceeded with name:badquota +-- prepare a role that has reached quota limit +DROP SCHEMA IF EXISTS badbody_schema; +NOTICE: schema "badbody_schema" does not exist, skipping +CREATE SCHEMA badbody_schema; +DROP ROLE IF EXISTS badbody; +NOTICE: role "badbody" does not exist, skipping +CREATE ROLE badbody; +NOTICE: resource queue required -- using default resource queue "pg_default" +SELECT diskquota.set_role_quota('badbody', '2 MB'); + set_role_quota +---------------- + +(1 row) + +CREATE TABLE badbody_schema.t2(i INT); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
+ALTER TABLE badbody_schema.t2 OWNER TO badbody; +INSERT INTO badbody_schema.t2 SELECT generate_series(0, 100000); +SELECT pg_sleep(10); + pg_sleep +---------- + +(1 row) + +-- expect fail +INSERT INTO badbody_schema.t2 SELECT generate_series(0, 10); +ERROR: role's disk space quota exceeded with name:badbody diff --git a/upgrade_test/expected/test_role.out b/upgrade_test/expected/test_role.out index bfbc5960aa3..beee72011a3 100644 --- a/upgrade_test/expected/test_role.out +++ b/upgrade_test/expected/test_role.out @@ -36,6 +36,10 @@ SELECT pg_sleep(20); INSERT INTO b SELECT generate_series(1,100); -- expect insert succeed INSERT INTO b2 SELECT generate_series(1,100); +ALTER TABLE b OWNER TO badbody; +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); +ERROR: role's disk space quota exceeded with name:badbody DROP TABLE b, b2; DROP ROLE u1, u2; RESET search_path; diff --git a/upgrade_test/expected/test_tablespace_role.out b/upgrade_test/expected/test_tablespace_role.out new file mode 100644 index 00000000000..0e61c01c350 --- /dev/null +++ b/upgrade_test/expected/test_tablespace_role.out @@ -0,0 +1,151 @@ +-- Test role quota +-- start_ignore +\! mkdir /tmp/rolespc +-- end_ignore +DROP TABLESPACE IF EXISTS rolespc; +NOTICE: tablespace "rolespc" does not exist, skipping +CREATE TABLESPACE rolespc LOCATION '/tmp/rolespc'; +CREATE SCHEMA rolespcrole; +SET search_path TO rolespcrole; +DROP ROLE IF EXISTS rolespcu1; +NOTICE: role "rolespcu1" does not exist, skipping +DROP ROLE IF EXISTS rolespcu2; +NOTICE: role "rolespcu2" does not exist, skipping +CREATE ROLE rolespcu1 NOLOGIN; +NOTICE: resource queue required -- using default resource queue "pg_default" +CREATE ROLE rolespcu2 NOLOGIN; +NOTICE: resource queue required -- using default resource queue "pg_default" +CREATE TABLE b (t TEXT) TABLESPACE rolespc; +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 't' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +ALTER TABLE b OWNER TO rolespcu1; +CREATE TABLE b2 (t TEXT) TABLESPACE rolespc; +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 't' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
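-- Worth noting while reading this expected output: enforcement is keyed on the
-- (role, tablespace) pair, which is why the alter-owner and alter-tablespace
-- steps below flip the result. Condensed, reusing this test's own objects:
SELECT diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '1 MB');
SELECT role_name, tablespace_name, quota_in_mb, rolsize_tablespace_in_bytes
  FROM diskquota.show_fast_role_tablespace_quota_view
 WHERE role_name = 'rolespcu1' AND tablespace_name = 'rolespc';
ALTER TABLE b OWNER TO rolespcu2;      -- pair no longer matches: inserts succeed again
ALTER TABLE b SET TABLESPACE rolespc2; -- likewise out of the quota's scope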
+ALTER TABLE b2 OWNER TO rolespcu1; +SELECT diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '1 MB'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +INSERT INTO b SELECT generate_series(1,100); +-- expect insert success +INSERT INTO b SELECT generate_series(1,100000); +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); +ERROR: tablespace: rolespc role: rolespcu1 diskquota exceeded +-- expect insert fail +INSERT INTO b2 SELECT generate_series(1,100); +ERROR: tablespace: rolespc role: rolespcu1 diskquota exceeded +-- Test show_fast_schema_tablespace_quota_view +SELECT role_name, tablespace_name, quota_in_mb, rolsize_tablespace_in_bytes FROM diskquota.show_fast_role_tablespace_quota_view WHERE role_name = 'rolespcu1' and tablespace_name = 'rolespc'; + role_name | tablespace_name | quota_in_mb | rolsize_tablespace_in_bytes +-----------+-----------------+-------------+----------------------------- + rolespcu1 | rolespc | 1 | 4194304 +(1 row) + +-- Test alter owner +ALTER TABLE b OWNER TO rolespcu2; +SELECT pg_sleep(20); + pg_sleep +---------- + +(1 row) + +-- expect insert succeed +INSERT INTO b SELECT generate_series(1,100); +-- expect insert succeed +INSERT INTO b2 SELECT generate_series(1,100); +ALTER TABLE b OWNER TO rolespcu1; +SELECT pg_sleep(20); + pg_sleep +---------- + +(1 row) + +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); +ERROR: tablespace: rolespc role: rolespcu1 diskquota exceeded +-- Test alter tablespace +-- start_ignore +\! mkdir /tmp/rolespc2 +-- end_ignore +DROP TABLESPACE IF EXISTS rolespc2; +NOTICE: tablespace "rolespc2" does not exist, skipping +CREATE TABLESPACE rolespc2 LOCATION '/tmp/rolespc2'; +ALTER TABLE b SET TABLESPACE rolespc2; +SELECT pg_sleep(20); + pg_sleep +---------- + +(1 row) + +-- expect insert succeed +INSERT INTO b SELECT generate_series(1,100); +-- alter table b back to tablespace rolespc +ALTER TABLE b SET TABLESPACE rolespc; +SELECT pg_sleep(20); + pg_sleep +---------- + +(1 row) + +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); +ERROR: tablespace: rolespc role: rolespcu1 diskquota exceeded +-- Test update quota config +SELECT diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '10 MB'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +SELECT pg_sleep(20); + pg_sleep +---------- + +(1 row) + +-- expect insert success +INSERT INTO b SELECT generate_series(1,100); +-- expect insert success +INSERT INTO b SELECT generate_series(1,1000000); +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); +ERROR: tablespace: rolespc role: rolespcu1 diskquota exceeded +-- Test delete quota config +SELECT diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '-1 MB'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- expect insert success +INSERT INTO b SELECT generate_series(1,100); +DROP TABLE b, b2; +DROP ROLE rolespcu1, rolespcu2; +RESET search_path; +DROP SCHEMA rolespcrole; +DROP TABLESPACE rolespc; +DROP TABLESPACE rolespc2; +\! rm -rf /tmp/rolespc; +\! 
rm -rf /tmp/rolespc2 diff --git a/upgrade_test/expected/test_tablespace_role_perseg.out b/upgrade_test/expected/test_tablespace_role_perseg.out new file mode 100644 index 00000000000..9440989eb93 --- /dev/null +++ b/upgrade_test/expected/test_tablespace_role_perseg.out @@ -0,0 +1,197 @@ +-- Test role quota +-- start_ignore +\! mkdir /tmp/rolespc_perseg +-- end_ignore +DROP TABLESPACE IF EXISTS rolespc_perseg; +NOTICE: tablespace "rolespc_perseg" does not exist, skipping +CREATE TABLESPACE rolespc_perseg LOCATION '/tmp/rolespc_perseg'; +CREATE SCHEMA rolespc_persegrole; +SET search_path TO rolespc_persegrole; +DROP ROLE IF EXISTS rolespc_persegu1; +NOTICE: role "rolespc_persegu1" does not exist, skipping +DROP ROLE IF EXISTS rolespc_persegu2; +NOTICE: role "rolespc_persegu2" does not exist, skipping +CREATE ROLE rolespc_persegu1 NOLOGIN; +NOTICE: resource queue required -- using default resource queue "pg_default" +CREATE ROLE rolespc_persegu2 NOLOGIN; +NOTICE: resource queue required -- using default resource queue "pg_default" +CREATE TABLE b (t TEXT) TABLESPACE rolespc_perseg; +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 't' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +ALTER TABLE b OWNER TO rolespc_persegu1; +SELECT diskquota.set_role_tablespace_quota('rolespc_persegu1', 'rolespc_perseg', '1 MB'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +INSERT INTO b SELECT generate_series(1,100); +-- expect insert success +INSERT INTO b SELECT generate_series(1,100000); +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); +ERROR: tablespace: rolespc_perseg role: rolespc_persegu1 diskquota exceeded +-- change tablespace role quota +SELECT diskquota.set_role_tablespace_quota('rolespc_persegu1', 'rolespc_perseg', '10 MB'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- expect insert success +INSERT INTO b SELECT generate_series(1,100); +-- Test show_fast_schema_tablespace_quota_view +SELECT role_name, tablespace_name, quota_in_mb, rolsize_tablespace_in_bytes FROM diskquota.show_fast_role_tablespace_quota_view WHERE role_name = 'rolespc_persegu1' and tablespace_name = 'rolespc_perseg'; + role_name | tablespace_name | quota_in_mb | rolsize_tablespace_in_bytes +------------------+-----------------+-------------+----------------------------- + rolespc_persegu1 | rolespc_perseg | 10 | 4063232 +(1 row) + +SELECT diskquota.set_per_segment_quota('rolespc_perseg', '0.1'); + set_per_segment_quota +----------------------- + +(1 row) + +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +---- expect insert fail by tablespace schema perseg quota +INSERT INTO b SELECT generate_series(1,100); +ERROR: tablespace: rolespc_perseg role: rolespc_persegu1 diskquota exceeded per segment quota +-- Test alter owner +ALTER TABLE b OWNER TO rolespc_persegu2; +SELECT pg_sleep(20); + pg_sleep +---------- + +(1 row) + +-- expect insert succeed +INSERT INTO b SELECT generate_series(1,100); +ALTER TABLE b OWNER TO rolespc_persegu1; +SELECT pg_sleep(20); + pg_sleep +---------- + +(1 row) + +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); +ERROR: tablespace: rolespc_perseg role: rolespc_persegu1 diskquota exceeded per segment 
quota +-- Test alter tablespace +-- start_ignore +\! mkdir /tmp/rolespc_perseg2 +-- end_ignore +DROP TABLESPACE IF EXISTS rolespc_perseg2; +NOTICE: tablespace "rolespc_perseg2" does not exist, skipping +CREATE TABLESPACE rolespc_perseg2 LOCATION '/tmp/rolespc_perseg2'; +ALTER TABLE b SET TABLESPACE rolespc_perseg2; +SELECT pg_sleep(20); + pg_sleep +---------- + +(1 row) + +-- expect insert succeed +INSERT INTO b SELECT generate_series(1,100); +-- alter table b back to tablespace rolespc_perseg +ALTER TABLE b SET TABLESPACE rolespc_perseg; +SELECT pg_sleep(20); + pg_sleep +---------- + +(1 row) + +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); +ERROR: tablespace: rolespc_perseg role: rolespc_persegu1 diskquota exceeded per segment quota +-- Test update per segment ratio +SELECT diskquota.set_per_segment_quota('rolespc_perseg', 3.1); + set_per_segment_quota +----------------------- + +(1 row) + +SELECT pg_sleep(20); + pg_sleep +---------- + +(1 row) + +SELECT role_name, tablespace_name, quota_in_mb, rolsize_tablespace_in_bytes FROM diskquota.show_fast_role_tablespace_quota_view WHERE role_name = 'rolespc_persegu1' and tablespace_name = 'rolespc_perseg'; + role_name | tablespace_name | quota_in_mb | rolsize_tablespace_in_bytes +------------------+-----------------+-------------+----------------------------- + rolespc_persegu1 | rolespc_perseg | 10 | 4063232 +(1 row) + +-- expect insert success +INSERT INTO b SELECT generate_series(1,100); +SELECT diskquota.set_per_segment_quota('rolespc_perseg', 0.11); + set_per_segment_quota +----------------------- + +(1 row) + +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); +ERROR: tablespace: rolespc_perseg role: rolespc_persegu1 diskquota exceeded per segment quota +-- Test delete per segment ratio +SELECT diskquota.set_per_segment_quota('rolespc_perseg', -1); + set_per_segment_quota +----------------------- + +(1 row) + +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- expect insert success +INSERT INTO b SELECT generate_series(1,100); +-- Test delete quota config +SELECT diskquota.set_role_tablespace_quota('rolespc_persegu1', 'rolespc_perseg', '-1 MB'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- expect insert success +INSERT INTO b SELECT generate_series(1,100); +DROP table b; +DROP ROLE rolespc_persegu1, rolespc_persegu2; +RESET search_path; +DROP SCHEMA rolespc_persegrole; +DROP TABLESPACE rolespc_perseg; +DROP TABLESPACE rolespc_perseg2; +\! rm -rf /tmp/rolespc_perseg; +\! rm -rf /tmp/rolespc_perseg2 diff --git a/upgrade_test/expected/test_tablespace_schema.out b/upgrade_test/expected/test_tablespace_schema.out new file mode 100644 index 00000000000..132ae4b4edf --- /dev/null +++ b/upgrade_test/expected/test_tablespace_schema.out @@ -0,0 +1,139 @@ +-- Test schema +-- start_ignore +\! mkdir /tmp/schemaspc +-- end_ignore +CREATE SCHEMA spcs1; +DROP TABLESPACE IF EXISTS schemaspc; +NOTICE: tablespace "schemaspc" does not exist, skipping +CREATE TABLESPACE schemaspc LOCATION '/tmp/schemaspc'; +SELECT diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc','1 MB'); + set_schema_tablespace_quota +----------------------------- + +(1 row) + +SET search_path TO spcs1; +CREATE TABLE a(i int) TABLESPACE schemaspc; +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. 
+HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +INSERT INTO a SELECT generate_series(1,100); +-- expect insert fail +INSERT INTO a SELECT generate_series(1,100000); +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- expect insert fail +INSERT INTO a SELECT generate_series(1,100); +ERROR: tablespace:schemaspc schema:spcs1 diskquota exceeded +CREATE TABLE a2(i int) TABLESPACE schemaspc; +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +-- expect insert fail +INSERT INTO a2 SELECT generate_series(1,100); +ERROR: tablespace:schemaspc schema:spcs1 diskquota exceeded +-- Test alter table set schema +CREATE SCHEMA spcs2; +ALTER TABLE spcs1.a SET SCHEMA spcs2; +SELECT pg_sleep(20); + pg_sleep +---------- + +(1 row) + +-- expect insert succeed +INSERT INTO a2 SELECT generate_series(1,200); +-- expect insert succeed +INSERT INTO spcs2.a SELECT generate_series(1,200); +ALTER TABLE spcs2.a SET SCHEMA spcs1; +SELECT pg_sleep(10); + pg_sleep +---------- + +(1 row) + +-- expect insert fail +INSERT INTO a SELECT generate_series(1,200); +ERROR: tablespace:schemaspc schema:spcs1 diskquota exceeded +SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name = 'spcs1' and tablespace_name ='schemaspc'; + schema_name | tablespace_name | quota_in_mb | nspsize_tablespace_in_bytes +-------------+-----------------+-------------+----------------------------- + spcs1 | schemaspc | 1 | 4030464 +(1 row) + +-- Test alter tablespace +-- start_ignore +\! mkdir /tmp/schemaspc2 +-- end_ignore +DROP TABLESPACE IF EXISTS schemaspc2; +NOTICE: tablespace "schemaspc2" does not exist, skipping +CREATE TABLESPACE schemaspc2 LOCATION '/tmp/schemaspc2'; +ALTER TABLE a SET TABLESPACE schemaspc2; +SELECT pg_sleep(20); + pg_sleep +---------- + +(1 row) + +-- expect insert succeed +INSERT INTO a SELECT generate_series(1,200); +ALTER TABLE a SET TABLESPACE schemaspc; +SELECT pg_sleep(20); + pg_sleep +---------- + +(1 row) + +-- expect insert fail +INSERT INTO a SELECT generate_series(1,200); +ERROR: tablespace:schemaspc schema:spcs1 diskquota exceeded +-- Test update quota config +SELECT diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc', '10 MB'); + set_schema_tablespace_quota +----------------------------- + +(1 row) + +SELECT pg_sleep(20); + pg_sleep +---------- + +(1 row) + +-- expect insert success +INSERT INTO a SELECT generate_series(1,100); +-- expect insert success +INSERT INTO a SELECT generate_series(1,1000000); +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- expect insert fail +INSERT INTO a SELECT generate_series(1,100); +ERROR: tablespace:schemaspc schema:spcs1 diskquota exceeded +-- Test delete quota config +SELECT diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc', '-1 MB'); + set_schema_tablespace_quota +----------------------------- + +(1 row) + +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- expect insert success +INSERT INTO a SELECT generate_series(1,100); +RESET search_path; +DROP TABLE spcs1.a2, spcs1.a; +DROP SCHEMA spcs1, spcs2; +DROP TABLESPACE schemaspc; +DROP TABLESPACE schemaspc2; +\! rm -rf /tmp/schemaspc +\! 
rm -rf /tmp/schemaspc2 diff --git a/upgrade_test/expected/test_tablespace_schema_perseg.out b/upgrade_test/expected/test_tablespace_schema_perseg.out new file mode 100644 index 00000000000..cffe25ba4a3 --- /dev/null +++ b/upgrade_test/expected/test_tablespace_schema_perseg.out @@ -0,0 +1,198 @@ +-- Test schema +-- start_ignore +\! mkdir /tmp/schemaspc_perseg +-- end_ignore +-- Test tablespace quota perseg +CREATE SCHEMA spcs1_perseg; +DROP TABLESPACE IF EXISTS schemaspc_perseg; +NOTICE: tablespace "schemaspc_perseg" does not exist, skipping +CREATE TABLESPACE schemaspc_perseg LOCATION '/tmp/schemaspc_perseg'; +SELECT diskquota.set_schema_tablespace_quota('spcs1_perseg', 'schemaspc_perseg','1 MB'); + set_schema_tablespace_quota +----------------------------- + +(1 row) + +SET search_path TO spcs1_perseg; +CREATE TABLE a(i int) TABLESPACE schemaspc_perseg; +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +INSERT INTO a SELECT generate_series(1,100); +-- expect insert success +INSERT INTO a SELECT generate_series(1,100000); +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- expect insert fail by tablespace schema diskquota +INSERT INTO a SELECT generate_series(1,100); +ERROR: tablespace:schemaspc_perseg schema:spcs1_perseg diskquota exceeded +-- change tablespace schema quota +SELECT diskquota.set_schema_tablespace_quota('spcs1_perseg', 'schemaspc_perseg', '10 MB'); + set_schema_tablespace_quota +----------------------------- + +(1 row) + +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- expect insert success +INSERT INTO a SELECT generate_series(1,100); +SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name = 'spcs1_perseg' and tablespace_name ='schemaspc_perseg'; + schema_name | tablespace_name | quota_in_mb | nspsize_tablespace_in_bytes +--------------+------------------+-------------+----------------------------- + spcs1_perseg | schemaspc_perseg | 10 | 3932160 +(1 row) + +SELECT diskquota.set_per_segment_quota('schemaspc_perseg', 0.1); + set_per_segment_quota +----------------------- + +(1 row) + +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +---- expect insert fail by tablespace schema perseg quota +INSERT INTO a SELECT generate_series(1,100); +ERROR: tablespace:schemaspc_perseg schema:spcs1_perseg diskquota exceeded per segment quota +-- Test alter table set schema +CREATE SCHEMA spcs2_perseg; +ALTER TABLE spcs1_perseg.a SET SCHEMA spcs2_perseg; +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- expect insert succeed +INSERT INTO spcs2_perseg.a SELECT generate_series(1,200); +ALTER TABLE spcs2_perseg.a SET SCHEMA spcs1_perseg; +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- expect insert fail +INSERT INTO a SELECT generate_series(1,200); +ERROR: tablespace:schemaspc_perseg schema:spcs1_perseg diskquota exceeded per segment quota +SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name = 'spcs1_perseg' and tablespace_name ='schemaspc_perseg'; + schema_name | tablespace_name | quota_in_mb | nspsize_tablespace_in_bytes +--------------+------------------+-------------+----------------------------- + 
spcs1_perseg | schemaspc_perseg | 10 | 3932160 +(1 row) + +-- Test alter tablespace +-- start_ignore +\! mkdir /tmp/schemaspc_perseg2 +-- end_ignore +DROP TABLESPACE IF EXISTS schemaspc_perseg2; +NOTICE: tablespace "schemaspc_perseg2" does not exist, skipping +CREATE TABLESPACE schemaspc_perseg2 LOCATION '/tmp/schemaspc_perseg2'; +ALTER TABLE a SET TABLESPACE schemaspc_perseg2; +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- expect insert succeed +INSERT INTO a SELECT generate_series(1,200); +ALTER TABLE a SET TABLESPACE schemaspc_perseg; +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- expect insert fail +INSERT INTO a SELECT generate_series(1,200); +ERROR: tablespace:schemaspc_perseg schema:spcs1_perseg diskquota exceeded per segment quota +-- Test update per segment ratio +SELECT diskquota.set_per_segment_quota('schemaspc_perseg', 3.1); + set_per_segment_quota +----------------------- + +(1 row) + +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- expect insert success +INSERT INTO a SELECT generate_series(1,100); +SELECT diskquota.set_per_segment_quota('schemaspc_perseg', 0.123); + set_per_segment_quota +----------------------- + +(1 row) + +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +---- expect insert fail +INSERT INTO a SELECT generate_series(1,100); +ERROR: tablespace:schemaspc_perseg schema:spcs1_perseg diskquota exceeded per segment quota +-- Test delete per segment ratio +SELECT diskquota.set_per_segment_quota('schemaspc_perseg', -1); + set_per_segment_quota +----------------------- + +(1 row) + +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- expect insert success +INSERT INTO a SELECT generate_series(1,100); +-- Test delete tablespace schema quota +SELECT diskquota.set_per_segment_quota('schemaspc_perseg', 2); + set_per_segment_quota +----------------------- + +(1 row) + +SELECT diskquota.set_schema_tablespace_quota('spcs1_perseg', 'schemaspc_perseg','-1 MB'); + set_schema_tablespace_quota +----------------------------- + +(1 row) + +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- expect insert success +INSERT INTO a SELECT generate_series(1,100); +SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name = 'spcs1_perseg' and tablespace_name ='schemaspc_perseg'; + schema_name | tablespace_name | quota_in_mb | nspsize_tablespace_in_bytes +-------------+-----------------+-------------+----------------------------- +(0 rows) + +RESET search_path; +DROP TABLE spcs1_perseg.a; +DROP SCHEMA spcs1_perseg; +DROP TABLESPACE schemaspc_perseg; +DROP TABLESPACE schemaspc_perseg2; +\! rm -rf /tmp/schemaspc_perseg +\! 
rm -rf /tmp/schemaspc_perseg2 diff --git a/upgrade_test/sql/clean.sql b/upgrade_test/sql/clean.sql index bf71fcb0d19..908aa891f77 100644 --- a/upgrade_test/sql/clean.sql +++ b/upgrade_test/sql/clean.sql @@ -1,5 +1,28 @@ -DROP TABLE badquota.t1; -DROP ROLE testbody; -DROP SCHEMA badquota; +DROP TABLE IF EXISTS badquota.t1; +DROP TABLE IF EXISTS badbody_schema.t2; +DROP ROLE IF EXISTS testbody; +DROP SCHEMA IF EXISTS badquota; +DROP ROLE IF EXISTS badbody; +DROP SCHEMA IF EXISTS badbody_scehma; +DROP SCHEMA IF EXISTS deleteschema; +DROP SCHEMA IF EXISTS srs1; +DROP SCHEMA IF EXISTS srr1; +DROP SCHEMA IF EXISTS srE; +DROP SCHEMA IF EXISTS s1; +DROP SCHEMA IF EXISTS s2; +DROP SCHEMA IF EXISTS s3; +DROP SCHEMA IF EXISTS s4; +DROP SCHEMA IF EXISTS s5; +DROP SCHEMA IF EXISTS s6; +DROP SCHEMA IF EXISTS s7; + +DROP TABLE IF EXISTS b; +DROP TABLE IF EXISTS b2; +DROP ROLE IF EXISTS srerole; +DROP ROLE IF EXISTS srole; +DROP ROLE IF EXISTS strole; +DROP ROLE IF EXISTS u1; +DROP ROLE IF EXISTS u2; +DROP ROLE IF EXISTS u3temp; DROP EXTENSION diskquota; diff --git a/upgrade_test/sql/install_old_version.sql b/upgrade_test/sql/install_old_version.sql new file mode 100644 index 00000000000..1622491202c --- /dev/null +++ b/upgrade_test/sql/install_old_version.sql @@ -0,0 +1,2 @@ +\! install_old_version_diskquota +\! gpstop -raf > /dev/null diff --git a/upgrade_test/sql/prepare.sql b/upgrade_test/sql/prepare.sql index 610e3df17d4..5cf05d2d2ac 100644 --- a/upgrade_test/sql/prepare.sql +++ b/upgrade_test/sql/prepare.sql @@ -16,3 +16,16 @@ INSERT INTO badquota.t1 SELECT generate_series(0, 100000); SELECT pg_sleep(10); -- expect fail INSERT INTO badquota.t1 SELECT generate_series(0, 10); + +-- prepare a role that has reached quota limit +DROP SCHEMA IF EXISTS badbody_schema; +CREATE SCHEMA badbody_schema; +DROP ROLE IF EXISTS badbody; +CREATE ROLE badbody; +SELECT diskquota.set_role_quota('badbody', '2 MB'); +CREATE TABLE badbody_schema.t2(i INT); +ALTER TABLE badbody_schema.t2 OWNER TO badbody; +INSERT INTO badbody_schema.t2 SELECT generate_series(0, 100000); +SELECT pg_sleep(10); +-- expect fail +INSERT INTO badbody_schema.t2 SELECT generate_series(0, 10); diff --git a/upgrade_test/sql/test_role.sql b/upgrade_test/sql/test_role.sql index f9f8bd0e4c7..39881adbf31 100644 --- a/upgrade_test/sql/test_role.sql +++ b/upgrade_test/sql/test_role.sql @@ -27,6 +27,10 @@ INSERT INTO b SELECT generate_series(1,100); -- expect insert succeed INSERT INTO b2 SELECT generate_series(1,100); +ALTER TABLE b OWNER TO badbody; +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); + DROP TABLE b, b2; DROP ROLE u1, u2; RESET search_path; diff --git a/upgrade_test/sql/test_tablespace_role.sql b/upgrade_test/sql/test_tablespace_role.sql new file mode 100644 index 00000000000..31d36f79309 --- /dev/null +++ b/upgrade_test/sql/test_tablespace_role.sql @@ -0,0 +1,85 @@ +-- Test role quota +-- start_ignore +\! 
mkdir /tmp/rolespc +-- end_ignore +DROP TABLESPACE IF EXISTS rolespc; +CREATE TABLESPACE rolespc LOCATION '/tmp/rolespc'; +CREATE SCHEMA rolespcrole; +SET search_path TO rolespcrole; + +DROP ROLE IF EXISTS rolespcu1; +DROP ROLE IF EXISTS rolespcu2; +CREATE ROLE rolespcu1 NOLOGIN; +CREATE ROLE rolespcu2 NOLOGIN; +CREATE TABLE b (t TEXT) TABLESPACE rolespc; +ALTER TABLE b OWNER TO rolespcu1; +CREATE TABLE b2 (t TEXT) TABLESPACE rolespc; +ALTER TABLE b2 OWNER TO rolespcu1; + +SELECT diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '1 MB'); + +INSERT INTO b SELECT generate_series(1,100); +-- expect insert success +INSERT INTO b SELECT generate_series(1,100000); +SELECT pg_sleep(5); +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); +-- expect insert fail +INSERT INTO b2 SELECT generate_series(1,100); + +-- Test show_fast_schema_tablespace_quota_view +SELECT role_name, tablespace_name, quota_in_mb, rolsize_tablespace_in_bytes FROM diskquota.show_fast_role_tablespace_quota_view WHERE role_name = 'rolespcu1' and tablespace_name = 'rolespc'; + +-- Test alter owner +ALTER TABLE b OWNER TO rolespcu2; +SELECT pg_sleep(20); +-- expect insert succeed +INSERT INTO b SELECT generate_series(1,100); +-- expect insert succeed +INSERT INTO b2 SELECT generate_series(1,100); +ALTER TABLE b OWNER TO rolespcu1; +SELECT pg_sleep(20); +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); + +-- Test alter tablespace +-- start_ignore +\! mkdir /tmp/rolespc2 +-- end_ignore +DROP TABLESPACE IF EXISTS rolespc2; +CREATE TABLESPACE rolespc2 LOCATION '/tmp/rolespc2'; +ALTER TABLE b SET TABLESPACE rolespc2; +SELECT pg_sleep(20); +-- expect insert succeed +INSERT INTO b SELECT generate_series(1,100); +-- alter table b back to tablespace rolespc +ALTER TABLE b SET TABLESPACE rolespc; +SELECT pg_sleep(20); +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); + +-- Test update quota config +SELECT diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '10 MB'); +SELECT pg_sleep(20); +-- expect insert success +INSERT INTO b SELECT generate_series(1,100); +-- expect insert success +INSERT INTO b SELECT generate_series(1,1000000); +SELECT pg_sleep(5); +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); + +-- Test delete quota config +SELECT diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '-1 MB'); +SELECT pg_sleep(5); +-- expect insert success +INSERT INTO b SELECT generate_series(1,100); + +DROP TABLE b, b2; +DROP ROLE rolespcu1, rolespcu2; +RESET search_path; +DROP SCHEMA rolespcrole; +DROP TABLESPACE rolespc; +DROP TABLESPACE rolespc2; +\! rm -rf /tmp/rolespc; +\! rm -rf /tmp/rolespc2 diff --git a/upgrade_test/sql/test_tablespace_role_perseg.sql b/upgrade_test/sql/test_tablespace_role_perseg.sql new file mode 100644 index 00000000000..68a7f426c99 --- /dev/null +++ b/upgrade_test/sql/test_tablespace_role_perseg.sql @@ -0,0 +1,95 @@ +-- Test role quota +-- start_ignore +\! 
mkdir /tmp/rolespc_perseg +-- end_ignore +DROP TABLESPACE IF EXISTS rolespc_perseg; +CREATE TABLESPACE rolespc_perseg LOCATION '/tmp/rolespc_perseg'; +CREATE SCHEMA rolespc_persegrole; +SET search_path TO rolespc_persegrole; + +DROP ROLE IF EXISTS rolespc_persegu1; +DROP ROLE IF EXISTS rolespc_persegu2; +CREATE ROLE rolespc_persegu1 NOLOGIN; +CREATE ROLE rolespc_persegu2 NOLOGIN; +CREATE TABLE b (t TEXT) TABLESPACE rolespc_perseg; +ALTER TABLE b OWNER TO rolespc_persegu1; + +SELECT diskquota.set_role_tablespace_quota('rolespc_persegu1', 'rolespc_perseg', '1 MB'); + +INSERT INTO b SELECT generate_series(1,100); +-- expect insert success +INSERT INTO b SELECT generate_series(1,100000); +SELECT pg_sleep(5); +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); +-- change tablespace role quota +SELECT diskquota.set_role_tablespace_quota('rolespc_persegu1', 'rolespc_perseg', '10 MB'); +SELECT pg_sleep(5); +-- expect insert success +INSERT INTO b SELECT generate_series(1,100); + +-- Test show_fast_schema_tablespace_quota_view +SELECT role_name, tablespace_name, quota_in_mb, rolsize_tablespace_in_bytes FROM diskquota.show_fast_role_tablespace_quota_view WHERE role_name = 'rolespc_persegu1' and tablespace_name = 'rolespc_perseg'; + +SELECT diskquota.set_per_segment_quota('rolespc_perseg', '0.1'); +SELECT pg_sleep(5); +---- expect insert fail by tablespace schema perseg quota +INSERT INTO b SELECT generate_series(1,100); +-- Test alter owner +ALTER TABLE b OWNER TO rolespc_persegu2; +SELECT pg_sleep(20); +-- expect insert succeed +INSERT INTO b SELECT generate_series(1,100); +ALTER TABLE b OWNER TO rolespc_persegu1; +SELECT pg_sleep(20); +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); + +-- Test alter tablespace +-- start_ignore +\! mkdir /tmp/rolespc_perseg2 +-- end_ignore +DROP TABLESPACE IF EXISTS rolespc_perseg2; +CREATE TABLESPACE rolespc_perseg2 LOCATION '/tmp/rolespc_perseg2'; +ALTER TABLE b SET TABLESPACE rolespc_perseg2; +SELECT pg_sleep(20); +-- expect insert succeed +INSERT INTO b SELECT generate_series(1,100); +-- alter table b back to tablespace rolespc_perseg +ALTER TABLE b SET TABLESPACE rolespc_perseg; +SELECT pg_sleep(20); +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); + +-- Test update per segment ratio +SELECT diskquota.set_per_segment_quota('rolespc_perseg', 3.1); +SELECT pg_sleep(20); +SELECT role_name, tablespace_name, quota_in_mb, rolsize_tablespace_in_bytes FROM diskquota.show_fast_role_tablespace_quota_view WHERE role_name = 'rolespc_persegu1' and tablespace_name = 'rolespc_perseg'; + +-- expect insert success +INSERT INTO b SELECT generate_series(1,100); +SELECT diskquota.set_per_segment_quota('rolespc_perseg', 0.11); +SELECT pg_sleep(5); +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); + +-- Test delete per segment ratio +SELECT diskquota.set_per_segment_quota('rolespc_perseg', -1); +SELECT pg_sleep(5); +-- expect insert success +INSERT INTO b SELECT generate_series(1,100); + +-- Test delete quota config +SELECT diskquota.set_role_tablespace_quota('rolespc_persegu1', 'rolespc_perseg', '-1 MB'); +SELECT pg_sleep(5); +-- expect insert success +INSERT INTO b SELECT generate_series(1,100); + +DROP table b; +DROP ROLE rolespc_persegu1, rolespc_persegu2; +RESET search_path; +DROP SCHEMA rolespc_persegrole; +DROP TABLESPACE rolespc_perseg; +DROP TABLESPACE rolespc_perseg2; +\! rm -rf /tmp/rolespc_perseg; +\! 
rm -rf /tmp/rolespc_perseg2 diff --git a/upgrade_test/sql/test_tablespace_schema.sql b/upgrade_test/sql/test_tablespace_schema.sql new file mode 100644 index 00000000000..717550bca62 --- /dev/null +++ b/upgrade_test/sql/test_tablespace_schema.sql @@ -0,0 +1,75 @@ +-- Test schema +-- start_ignore +\! mkdir /tmp/schemaspc +-- end_ignore +CREATE SCHEMA spcs1; +DROP TABLESPACE IF EXISTS schemaspc; +CREATE TABLESPACE schemaspc LOCATION '/tmp/schemaspc'; +SELECT diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc','1 MB'); +SET search_path TO spcs1; + +CREATE TABLE a(i int) TABLESPACE schemaspc; +INSERT INTO a SELECT generate_series(1,100); +-- expect insert fail +INSERT INTO a SELECT generate_series(1,100000); +SELECT pg_sleep(5); +-- expect insert fail +INSERT INTO a SELECT generate_series(1,100); +CREATE TABLE a2(i int) TABLESPACE schemaspc; +-- expect insert fail +INSERT INTO a2 SELECT generate_series(1,100); + +-- Test alter table set schema +CREATE SCHEMA spcs2; +ALTER TABLE spcs1.a SET SCHEMA spcs2; +SELECT pg_sleep(20); +-- expect insert succeed +INSERT INTO a2 SELECT generate_series(1,200); +-- expect insert succeed +INSERT INTO spcs2.a SELECT generate_series(1,200); +ALTER TABLE spcs2.a SET SCHEMA spcs1; +SELECT pg_sleep(10); +-- expect insert fail +INSERT INTO a SELECT generate_series(1,200); +SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name = 'spcs1' and tablespace_name ='schemaspc'; + +-- Test alter tablespace +-- start_ignore +\! mkdir /tmp/schemaspc2 +-- end_ignore +DROP TABLESPACE IF EXISTS schemaspc2; +CREATE TABLESPACE schemaspc2 LOCATION '/tmp/schemaspc2'; +ALTER TABLE a SET TABLESPACE schemaspc2; +SELECT pg_sleep(20); +-- expect insert succeed +INSERT INTO a SELECT generate_series(1,200); +ALTER TABLE a SET TABLESPACE schemaspc; +SELECT pg_sleep(20); +-- expect insert fail +INSERT INTO a SELECT generate_series(1,200); + +-- Test update quota config +SELECT diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc', '10 MB'); +SELECT pg_sleep(20); +-- expect insert success +INSERT INTO a SELECT generate_series(1,100); +-- expect insert success +INSERT INTO a SELECT generate_series(1,1000000); +SELECT pg_sleep(5); +-- expect insert fail +INSERT INTO a SELECT generate_series(1,100); + +-- Test delete quota config +SELECT diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc', '-1 MB'); +SELECT pg_sleep(5); +-- expect insert success +INSERT INTO a SELECT generate_series(1,100); + +RESET search_path; +DROP TABLE spcs1.a2, spcs1.a; +DROP SCHEMA spcs1, spcs2; +DROP TABLESPACE schemaspc; +DROP TABLESPACE schemaspc2; +\! rm -rf /tmp/schemaspc +\! rm -rf /tmp/schemaspc2 + diff --git a/upgrade_test/sql/test_tablespace_schema_perseg.sql b/upgrade_test/sql/test_tablespace_schema_perseg.sql new file mode 100644 index 00000000000..8e40445e5a6 --- /dev/null +++ b/upgrade_test/sql/test_tablespace_schema_perseg.sql @@ -0,0 +1,89 @@ +-- Test schema +-- start_ignore +\! 
mkdir /tmp/schemaspc_perseg +-- end_ignore +-- Test tablespace quota perseg +CREATE SCHEMA spcs1_perseg; +DROP TABLESPACE IF EXISTS schemaspc_perseg; +CREATE TABLESPACE schemaspc_perseg LOCATION '/tmp/schemaspc_perseg'; +SELECT diskquota.set_schema_tablespace_quota('spcs1_perseg', 'schemaspc_perseg','1 MB'); +SET search_path TO spcs1_perseg; + +CREATE TABLE a(i int) TABLESPACE schemaspc_perseg; +INSERT INTO a SELECT generate_series(1,100); +-- expect insert success +INSERT INTO a SELECT generate_series(1,100000); +SELECT pg_sleep(5); +-- expect insert fail by tablespace schema diskquota +INSERT INTO a SELECT generate_series(1,100); +-- change tablespace schema quota +SELECT diskquota.set_schema_tablespace_quota('spcs1_perseg', 'schemaspc_perseg', '10 MB'); +SELECT pg_sleep(5); +-- expect insert success +INSERT INTO a SELECT generate_series(1,100); +SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name = 'spcs1_perseg' and tablespace_name ='schemaspc_perseg'; + +SELECT diskquota.set_per_segment_quota('schemaspc_perseg', 0.1); +SELECT pg_sleep(5); +---- expect insert fail by tablespace schema perseg quota +INSERT INTO a SELECT generate_series(1,100); + +-- Test alter table set schema +CREATE SCHEMA spcs2_perseg; +ALTER TABLE spcs1_perseg.a SET SCHEMA spcs2_perseg; +SELECT pg_sleep(5); +-- expect insert succeed +INSERT INTO spcs2_perseg.a SELECT generate_series(1,200); +ALTER TABLE spcs2_perseg.a SET SCHEMA spcs1_perseg; +SELECT pg_sleep(5); +-- expect insert fail +INSERT INTO a SELECT generate_series(1,200); +SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name = 'spcs1_perseg' and tablespace_name ='schemaspc_perseg'; + +-- Test alter tablespace +-- start_ignore +\! mkdir /tmp/schemaspc_perseg2 +-- end_ignore +DROP TABLESPACE IF EXISTS schemaspc_perseg2; +CREATE TABLESPACE schemaspc_perseg2 LOCATION '/tmp/schemaspc_perseg2'; +ALTER TABLE a SET TABLESPACE schemaspc_perseg2; +SELECT pg_sleep(5); +-- expect insert succeed +INSERT INTO a SELECT generate_series(1,200); +ALTER TABLE a SET TABLESPACE schemaspc_perseg; +SELECT pg_sleep(5); +-- expect insert fail +INSERT INTO a SELECT generate_series(1,200); + +-- Test update per segment ratio +SELECT diskquota.set_per_segment_quota('schemaspc_perseg', 3.1); +SELECT pg_sleep(5); +-- expect insert success +INSERT INTO a SELECT generate_series(1,100); +SELECT diskquota.set_per_segment_quota('schemaspc_perseg', 0.123); +SELECT pg_sleep(5); +---- expect insert fail +INSERT INTO a SELECT generate_series(1,100); + +-- Test delete per segment ratio +SELECT diskquota.set_per_segment_quota('schemaspc_perseg', -1); +SELECT pg_sleep(5); +-- expect insert success +INSERT INTO a SELECT generate_series(1,100); + +-- Test delete tablespace schema quota +SELECT diskquota.set_per_segment_quota('schemaspc_perseg', 2); +SELECT diskquota.set_schema_tablespace_quota('spcs1_perseg', 'schemaspc_perseg','-1 MB'); +SELECT pg_sleep(5); +-- expect insert success +INSERT INTO a SELECT generate_series(1,100); +SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name = 'spcs1_perseg' and tablespace_name ='schemaspc_perseg'; + +RESET search_path; +DROP TABLE spcs1_perseg.a; +DROP SCHEMA spcs1_perseg; +DROP TABLESPACE schemaspc_perseg; +DROP TABLESPACE schemaspc_perseg2; +\! 
rm -rf /tmp/schemaspc_perseg
+\! rm -rf /tmp/schemaspc_perseg2
+

From e38327780264083dd759b7a4d7578268c4a9216b Mon Sep 17 00:00:00 2001
From: Xiaoran Wang
Date: Wed, 1 Sep 2021 10:21:12 +0800
Subject: [PATCH 070/330] Fix quota map (#64)

* Fix the size kept in the quota maps

Each entry in the quota maps stores both a size and a limit.
In calculate_table_disk_usage we only apply the size "increment"
to the entry in quota_map. So if an entry holding some nonzero
size was vacuumed out of the map in the previous loop, the next
loop would recreate it with a size of 0, or 0 + increment, and
the previously accumulated size would be lost. Stop vacuuming
entries out of the quota maps, and make newly created entries
start at 0 before the increment is applied.

* Fix SEGCOUNT

Before this commit, SEGCOUNT was still 0 when is_init was true;
it is now initialized at the start of refresh_disk_quota_model().
---
 expected/prepare.out                       |  27 ++-
 expected/test_role.out                     |  37 ++++
 expected/test_schema.out                   |  18 +-
 expected/test_tablespace_role.out          |  20 +-
 expected/test_tablespace_role_perseg.out   |  15 ++
 expected/test_tablespace_schema.out        |  18 +-
 expected/test_tablespace_schema_perseg.out |  15 ++
 quotamodel.c                               |  34 +--
 sql/prepare.sql                            |   5 +-
 sql/test_role.sql                          |  11 +
 sql/test_schema.sql                        |   3 +-
 sql/test_tablespace_role.sql               |   9 +-
 sql/test_tablespace_role_perseg.sql        |   4 +
 sql/test_tablespace_schema.sql             |   3 +-
 sql/test_tablespace_schema_perseg.sql      |   4 +
 .../expected/test_tablespace_role.out      | 152 +------------
 .../expected/test_tablespace_role_perseg.out | 198 +----------------
 .../expected/test_tablespace_schema.out    | 140 +-----------
 .../test_tablespace_schema_perseg.out      | 199 +-----------------
 upgrade_test/sql/test_tablespace_role.sql  |  86 +-------
 .../sql/test_tablespace_role_perseg.sql    |  96 +--------
 upgrade_test/sql/test_tablespace_schema.sql |  76 +------
 .../sql/test_tablespace_schema_perseg.sql  |  90 +-------
 23 files changed, 178 insertions(+), 1082 deletions(-)
 mode change 100644 => 120000 upgrade_test/expected/test_tablespace_role.out
 mode change 100644 => 120000 upgrade_test/expected/test_tablespace_role_perseg.out
 mode change 100644 => 120000 upgrade_test/expected/test_tablespace_schema.out
 mode change 100644 => 120000 upgrade_test/expected/test_tablespace_schema_perseg.out
 mode change 100644 => 120000 upgrade_test/sql/test_tablespace_role.sql
 mode change 100644 => 120000 upgrade_test/sql/test_tablespace_role_perseg.sql
 mode change 100644 => 120000 upgrade_test/sql/test_tablespace_schema.sql
 mode change 100644 => 120000 upgrade_test/sql/test_tablespace_schema_perseg.sql

diff --git a/expected/prepare.out b/expected/prepare.out
index 7c3d5d2060f..48b51c26687 100644
--- a/expected/prepare.out
+++ b/expected/prepare.out
@@ -29,12 +29,6 @@ SELECT pg_sleep(15);
 
 -- prepare a schema that has reached quota limit
 CREATE SCHEMA badquota;
-SELECT diskquota.set_schema_quota('badquota', '1 MB');
- set_schema_quota
------------------
- 
-(1 row)
-
 DROP ROLE IF EXISTS testbody;
 NOTICE: role "testbody" does not exist, skipping
 CREATE ROLE testbody;
 CREATE TABLE badquota.t1(i INT);
 NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table.
 HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
ALTER TABLE badquota.t1 OWNER TO testbody; INSERT INTO badquota.t1 SELECT generate_series(0, 100000); +SELECT diskquota.init_table_size_table(); + init_table_size_table +----------------------- + +(1 row) + +SELECT diskquota.set_schema_quota('badquota', '1 MB'); + set_schema_quota +------------------ + +(1 row) + SELECT pg_sleep(10); pg_sleep ---------- (1 row) +SELECT size, segid from diskquota.table_size where tableid in (select oid from pg_class where relname='t1'); + size | segid +---------+------- + 1310720 | 0 + 1310720 | 2 + 3932160 | -1 + 1310720 | 1 +(4 rows) + -- expect fail INSERT INTO badquota.t1 SELECT generate_series(0, 10); ERROR: schema's disk space quota exceeded with name:badquota diff --git a/expected/test_role.out b/expected/test_role.out index 280bb91ae81..9b21e01e451 100644 --- a/expected/test_role.out +++ b/expected/test_role.out @@ -34,6 +34,43 @@ ERROR: role's disk space quota exceeded with name:u1 -- expect insert fail INSERT INTO b2 SELECT generate_series(1,100); ERROR: role's disk space quota exceeded with name:u1 +-- Delete role quota +SELECT diskquota.set_role_quota('u1', '-1 MB'); + set_role_quota +---------------- + +(1 row) + +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- expect insert success +INSERT INTO b SELECT generate_series(1,100); +-- Reset role quota +SELECT diskquota.set_role_quota('u1', '1 MB'); + set_role_quota +---------------- + +(1 row) + +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); +ERROR: role's disk space quota exceeded with name:u1 +SELECT role_name, quota_in_mb, rolsize_in_bytes FROM diskquota.show_fast_role_quota_view WHERE role_name='u1'; + role_name | quota_in_mb | rolsize_in_bytes +-----------+-------------+------------------ + u1 | 1 | 4194304 +(1 row) + ALTER TABLE b OWNER TO u2; SELECT pg_sleep(20); pg_sleep diff --git a/expected/test_schema.out b/expected/test_schema.out index 5440f445e5d..0d6c8919f3c 100644 --- a/expected/test_schema.out +++ b/expected/test_schema.out @@ -1,11 +1,5 @@ -- Test schema CREATE SCHEMA s1; -SELECT diskquota.set_schema_quota('s1', '1 MB'); - set_schema_quota ------------------- - -(1 row) - SET search_path TO s1; CREATE TABLE a(i int); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. @@ -19,6 +13,18 @@ SELECT pg_sleep(5); (1 row) +SELECT diskquota.set_schema_quota('s1', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + -- expect insert fail INSERT INTO a SELECT generate_series(1,100); ERROR: schema's disk space quota exceeded with name:s1 diff --git a/expected/test_tablespace_role.out b/expected/test_tablespace_role.out index 0e61c01c350..55811740054 100644 --- a/expected/test_tablespace_role.out +++ b/expected/test_tablespace_role.out @@ -18,20 +18,34 @@ NOTICE: resource queue required -- using default resource queue "pg_default" CREATE TABLE b (t TEXT) TABLESPACE rolespc; NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 't' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
-ALTER TABLE b OWNER TO rolespcu1; CREATE TABLE b2 (t TEXT) TABLESPACE rolespc; NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 't' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. ALTER TABLE b2 OWNER TO rolespcu1; +INSERT INTO b SELECT generate_series(1,100); +-- expect insert success +INSERT INTO b SELECT generate_series(1,100000); +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + SELECT diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '1 MB'); set_role_tablespace_quota --------------------------- (1 row) -INSERT INTO b SELECT generate_series(1,100); +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + -- expect insert success -INSERT INTO b SELECT generate_series(1,100000); +INSERT INTO b SELECT generate_series(1,100); +ALTER TABLE b OWNER TO rolespcu1; SELECT pg_sleep(5); pg_sleep ---------- diff --git a/expected/test_tablespace_role_perseg.out b/expected/test_tablespace_role_perseg.out index 9440989eb93..9b9879a3f0d 100644 --- a/expected/test_tablespace_role_perseg.out +++ b/expected/test_tablespace_role_perseg.out @@ -172,6 +172,21 @@ SELECT pg_sleep(5); -- expect insert success INSERT INTO b SELECT generate_series(1,100); +SELECT diskquota.set_per_segment_quota('rolespc_perseg', 0.11); + set_per_segment_quota +----------------------- + +(1 row) + +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); +ERROR: tablespace: rolespc_perseg role: rolespc_persegu1 diskquota exceeded per segment quota -- Test delete quota config SELECT diskquota.set_role_tablespace_quota('rolespc_persegu1', 'rolespc_perseg', '-1 MB'); set_role_tablespace_quota diff --git a/expected/test_tablespace_schema.out b/expected/test_tablespace_schema.out index 132ae4b4edf..f4f4ef4e000 100644 --- a/expected/test_tablespace_schema.out +++ b/expected/test_tablespace_schema.out @@ -6,12 +6,6 @@ CREATE SCHEMA spcs1; DROP TABLESPACE IF EXISTS schemaspc; NOTICE: tablespace "schemaspc" does not exist, skipping CREATE TABLESPACE schemaspc LOCATION '/tmp/schemaspc'; -SELECT diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc','1 MB'); - set_schema_tablespace_quota ------------------------------ - -(1 row) - SET search_path TO spcs1; CREATE TABLE a(i int) TABLESPACE schemaspc; NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. 
@@ -25,6 +19,18 @@ SELECT pg_sleep(5); (1 row) +SELECT diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc','1 MB'); + set_schema_tablespace_quota +----------------------------- + +(1 row) + +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + -- expect insert fail INSERT INTO a SELECT generate_series(1,100); ERROR: tablespace:schemaspc schema:spcs1 diskquota exceeded diff --git a/expected/test_tablespace_schema_perseg.out b/expected/test_tablespace_schema_perseg.out index cffe25ba4a3..d5506c67807 100644 --- a/expected/test_tablespace_schema_perseg.out +++ b/expected/test_tablespace_schema_perseg.out @@ -163,6 +163,21 @@ SELECT pg_sleep(5); -- expect insert success INSERT INTO a SELECT generate_series(1,100); +SELECT diskquota.set_per_segment_quota('schemaspc_perseg', 0.123); + set_per_segment_quota +----------------------- + +(1 row) + +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +---- expect insert fail +INSERT INTO a SELECT generate_series(1,100); +ERROR: tablespace:schemaspc_perseg schema:spcs1_perseg diskquota exceeded per segment quota -- Test delete tablespace schema quota SELECT diskquota.set_per_segment_quota('schemaspc_perseg', 2); set_per_segment_quota diff --git a/quotamodel.c b/quotamodel.c index 75790cabf77..2033fe62e34 100644 --- a/quotamodel.c +++ b/quotamodel.c @@ -173,7 +173,6 @@ static void remove_quota(QuotaType type, Oid* keys, int16 segid); static void add_quota_to_blacklist(QuotaType type, Oid targetOid, Oid tablespaceoid, bool segexceeded); static void check_quota_map(QuotaType type); static void clear_all_quota_maps(void); -static void vacuum_all_quota_maps(void); static void transfer_table_for_quota(int64 totalsize, QuotaType type, Oid* old_keys, Oid* new_keys, int16 segid); /* functions to refresh disk quota model*/ @@ -223,15 +222,12 @@ update_size_for_quota(int64 size, QuotaType type, Oid* keys, int16 segid) quota_info[type].map, &key, HASH_ENTER, &found); if (!found) { - entry->size = size; + entry->size = 0; entry->limit = -1; memcpy(entry->keys, keys, quota_info[type].num_keys * sizeof(Oid)); entry->segid = key.segid; } - else - { - entry->size += size; - } + entry->size += size; } /* add a new entry quota or update the old entry limit */ @@ -365,24 +361,6 @@ clear_all_quota_maps(void) } } -static void -vacuum_all_quota_maps(void) { - for (QuotaType type = 0; type < NUM_QUOTA_TYPES; ++type) - { - HASH_SEQ_STATUS iter = {0}; - hash_seq_init(&iter, quota_info[type].map); - struct QuotaMapEntry *entry = NULL; - while ((entry = hash_seq_search(&iter)) != NULL) - { - if (entry->limit == -1) - { - remove_quota(type, entry->keys, entry->segid); - } - } - - } - -} /* ---- Functions for disk quota shared memory ---- */ /* * DiskQuotaShmemInit @@ -668,6 +646,13 @@ do_check_diskquota_state_is_ready(void) void refresh_disk_quota_model(bool is_init) { + SEGCOUNT = getgpsegmentCount(); + if (SEGCOUNT <= 0 ) + { + ereport(ERROR, + (errmsg("[diskquota] there is no active segment, SEGCOUNT is %d", SEGCOUNT))); + } + if (is_init) ereport(LOG, (errmsg("[diskquota] initialize quota model started"))); /* skip refresh model when load_quotas failed */ @@ -1306,7 +1291,6 @@ do_load_quotas(void) } } - vacuum_all_quota_maps(); return; } diff --git a/sql/prepare.sql b/sql/prepare.sql index 2ce7c902eea..7457643aa16 100644 --- a/sql/prepare.sql +++ b/sql/prepare.sql @@ -1,7 +1,6 @@ CREATE EXTENSION diskquota; -- start_ignore \! gpstop -u -SELECT diskquota.init_table_size_table(); -- end_ignore SELECT pg_sleep(1); \! 
cp data/csmall.txt /tmp/csmall.txt @@ -9,12 +8,14 @@ SELECT pg_sleep(15); -- prepare a schema that has reached quota limit CREATE SCHEMA badquota; -SELECT diskquota.set_schema_quota('badquota', '1 MB'); DROP ROLE IF EXISTS testbody; CREATE ROLE testbody; CREATE TABLE badquota.t1(i INT); ALTER TABLE badquota.t1 OWNER TO testbody; INSERT INTO badquota.t1 SELECT generate_series(0, 100000); +SELECT diskquota.init_table_size_table(); +SELECT diskquota.set_schema_quota('badquota', '1 MB'); SELECT pg_sleep(10); +SELECT size, segid from diskquota.table_size where tableid in (select oid from pg_class where relname='t1'); -- expect fail INSERT INTO badquota.t1 SELECT generate_series(0, 10); diff --git a/sql/test_role.sql b/sql/test_role.sql index 8aaa3a9bca3..3847b7d86c7 100644 --- a/sql/test_role.sql +++ b/sql/test_role.sql @@ -20,6 +20,17 @@ SELECT pg_sleep(5); INSERT INTO b SELECT generate_series(1,100); -- expect insert fail INSERT INTO b2 SELECT generate_series(1,100); +-- Delete role quota +SELECT diskquota.set_role_quota('u1', '-1 MB'); +SELECT pg_sleep(5); +-- expect insert success +INSERT INTO b SELECT generate_series(1,100); +-- Reset role quota +SELECT diskquota.set_role_quota('u1', '1 MB'); +SELECT pg_sleep(5); +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); +SELECT role_name, quota_in_mb, rolsize_in_bytes FROM diskquota.show_fast_role_quota_view WHERE role_name='u1'; ALTER TABLE b OWNER TO u2; SELECT pg_sleep(20); -- expect insert succeed diff --git a/sql/test_schema.sql b/sql/test_schema.sql index 3e825db617d..b0aff40ba6b 100644 --- a/sql/test_schema.sql +++ b/sql/test_schema.sql @@ -1,6 +1,5 @@ -- Test schema CREATE SCHEMA s1; -SELECT diskquota.set_schema_quota('s1', '1 MB'); SET search_path TO s1; CREATE TABLE a(i int); @@ -8,6 +7,8 @@ INSERT INTO a SELECT generate_series(1,100); -- expect insert fail INSERT INTO a SELECT generate_series(1,100000); SELECT pg_sleep(5); +SELECT diskquota.set_schema_quota('s1', '1 MB'); +SELECT pg_sleep(5); -- expect insert fail INSERT INTO a SELECT generate_series(1,100); CREATE TABLE a2(i int); diff --git a/sql/test_tablespace_role.sql b/sql/test_tablespace_role.sql index 31d36f79309..87b195f738e 100644 --- a/sql/test_tablespace_role.sql +++ b/sql/test_tablespace_role.sql @@ -12,16 +12,19 @@ DROP ROLE IF EXISTS rolespcu2; CREATE ROLE rolespcu1 NOLOGIN; CREATE ROLE rolespcu2 NOLOGIN; CREATE TABLE b (t TEXT) TABLESPACE rolespc; -ALTER TABLE b OWNER TO rolespcu1; CREATE TABLE b2 (t TEXT) TABLESPACE rolespc; ALTER TABLE b2 OWNER TO rolespcu1; -SELECT diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '1 MB'); - INSERT INTO b SELECT generate_series(1,100); -- expect insert success INSERT INTO b SELECT generate_series(1,100000); SELECT pg_sleep(5); +SELECT diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '1 MB'); +SELECT pg_sleep(5); +-- expect insert success +INSERT INTO b SELECT generate_series(1,100); +ALTER TABLE b OWNER TO rolespcu1; +SELECT pg_sleep(5); -- expect insert fail INSERT INTO b SELECT generate_series(1,100); -- expect insert fail diff --git a/sql/test_tablespace_role_perseg.sql b/sql/test_tablespace_role_perseg.sql index 68a7f426c99..0920aaeb39d 100644 --- a/sql/test_tablespace_role_perseg.sql +++ b/sql/test_tablespace_role_perseg.sql @@ -78,6 +78,10 @@ SELECT diskquota.set_per_segment_quota('rolespc_perseg', -1); SELECT pg_sleep(5); -- expect insert success INSERT INTO b SELECT generate_series(1,100); +SELECT diskquota.set_per_segment_quota('rolespc_perseg', 0.11); +SELECT pg_sleep(5); +-- expect 
insert fail +INSERT INTO b SELECT generate_series(1,100); -- Test delete quota config SELECT diskquota.set_role_tablespace_quota('rolespc_persegu1', 'rolespc_perseg', '-1 MB'); diff --git a/sql/test_tablespace_schema.sql b/sql/test_tablespace_schema.sql index 717550bca62..db584007f18 100644 --- a/sql/test_tablespace_schema.sql +++ b/sql/test_tablespace_schema.sql @@ -5,7 +5,6 @@ CREATE SCHEMA spcs1; DROP TABLESPACE IF EXISTS schemaspc; CREATE TABLESPACE schemaspc LOCATION '/tmp/schemaspc'; -SELECT diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc','1 MB'); SET search_path TO spcs1; CREATE TABLE a(i int) TABLESPACE schemaspc; @@ -13,6 +12,8 @@ INSERT INTO a SELECT generate_series(1,100); -- expect insert fail INSERT INTO a SELECT generate_series(1,100000); SELECT pg_sleep(5); +SELECT diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc','1 MB'); +SELECT pg_sleep(5); -- expect insert fail INSERT INTO a SELECT generate_series(1,100); CREATE TABLE a2(i int) TABLESPACE schemaspc; diff --git a/sql/test_tablespace_schema_perseg.sql b/sql/test_tablespace_schema_perseg.sql index 8e40445e5a6..b95c7067bb4 100644 --- a/sql/test_tablespace_schema_perseg.sql +++ b/sql/test_tablespace_schema_perseg.sql @@ -70,6 +70,10 @@ SELECT diskquota.set_per_segment_quota('schemaspc_perseg', -1); SELECT pg_sleep(5); -- expect insert success INSERT INTO a SELECT generate_series(1,100); +SELECT diskquota.set_per_segment_quota('schemaspc_perseg', 0.123); +SELECT pg_sleep(5); +---- expect insert fail +INSERT INTO a SELECT generate_series(1,100); -- Test delete tablespace schema quota SELECT diskquota.set_per_segment_quota('schemaspc_perseg', 2); diff --git a/upgrade_test/expected/test_tablespace_role.out b/upgrade_test/expected/test_tablespace_role.out deleted file mode 100644 index 0e61c01c350..00000000000 --- a/upgrade_test/expected/test_tablespace_role.out +++ /dev/null @@ -1,151 +0,0 @@ --- Test role quota --- start_ignore -\! mkdir /tmp/rolespc --- end_ignore -DROP TABLESPACE IF EXISTS rolespc; -NOTICE: tablespace "rolespc" does not exist, skipping -CREATE TABLESPACE rolespc LOCATION '/tmp/rolespc'; -CREATE SCHEMA rolespcrole; -SET search_path TO rolespcrole; -DROP ROLE IF EXISTS rolespcu1; -NOTICE: role "rolespcu1" does not exist, skipping -DROP ROLE IF EXISTS rolespcu2; -NOTICE: role "rolespcu2" does not exist, skipping -CREATE ROLE rolespcu1 NOLOGIN; -NOTICE: resource queue required -- using default resource queue "pg_default" -CREATE ROLE rolespcu2 NOLOGIN; -NOTICE: resource queue required -- using default resource queue "pg_default" -CREATE TABLE b (t TEXT) TABLESPACE rolespc; -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 't' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. -ALTER TABLE b OWNER TO rolespcu1; -CREATE TABLE b2 (t TEXT) TABLESPACE rolespc; -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 't' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
-ALTER TABLE b2 OWNER TO rolespcu1; -SELECT diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '1 MB'); - set_role_tablespace_quota ---------------------------- - -(1 row) - -INSERT INTO b SELECT generate_series(1,100); --- expect insert success -INSERT INTO b SELECT generate_series(1,100000); -SELECT pg_sleep(5); - pg_sleep ----------- - -(1 row) - --- expect insert fail -INSERT INTO b SELECT generate_series(1,100); -ERROR: tablespace: rolespc role: rolespcu1 diskquota exceeded --- expect insert fail -INSERT INTO b2 SELECT generate_series(1,100); -ERROR: tablespace: rolespc role: rolespcu1 diskquota exceeded --- Test show_fast_schema_tablespace_quota_view -SELECT role_name, tablespace_name, quota_in_mb, rolsize_tablespace_in_bytes FROM diskquota.show_fast_role_tablespace_quota_view WHERE role_name = 'rolespcu1' and tablespace_name = 'rolespc'; - role_name | tablespace_name | quota_in_mb | rolsize_tablespace_in_bytes ------------+-----------------+-------------+----------------------------- - rolespcu1 | rolespc | 1 | 4194304 -(1 row) - --- Test alter owner -ALTER TABLE b OWNER TO rolespcu2; -SELECT pg_sleep(20); - pg_sleep ----------- - -(1 row) - --- expect insert succeed -INSERT INTO b SELECT generate_series(1,100); --- expect insert succeed -INSERT INTO b2 SELECT generate_series(1,100); -ALTER TABLE b OWNER TO rolespcu1; -SELECT pg_sleep(20); - pg_sleep ----------- - -(1 row) - --- expect insert fail -INSERT INTO b SELECT generate_series(1,100); -ERROR: tablespace: rolespc role: rolespcu1 diskquota exceeded --- Test alter tablespace --- start_ignore -\! mkdir /tmp/rolespc2 --- end_ignore -DROP TABLESPACE IF EXISTS rolespc2; -NOTICE: tablespace "rolespc2" does not exist, skipping -CREATE TABLESPACE rolespc2 LOCATION '/tmp/rolespc2'; -ALTER TABLE b SET TABLESPACE rolespc2; -SELECT pg_sleep(20); - pg_sleep ----------- - -(1 row) - --- expect insert succeed -INSERT INTO b SELECT generate_series(1,100); --- alter table b back to tablespace rolespc -ALTER TABLE b SET TABLESPACE rolespc; -SELECT pg_sleep(20); - pg_sleep ----------- - -(1 row) - --- expect insert fail -INSERT INTO b SELECT generate_series(1,100); -ERROR: tablespace: rolespc role: rolespcu1 diskquota exceeded --- Test update quota config -SELECT diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '10 MB'); - set_role_tablespace_quota ---------------------------- - -(1 row) - -SELECT pg_sleep(20); - pg_sleep ----------- - -(1 row) - --- expect insert success -INSERT INTO b SELECT generate_series(1,100); --- expect insert success -INSERT INTO b SELECT generate_series(1,1000000); -SELECT pg_sleep(5); - pg_sleep ----------- - -(1 row) - --- expect insert fail -INSERT INTO b SELECT generate_series(1,100); -ERROR: tablespace: rolespc role: rolespcu1 diskquota exceeded --- Test delete quota config -SELECT diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '-1 MB'); - set_role_tablespace_quota ---------------------------- - -(1 row) - -SELECT pg_sleep(5); - pg_sleep ----------- - -(1 row) - --- expect insert success -INSERT INTO b SELECT generate_series(1,100); -DROP TABLE b, b2; -DROP ROLE rolespcu1, rolespcu2; -RESET search_path; -DROP SCHEMA rolespcrole; -DROP TABLESPACE rolespc; -DROP TABLESPACE rolespc2; -\! rm -rf /tmp/rolespc; -\! 
rm -rf /tmp/rolespc2 diff --git a/upgrade_test/expected/test_tablespace_role.out b/upgrade_test/expected/test_tablespace_role.out new file mode 120000 index 00000000000..f7b0a38658d --- /dev/null +++ b/upgrade_test/expected/test_tablespace_role.out @@ -0,0 +1 @@ +../../expected/test_tablespace_role.out \ No newline at end of file diff --git a/upgrade_test/expected/test_tablespace_role_perseg.out b/upgrade_test/expected/test_tablespace_role_perseg.out deleted file mode 100644 index 9440989eb93..00000000000 --- a/upgrade_test/expected/test_tablespace_role_perseg.out +++ /dev/null @@ -1,197 +0,0 @@ --- Test role quota --- start_ignore -\! mkdir /tmp/rolespc_perseg --- end_ignore -DROP TABLESPACE IF EXISTS rolespc_perseg; -NOTICE: tablespace "rolespc_perseg" does not exist, skipping -CREATE TABLESPACE rolespc_perseg LOCATION '/tmp/rolespc_perseg'; -CREATE SCHEMA rolespc_persegrole; -SET search_path TO rolespc_persegrole; -DROP ROLE IF EXISTS rolespc_persegu1; -NOTICE: role "rolespc_persegu1" does not exist, skipping -DROP ROLE IF EXISTS rolespc_persegu2; -NOTICE: role "rolespc_persegu2" does not exist, skipping -CREATE ROLE rolespc_persegu1 NOLOGIN; -NOTICE: resource queue required -- using default resource queue "pg_default" -CREATE ROLE rolespc_persegu2 NOLOGIN; -NOTICE: resource queue required -- using default resource queue "pg_default" -CREATE TABLE b (t TEXT) TABLESPACE rolespc_perseg; -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 't' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. -ALTER TABLE b OWNER TO rolespc_persegu1; -SELECT diskquota.set_role_tablespace_quota('rolespc_persegu1', 'rolespc_perseg', '1 MB'); - set_role_tablespace_quota ---------------------------- - -(1 row) - -INSERT INTO b SELECT generate_series(1,100); --- expect insert success -INSERT INTO b SELECT generate_series(1,100000); -SELECT pg_sleep(5); - pg_sleep ----------- - -(1 row) - --- expect insert fail -INSERT INTO b SELECT generate_series(1,100); -ERROR: tablespace: rolespc_perseg role: rolespc_persegu1 diskquota exceeded --- change tablespace role quota -SELECT diskquota.set_role_tablespace_quota('rolespc_persegu1', 'rolespc_perseg', '10 MB'); - set_role_tablespace_quota ---------------------------- - -(1 row) - -SELECT pg_sleep(5); - pg_sleep ----------- - -(1 row) - --- expect insert success -INSERT INTO b SELECT generate_series(1,100); --- Test show_fast_schema_tablespace_quota_view -SELECT role_name, tablespace_name, quota_in_mb, rolsize_tablespace_in_bytes FROM diskquota.show_fast_role_tablespace_quota_view WHERE role_name = 'rolespc_persegu1' and tablespace_name = 'rolespc_perseg'; - role_name | tablespace_name | quota_in_mb | rolsize_tablespace_in_bytes -------------------+-----------------+-------------+----------------------------- - rolespc_persegu1 | rolespc_perseg | 10 | 4063232 -(1 row) - -SELECT diskquota.set_per_segment_quota('rolespc_perseg', '0.1'); - set_per_segment_quota ------------------------ - -(1 row) - -SELECT pg_sleep(5); - pg_sleep ----------- - -(1 row) - ----- expect insert fail by tablespace schema perseg quota -INSERT INTO b SELECT generate_series(1,100); -ERROR: tablespace: rolespc_perseg role: rolespc_persegu1 diskquota exceeded per segment quota --- Test alter owner -ALTER TABLE b OWNER TO rolespc_persegu2; -SELECT pg_sleep(20); - pg_sleep ----------- - -(1 row) - --- expect insert 
succeed -INSERT INTO b SELECT generate_series(1,100); -ALTER TABLE b OWNER TO rolespc_persegu1; -SELECT pg_sleep(20); - pg_sleep ----------- - -(1 row) - --- expect insert fail -INSERT INTO b SELECT generate_series(1,100); -ERROR: tablespace: rolespc_perseg role: rolespc_persegu1 diskquota exceeded per segment quota --- Test alter tablespace --- start_ignore -\! mkdir /tmp/rolespc_perseg2 --- end_ignore -DROP TABLESPACE IF EXISTS rolespc_perseg2; -NOTICE: tablespace "rolespc_perseg2" does not exist, skipping -CREATE TABLESPACE rolespc_perseg2 LOCATION '/tmp/rolespc_perseg2'; -ALTER TABLE b SET TABLESPACE rolespc_perseg2; -SELECT pg_sleep(20); - pg_sleep ----------- - -(1 row) - --- expect insert succeed -INSERT INTO b SELECT generate_series(1,100); --- alter table b back to tablespace rolespc_perseg -ALTER TABLE b SET TABLESPACE rolespc_perseg; -SELECT pg_sleep(20); - pg_sleep ----------- - -(1 row) - --- expect insert fail -INSERT INTO b SELECT generate_series(1,100); -ERROR: tablespace: rolespc_perseg role: rolespc_persegu1 diskquota exceeded per segment quota --- Test update per segment ratio -SELECT diskquota.set_per_segment_quota('rolespc_perseg', 3.1); - set_per_segment_quota ------------------------ - -(1 row) - -SELECT pg_sleep(20); - pg_sleep ----------- - -(1 row) - -SELECT role_name, tablespace_name, quota_in_mb, rolsize_tablespace_in_bytes FROM diskquota.show_fast_role_tablespace_quota_view WHERE role_name = 'rolespc_persegu1' and tablespace_name = 'rolespc_perseg'; - role_name | tablespace_name | quota_in_mb | rolsize_tablespace_in_bytes -------------------+-----------------+-------------+----------------------------- - rolespc_persegu1 | rolespc_perseg | 10 | 4063232 -(1 row) - --- expect insert success -INSERT INTO b SELECT generate_series(1,100); -SELECT diskquota.set_per_segment_quota('rolespc_perseg', 0.11); - set_per_segment_quota ------------------------ - -(1 row) - -SELECT pg_sleep(5); - pg_sleep ----------- - -(1 row) - --- expect insert fail -INSERT INTO b SELECT generate_series(1,100); -ERROR: tablespace: rolespc_perseg role: rolespc_persegu1 diskquota exceeded per segment quota --- Test delete per segment ratio -SELECT diskquota.set_per_segment_quota('rolespc_perseg', -1); - set_per_segment_quota ------------------------ - -(1 row) - -SELECT pg_sleep(5); - pg_sleep ----------- - -(1 row) - --- expect insert success -INSERT INTO b SELECT generate_series(1,100); --- Test delete quota config -SELECT diskquota.set_role_tablespace_quota('rolespc_persegu1', 'rolespc_perseg', '-1 MB'); - set_role_tablespace_quota ---------------------------- - -(1 row) - -SELECT pg_sleep(5); - pg_sleep ----------- - -(1 row) - --- expect insert success -INSERT INTO b SELECT generate_series(1,100); -DROP table b; -DROP ROLE rolespc_persegu1, rolespc_persegu2; -RESET search_path; -DROP SCHEMA rolespc_persegrole; -DROP TABLESPACE rolespc_perseg; -DROP TABLESPACE rolespc_perseg2; -\! rm -rf /tmp/rolespc_perseg; -\! 
rm -rf /tmp/rolespc_perseg2 diff --git a/upgrade_test/expected/test_tablespace_role_perseg.out b/upgrade_test/expected/test_tablespace_role_perseg.out new file mode 120000 index 00000000000..d95b77ecb94 --- /dev/null +++ b/upgrade_test/expected/test_tablespace_role_perseg.out @@ -0,0 +1 @@ +../../expected/test_tablespace_role_perseg.out \ No newline at end of file diff --git a/upgrade_test/expected/test_tablespace_schema.out b/upgrade_test/expected/test_tablespace_schema.out deleted file mode 100644 index 132ae4b4edf..00000000000 --- a/upgrade_test/expected/test_tablespace_schema.out +++ /dev/null @@ -1,139 +0,0 @@ --- Test schema --- start_ignore -\! mkdir /tmp/schemaspc --- end_ignore -CREATE SCHEMA spcs1; -DROP TABLESPACE IF EXISTS schemaspc; -NOTICE: tablespace "schemaspc" does not exist, skipping -CREATE TABLESPACE schemaspc LOCATION '/tmp/schemaspc'; -SELECT diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc','1 MB'); - set_schema_tablespace_quota ------------------------------ - -(1 row) - -SET search_path TO spcs1; -CREATE TABLE a(i int) TABLESPACE schemaspc; -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. -INSERT INTO a SELECT generate_series(1,100); --- expect insert fail -INSERT INTO a SELECT generate_series(1,100000); -SELECT pg_sleep(5); - pg_sleep ----------- - -(1 row) - --- expect insert fail -INSERT INTO a SELECT generate_series(1,100); -ERROR: tablespace:schemaspc schema:spcs1 diskquota exceeded -CREATE TABLE a2(i int) TABLESPACE schemaspc; -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. --- expect insert fail -INSERT INTO a2 SELECT generate_series(1,100); -ERROR: tablespace:schemaspc schema:spcs1 diskquota exceeded --- Test alter table set schema -CREATE SCHEMA spcs2; -ALTER TABLE spcs1.a SET SCHEMA spcs2; -SELECT pg_sleep(20); - pg_sleep ----------- - -(1 row) - --- expect insert succeed -INSERT INTO a2 SELECT generate_series(1,200); --- expect insert succeed -INSERT INTO spcs2.a SELECT generate_series(1,200); -ALTER TABLE spcs2.a SET SCHEMA spcs1; -SELECT pg_sleep(10); - pg_sleep ----------- - -(1 row) - --- expect insert fail -INSERT INTO a SELECT generate_series(1,200); -ERROR: tablespace:schemaspc schema:spcs1 diskquota exceeded -SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name = 'spcs1' and tablespace_name ='schemaspc'; - schema_name | tablespace_name | quota_in_mb | nspsize_tablespace_in_bytes --------------+-----------------+-------------+----------------------------- - spcs1 | schemaspc | 1 | 4030464 -(1 row) - --- Test alter tablespace --- start_ignore -\! 
mkdir /tmp/schemaspc2 --- end_ignore -DROP TABLESPACE IF EXISTS schemaspc2; -NOTICE: tablespace "schemaspc2" does not exist, skipping -CREATE TABLESPACE schemaspc2 LOCATION '/tmp/schemaspc2'; -ALTER TABLE a SET TABLESPACE schemaspc2; -SELECT pg_sleep(20); - pg_sleep ----------- - -(1 row) - --- expect insert succeed -INSERT INTO a SELECT generate_series(1,200); -ALTER TABLE a SET TABLESPACE schemaspc; -SELECT pg_sleep(20); - pg_sleep ----------- - -(1 row) - --- expect insert fail -INSERT INTO a SELECT generate_series(1,200); -ERROR: tablespace:schemaspc schema:spcs1 diskquota exceeded --- Test update quota config -SELECT diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc', '10 MB'); - set_schema_tablespace_quota ------------------------------ - -(1 row) - -SELECT pg_sleep(20); - pg_sleep ----------- - -(1 row) - --- expect insert success -INSERT INTO a SELECT generate_series(1,100); --- expect insert success -INSERT INTO a SELECT generate_series(1,1000000); -SELECT pg_sleep(5); - pg_sleep ----------- - -(1 row) - --- expect insert fail -INSERT INTO a SELECT generate_series(1,100); -ERROR: tablespace:schemaspc schema:spcs1 diskquota exceeded --- Test delete quota config -SELECT diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc', '-1 MB'); - set_schema_tablespace_quota ------------------------------ - -(1 row) - -SELECT pg_sleep(5); - pg_sleep ----------- - -(1 row) - --- expect insert success -INSERT INTO a SELECT generate_series(1,100); -RESET search_path; -DROP TABLE spcs1.a2, spcs1.a; -DROP SCHEMA spcs1, spcs2; -DROP TABLESPACE schemaspc; -DROP TABLESPACE schemaspc2; -\! rm -rf /tmp/schemaspc -\! rm -rf /tmp/schemaspc2 diff --git a/upgrade_test/expected/test_tablespace_schema.out b/upgrade_test/expected/test_tablespace_schema.out new file mode 120000 index 00000000000..13dffba2095 --- /dev/null +++ b/upgrade_test/expected/test_tablespace_schema.out @@ -0,0 +1 @@ +../../expected/test_tablespace_schema.out \ No newline at end of file diff --git a/upgrade_test/expected/test_tablespace_schema_perseg.out b/upgrade_test/expected/test_tablespace_schema_perseg.out deleted file mode 100644 index cffe25ba4a3..00000000000 --- a/upgrade_test/expected/test_tablespace_schema_perseg.out +++ /dev/null @@ -1,198 +0,0 @@ --- Test schema --- start_ignore -\! mkdir /tmp/schemaspc_perseg --- end_ignore --- Test tablespace quota perseg -CREATE SCHEMA spcs1_perseg; -DROP TABLESPACE IF EXISTS schemaspc_perseg; -NOTICE: tablespace "schemaspc_perseg" does not exist, skipping -CREATE TABLESPACE schemaspc_perseg LOCATION '/tmp/schemaspc_perseg'; -SELECT diskquota.set_schema_tablespace_quota('spcs1_perseg', 'schemaspc_perseg','1 MB'); - set_schema_tablespace_quota ------------------------------ - -(1 row) - -SET search_path TO spcs1_perseg; -CREATE TABLE a(i int) TABLESPACE schemaspc_perseg; -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
-INSERT INTO a SELECT generate_series(1,100); --- expect insert success -INSERT INTO a SELECT generate_series(1,100000); -SELECT pg_sleep(5); - pg_sleep ----------- - -(1 row) - --- expect insert fail by tablespace schema diskquota -INSERT INTO a SELECT generate_series(1,100); -ERROR: tablespace:schemaspc_perseg schema:spcs1_perseg diskquota exceeded --- change tablespace schema quota -SELECT diskquota.set_schema_tablespace_quota('spcs1_perseg', 'schemaspc_perseg', '10 MB'); - set_schema_tablespace_quota ------------------------------ - -(1 row) - -SELECT pg_sleep(5); - pg_sleep ----------- - -(1 row) - --- expect insert success -INSERT INTO a SELECT generate_series(1,100); -SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name = 'spcs1_perseg' and tablespace_name ='schemaspc_perseg'; - schema_name | tablespace_name | quota_in_mb | nspsize_tablespace_in_bytes ---------------+------------------+-------------+----------------------------- - spcs1_perseg | schemaspc_perseg | 10 | 3932160 -(1 row) - -SELECT diskquota.set_per_segment_quota('schemaspc_perseg', 0.1); - set_per_segment_quota ------------------------ - -(1 row) - -SELECT pg_sleep(5); - pg_sleep ----------- - -(1 row) - ----- expect insert fail by tablespace schema perseg quota -INSERT INTO a SELECT generate_series(1,100); -ERROR: tablespace:schemaspc_perseg schema:spcs1_perseg diskquota exceeded per segment quota --- Test alter table set schema -CREATE SCHEMA spcs2_perseg; -ALTER TABLE spcs1_perseg.a SET SCHEMA spcs2_perseg; -SELECT pg_sleep(5); - pg_sleep ----------- - -(1 row) - --- expect insert succeed -INSERT INTO spcs2_perseg.a SELECT generate_series(1,200); -ALTER TABLE spcs2_perseg.a SET SCHEMA spcs1_perseg; -SELECT pg_sleep(5); - pg_sleep ----------- - -(1 row) - --- expect insert fail -INSERT INTO a SELECT generate_series(1,200); -ERROR: tablespace:schemaspc_perseg schema:spcs1_perseg diskquota exceeded per segment quota -SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name = 'spcs1_perseg' and tablespace_name ='schemaspc_perseg'; - schema_name | tablespace_name | quota_in_mb | nspsize_tablespace_in_bytes ---------------+------------------+-------------+----------------------------- - spcs1_perseg | schemaspc_perseg | 10 | 3932160 -(1 row) - --- Test alter tablespace --- start_ignore -\! 
mkdir /tmp/schemaspc_perseg2 --- end_ignore -DROP TABLESPACE IF EXISTS schemaspc_perseg2; -NOTICE: tablespace "schemaspc_perseg2" does not exist, skipping -CREATE TABLESPACE schemaspc_perseg2 LOCATION '/tmp/schemaspc_perseg2'; -ALTER TABLE a SET TABLESPACE schemaspc_perseg2; -SELECT pg_sleep(5); - pg_sleep ----------- - -(1 row) - --- expect insert succeed -INSERT INTO a SELECT generate_series(1,200); -ALTER TABLE a SET TABLESPACE schemaspc_perseg; -SELECT pg_sleep(5); - pg_sleep ----------- - -(1 row) - --- expect insert fail -INSERT INTO a SELECT generate_series(1,200); -ERROR: tablespace:schemaspc_perseg schema:spcs1_perseg diskquota exceeded per segment quota --- Test update per segment ratio -SELECT diskquota.set_per_segment_quota('schemaspc_perseg', 3.1); - set_per_segment_quota ------------------------ - -(1 row) - -SELECT pg_sleep(5); - pg_sleep ----------- - -(1 row) - --- expect insert success -INSERT INTO a SELECT generate_series(1,100); -SELECT diskquota.set_per_segment_quota('schemaspc_perseg', 0.123); - set_per_segment_quota ------------------------ - -(1 row) - -SELECT pg_sleep(5); - pg_sleep ----------- - -(1 row) - ----- expect insert fail -INSERT INTO a SELECT generate_series(1,100); -ERROR: tablespace:schemaspc_perseg schema:spcs1_perseg diskquota exceeded per segment quota --- Test delete per segment ratio -SELECT diskquota.set_per_segment_quota('schemaspc_perseg', -1); - set_per_segment_quota ------------------------ - -(1 row) - -SELECT pg_sleep(5); - pg_sleep ----------- - -(1 row) - --- expect insert success -INSERT INTO a SELECT generate_series(1,100); --- Test delete tablespace schema quota -SELECT diskquota.set_per_segment_quota('schemaspc_perseg', 2); - set_per_segment_quota ------------------------ - -(1 row) - -SELECT diskquota.set_schema_tablespace_quota('spcs1_perseg', 'schemaspc_perseg','-1 MB'); - set_schema_tablespace_quota ------------------------------ - -(1 row) - -SELECT pg_sleep(5); - pg_sleep ----------- - -(1 row) - --- expect insert success -INSERT INTO a SELECT generate_series(1,100); -SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name = 'spcs1_perseg' and tablespace_name ='schemaspc_perseg'; - schema_name | tablespace_name | quota_in_mb | nspsize_tablespace_in_bytes --------------+-----------------+-------------+----------------------------- -(0 rows) - -RESET search_path; -DROP TABLE spcs1_perseg.a; -DROP SCHEMA spcs1_perseg; -DROP TABLESPACE schemaspc_perseg; -DROP TABLESPACE schemaspc_perseg2; -\! rm -rf /tmp/schemaspc_perseg -\! rm -rf /tmp/schemaspc_perseg2 diff --git a/upgrade_test/expected/test_tablespace_schema_perseg.out b/upgrade_test/expected/test_tablespace_schema_perseg.out new file mode 120000 index 00000000000..90dff77a254 --- /dev/null +++ b/upgrade_test/expected/test_tablespace_schema_perseg.out @@ -0,0 +1 @@ +../../expected/test_tablespace_schema_perseg.out \ No newline at end of file diff --git a/upgrade_test/sql/test_tablespace_role.sql b/upgrade_test/sql/test_tablespace_role.sql deleted file mode 100644 index 31d36f79309..00000000000 --- a/upgrade_test/sql/test_tablespace_role.sql +++ /dev/null @@ -1,85 +0,0 @@ --- Test role quota --- start_ignore -\! 
mkdir /tmp/rolespc --- end_ignore -DROP TABLESPACE IF EXISTS rolespc; -CREATE TABLESPACE rolespc LOCATION '/tmp/rolespc'; -CREATE SCHEMA rolespcrole; -SET search_path TO rolespcrole; - -DROP ROLE IF EXISTS rolespcu1; -DROP ROLE IF EXISTS rolespcu2; -CREATE ROLE rolespcu1 NOLOGIN; -CREATE ROLE rolespcu2 NOLOGIN; -CREATE TABLE b (t TEXT) TABLESPACE rolespc; -ALTER TABLE b OWNER TO rolespcu1; -CREATE TABLE b2 (t TEXT) TABLESPACE rolespc; -ALTER TABLE b2 OWNER TO rolespcu1; - -SELECT diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '1 MB'); - -INSERT INTO b SELECT generate_series(1,100); --- expect insert success -INSERT INTO b SELECT generate_series(1,100000); -SELECT pg_sleep(5); --- expect insert fail -INSERT INTO b SELECT generate_series(1,100); --- expect insert fail -INSERT INTO b2 SELECT generate_series(1,100); - --- Test show_fast_schema_tablespace_quota_view -SELECT role_name, tablespace_name, quota_in_mb, rolsize_tablespace_in_bytes FROM diskquota.show_fast_role_tablespace_quota_view WHERE role_name = 'rolespcu1' and tablespace_name = 'rolespc'; - --- Test alter owner -ALTER TABLE b OWNER TO rolespcu2; -SELECT pg_sleep(20); --- expect insert succeed -INSERT INTO b SELECT generate_series(1,100); --- expect insert succeed -INSERT INTO b2 SELECT generate_series(1,100); -ALTER TABLE b OWNER TO rolespcu1; -SELECT pg_sleep(20); --- expect insert fail -INSERT INTO b SELECT generate_series(1,100); - --- Test alter tablespace --- start_ignore -\! mkdir /tmp/rolespc2 --- end_ignore -DROP TABLESPACE IF EXISTS rolespc2; -CREATE TABLESPACE rolespc2 LOCATION '/tmp/rolespc2'; -ALTER TABLE b SET TABLESPACE rolespc2; -SELECT pg_sleep(20); --- expect insert succeed -INSERT INTO b SELECT generate_series(1,100); --- alter table b back to tablespace rolespc -ALTER TABLE b SET TABLESPACE rolespc; -SELECT pg_sleep(20); --- expect insert fail -INSERT INTO b SELECT generate_series(1,100); - --- Test update quota config -SELECT diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '10 MB'); -SELECT pg_sleep(20); --- expect insert success -INSERT INTO b SELECT generate_series(1,100); --- expect insert success -INSERT INTO b SELECT generate_series(1,1000000); -SELECT pg_sleep(5); --- expect insert fail -INSERT INTO b SELECT generate_series(1,100); - --- Test delete quota config -SELECT diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '-1 MB'); -SELECT pg_sleep(5); --- expect insert success -INSERT INTO b SELECT generate_series(1,100); - -DROP TABLE b, b2; -DROP ROLE rolespcu1, rolespcu2; -RESET search_path; -DROP SCHEMA rolespcrole; -DROP TABLESPACE rolespc; -DROP TABLESPACE rolespc2; -\! rm -rf /tmp/rolespc; -\! rm -rf /tmp/rolespc2 diff --git a/upgrade_test/sql/test_tablespace_role.sql b/upgrade_test/sql/test_tablespace_role.sql new file mode 120000 index 00000000000..1e694286c9a --- /dev/null +++ b/upgrade_test/sql/test_tablespace_role.sql @@ -0,0 +1 @@ +../../sql/test_tablespace_role.sql \ No newline at end of file diff --git a/upgrade_test/sql/test_tablespace_role_perseg.sql b/upgrade_test/sql/test_tablespace_role_perseg.sql deleted file mode 100644 index 68a7f426c99..00000000000 --- a/upgrade_test/sql/test_tablespace_role_perseg.sql +++ /dev/null @@ -1,95 +0,0 @@ --- Test role quota --- start_ignore -\! 
mkdir /tmp/rolespc_perseg --- end_ignore -DROP TABLESPACE IF EXISTS rolespc_perseg; -CREATE TABLESPACE rolespc_perseg LOCATION '/tmp/rolespc_perseg'; -CREATE SCHEMA rolespc_persegrole; -SET search_path TO rolespc_persegrole; - -DROP ROLE IF EXISTS rolespc_persegu1; -DROP ROLE IF EXISTS rolespc_persegu2; -CREATE ROLE rolespc_persegu1 NOLOGIN; -CREATE ROLE rolespc_persegu2 NOLOGIN; -CREATE TABLE b (t TEXT) TABLESPACE rolespc_perseg; -ALTER TABLE b OWNER TO rolespc_persegu1; - -SELECT diskquota.set_role_tablespace_quota('rolespc_persegu1', 'rolespc_perseg', '1 MB'); - -INSERT INTO b SELECT generate_series(1,100); --- expect insert success -INSERT INTO b SELECT generate_series(1,100000); -SELECT pg_sleep(5); --- expect insert fail -INSERT INTO b SELECT generate_series(1,100); --- change tablespace role quota -SELECT diskquota.set_role_tablespace_quota('rolespc_persegu1', 'rolespc_perseg', '10 MB'); -SELECT pg_sleep(5); --- expect insert success -INSERT INTO b SELECT generate_series(1,100); - --- Test show_fast_schema_tablespace_quota_view -SELECT role_name, tablespace_name, quota_in_mb, rolsize_tablespace_in_bytes FROM diskquota.show_fast_role_tablespace_quota_view WHERE role_name = 'rolespc_persegu1' and tablespace_name = 'rolespc_perseg'; - -SELECT diskquota.set_per_segment_quota('rolespc_perseg', '0.1'); -SELECT pg_sleep(5); ----- expect insert fail by tablespace schema perseg quota -INSERT INTO b SELECT generate_series(1,100); --- Test alter owner -ALTER TABLE b OWNER TO rolespc_persegu2; -SELECT pg_sleep(20); --- expect insert succeed -INSERT INTO b SELECT generate_series(1,100); -ALTER TABLE b OWNER TO rolespc_persegu1; -SELECT pg_sleep(20); --- expect insert fail -INSERT INTO b SELECT generate_series(1,100); - --- Test alter tablespace --- start_ignore -\! mkdir /tmp/rolespc_perseg2 --- end_ignore -DROP TABLESPACE IF EXISTS rolespc_perseg2; -CREATE TABLESPACE rolespc_perseg2 LOCATION '/tmp/rolespc_perseg2'; -ALTER TABLE b SET TABLESPACE rolespc_perseg2; -SELECT pg_sleep(20); --- expect insert succeed -INSERT INTO b SELECT generate_series(1,100); --- alter table b back to tablespace rolespc_perseg -ALTER TABLE b SET TABLESPACE rolespc_perseg; -SELECT pg_sleep(20); --- expect insert fail -INSERT INTO b SELECT generate_series(1,100); - --- Test update per segment ratio -SELECT diskquota.set_per_segment_quota('rolespc_perseg', 3.1); -SELECT pg_sleep(20); -SELECT role_name, tablespace_name, quota_in_mb, rolsize_tablespace_in_bytes FROM diskquota.show_fast_role_tablespace_quota_view WHERE role_name = 'rolespc_persegu1' and tablespace_name = 'rolespc_perseg'; - --- expect insert success -INSERT INTO b SELECT generate_series(1,100); -SELECT diskquota.set_per_segment_quota('rolespc_perseg', 0.11); -SELECT pg_sleep(5); --- expect insert fail -INSERT INTO b SELECT generate_series(1,100); - --- Test delete per segment ratio -SELECT diskquota.set_per_segment_quota('rolespc_perseg', -1); -SELECT pg_sleep(5); --- expect insert success -INSERT INTO b SELECT generate_series(1,100); - --- Test delete quota config -SELECT diskquota.set_role_tablespace_quota('rolespc_persegu1', 'rolespc_perseg', '-1 MB'); -SELECT pg_sleep(5); --- expect insert success -INSERT INTO b SELECT generate_series(1,100); - -DROP table b; -DROP ROLE rolespc_persegu1, rolespc_persegu2; -RESET search_path; -DROP SCHEMA rolespc_persegrole; -DROP TABLESPACE rolespc_perseg; -DROP TABLESPACE rolespc_perseg2; -\! rm -rf /tmp/rolespc_perseg; -\! 
rm -rf /tmp/rolespc_perseg2 diff --git a/upgrade_test/sql/test_tablespace_role_perseg.sql b/upgrade_test/sql/test_tablespace_role_perseg.sql new file mode 120000 index 00000000000..46107ae84ba --- /dev/null +++ b/upgrade_test/sql/test_tablespace_role_perseg.sql @@ -0,0 +1 @@ +../../sql/test_tablespace_role_perseg.sql \ No newline at end of file diff --git a/upgrade_test/sql/test_tablespace_schema.sql b/upgrade_test/sql/test_tablespace_schema.sql deleted file mode 100644 index 717550bca62..00000000000 --- a/upgrade_test/sql/test_tablespace_schema.sql +++ /dev/null @@ -1,75 +0,0 @@ --- Test schema --- start_ignore -\! mkdir /tmp/schemaspc --- end_ignore -CREATE SCHEMA spcs1; -DROP TABLESPACE IF EXISTS schemaspc; -CREATE TABLESPACE schemaspc LOCATION '/tmp/schemaspc'; -SELECT diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc','1 MB'); -SET search_path TO spcs1; - -CREATE TABLE a(i int) TABLESPACE schemaspc; -INSERT INTO a SELECT generate_series(1,100); --- expect insert fail -INSERT INTO a SELECT generate_series(1,100000); -SELECT pg_sleep(5); --- expect insert fail -INSERT INTO a SELECT generate_series(1,100); -CREATE TABLE a2(i int) TABLESPACE schemaspc; --- expect insert fail -INSERT INTO a2 SELECT generate_series(1,100); - --- Test alter table set schema -CREATE SCHEMA spcs2; -ALTER TABLE spcs1.a SET SCHEMA spcs2; -SELECT pg_sleep(20); --- expect insert succeed -INSERT INTO a2 SELECT generate_series(1,200); --- expect insert succeed -INSERT INTO spcs2.a SELECT generate_series(1,200); -ALTER TABLE spcs2.a SET SCHEMA spcs1; -SELECT pg_sleep(10); --- expect insert fail -INSERT INTO a SELECT generate_series(1,200); -SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name = 'spcs1' and tablespace_name ='schemaspc'; - --- Test alter tablespace --- start_ignore -\! mkdir /tmp/schemaspc2 --- end_ignore -DROP TABLESPACE IF EXISTS schemaspc2; -CREATE TABLESPACE schemaspc2 LOCATION '/tmp/schemaspc2'; -ALTER TABLE a SET TABLESPACE schemaspc2; -SELECT pg_sleep(20); --- expect insert succeed -INSERT INTO a SELECT generate_series(1,200); -ALTER TABLE a SET TABLESPACE schemaspc; -SELECT pg_sleep(20); --- expect insert fail -INSERT INTO a SELECT generate_series(1,200); - --- Test update quota config -SELECT diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc', '10 MB'); -SELECT pg_sleep(20); --- expect insert success -INSERT INTO a SELECT generate_series(1,100); --- expect insert success -INSERT INTO a SELECT generate_series(1,1000000); -SELECT pg_sleep(5); --- expect insert fail -INSERT INTO a SELECT generate_series(1,100); - --- Test delete quota config -SELECT diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc', '-1 MB'); -SELECT pg_sleep(5); --- expect insert success -INSERT INTO a SELECT generate_series(1,100); - -RESET search_path; -DROP TABLE spcs1.a2, spcs1.a; -DROP SCHEMA spcs1, spcs2; -DROP TABLESPACE schemaspc; -DROP TABLESPACE schemaspc2; -\! rm -rf /tmp/schemaspc -\! 
rm -rf /tmp/schemaspc2 - diff --git a/upgrade_test/sql/test_tablespace_schema.sql b/upgrade_test/sql/test_tablespace_schema.sql new file mode 120000 index 00000000000..74976f0842d --- /dev/null +++ b/upgrade_test/sql/test_tablespace_schema.sql @@ -0,0 +1 @@ +../../sql/test_tablespace_schema.sql \ No newline at end of file diff --git a/upgrade_test/sql/test_tablespace_schema_perseg.sql b/upgrade_test/sql/test_tablespace_schema_perseg.sql deleted file mode 100644 index 8e40445e5a6..00000000000 --- a/upgrade_test/sql/test_tablespace_schema_perseg.sql +++ /dev/null @@ -1,89 +0,0 @@ --- Test schema --- start_ignore -\! mkdir /tmp/schemaspc_perseg --- end_ignore --- Test tablespace quota perseg -CREATE SCHEMA spcs1_perseg; -DROP TABLESPACE IF EXISTS schemaspc_perseg; -CREATE TABLESPACE schemaspc_perseg LOCATION '/tmp/schemaspc_perseg'; -SELECT diskquota.set_schema_tablespace_quota('spcs1_perseg', 'schemaspc_perseg','1 MB'); -SET search_path TO spcs1_perseg; - -CREATE TABLE a(i int) TABLESPACE schemaspc_perseg; -INSERT INTO a SELECT generate_series(1,100); --- expect insert success -INSERT INTO a SELECT generate_series(1,100000); -SELECT pg_sleep(5); --- expect insert fail by tablespace schema diskquota -INSERT INTO a SELECT generate_series(1,100); --- change tablespace schema quota -SELECT diskquota.set_schema_tablespace_quota('spcs1_perseg', 'schemaspc_perseg', '10 MB'); -SELECT pg_sleep(5); --- expect insert success -INSERT INTO a SELECT generate_series(1,100); -SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name = 'spcs1_perseg' and tablespace_name ='schemaspc_perseg'; - -SELECT diskquota.set_per_segment_quota('schemaspc_perseg', 0.1); -SELECT pg_sleep(5); ----- expect insert fail by tablespace schema perseg quota -INSERT INTO a SELECT generate_series(1,100); - --- Test alter table set schema -CREATE SCHEMA spcs2_perseg; -ALTER TABLE spcs1_perseg.a SET SCHEMA spcs2_perseg; -SELECT pg_sleep(5); --- expect insert succeed -INSERT INTO spcs2_perseg.a SELECT generate_series(1,200); -ALTER TABLE spcs2_perseg.a SET SCHEMA spcs1_perseg; -SELECT pg_sleep(5); --- expect insert fail -INSERT INTO a SELECT generate_series(1,200); -SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name = 'spcs1_perseg' and tablespace_name ='schemaspc_perseg'; - --- Test alter tablespace --- start_ignore -\! 
mkdir /tmp/schemaspc_perseg2
--- end_ignore
-DROP TABLESPACE IF EXISTS schemaspc_perseg2;
-CREATE TABLESPACE schemaspc_perseg2 LOCATION '/tmp/schemaspc_perseg2';
-ALTER TABLE a SET TABLESPACE schemaspc_perseg2;
-SELECT pg_sleep(5);
--- expect insert succeed
-INSERT INTO a SELECT generate_series(1,200);
-ALTER TABLE a SET TABLESPACE schemaspc_perseg;
-SELECT pg_sleep(5);
--- expect insert fail
-INSERT INTO a SELECT generate_series(1,200);
-
--- Test update per segment ratio
-SELECT diskquota.set_per_segment_quota('schemaspc_perseg', 3.1);
-SELECT pg_sleep(5);
--- expect insert success
-INSERT INTO a SELECT generate_series(1,100);
-SELECT diskquota.set_per_segment_quota('schemaspc_perseg', 0.123);
-SELECT pg_sleep(5);
----- expect insert fail
-INSERT INTO a SELECT generate_series(1,100);
-
--- Test delete per segment ratio
-SELECT diskquota.set_per_segment_quota('schemaspc_perseg', -1);
-SELECT pg_sleep(5);
--- expect insert success
-INSERT INTO a SELECT generate_series(1,100);
-
--- Test delete tablespace schema quota
-SELECT diskquota.set_per_segment_quota('schemaspc_perseg', 2);
-SELECT diskquota.set_schema_tablespace_quota('spcs1_perseg', 'schemaspc_perseg','-1 MB');
-SELECT pg_sleep(5);
--- expect insert success
-INSERT INTO a SELECT generate_series(1,100);
-SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name = 'spcs1_perseg' and tablespace_name ='schemaspc_perseg';
-
-RESET search_path;
-DROP TABLE spcs1_perseg.a;
-DROP SCHEMA spcs1_perseg;
-DROP TABLESPACE schemaspc_perseg;
-DROP TABLESPACE schemaspc_perseg2;
-\! rm -rf /tmp/schemaspc_perseg
-\! rm -rf /tmp/schemaspc_perseg2
-
diff --git a/upgrade_test/sql/test_tablespace_schema_perseg.sql b/upgrade_test/sql/test_tablespace_schema_perseg.sql
new file mode 120000
index 00000000000..f4fc79267b9
--- /dev/null
+++ b/upgrade_test/sql/test_tablespace_schema_perseg.sql
@@ -0,0 +1 @@
+../../sql/test_tablespace_schema_perseg.sql
\ No newline at end of file

From 12a6ffe19bd625facab3c5455265d23a26ae3748 Mon Sep 17 00:00:00 2001
From: Xiaoran Wang
Date: Thu, 9 Sep 2021 15:38:39 +0800
Subject: [PATCH 071/330] Fix index tbl (#65)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Support indexes kept in a different tablespace than their base table

* Use pg_table_size instead of pg_total_relation_size to get the disk
  usage. pg_table_size includes all forks, the toast table and the
  toast index, but not the table's own indexes.

* Add a function to get the list of oids of the tables which diskquota
  needs to care about in the database. First, get the tables whose
  relkind is 'r' or 'm' and that are not system tables:
      "select oid from pg_class"
      " where oid >= %u and (relkind='r' or relkind='m')", FirstNormalObjectId);
  then fetch the indexes of those tables and add them to the list.

* Use the list of oids in the init_table_size_table and
  calculate_table_disk_usage functions

* Check indexes in quota_check_ExecCheckRTPerms
  Before this commit, quota_check_ExecCheckRTPerms only checked the
  RangeTblEntry entries in a query and did not check their indexes.
  Now we get the index list for each RangeTblEntry and check the index
  quota too.
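For illustration only, a minimal C sketch of the collection step
described above. The function name sketch_get_rel_oid_list and the
simplified error handling are hypothetical (the shipped function is
get_rel_oid_list), and an active SPI connection is assumed:

    #include "postgres.h"
    #include "executor/spi.h"
    #include "nodes/pg_list.h"

    /* Hypothetical sketch: collect user table oids via SPI. The real
     * get_rel_oid_list additionally fetches each table's indexes from
     * pg_index (indrelid -> indexrelid) and appends them to the list. */
    static List *
    sketch_get_rel_oid_list(void)
    {
        List   *oidlist = NIL;
        uint64  i;
        /* tables and materialized views only; 16384 = FirstNormalObjectId */
        int     ret = SPI_execute("select oid from pg_class"
                                  " where oid >= 16384 and (relkind='r' or relkind='m')",
                                  true, 0);

        if (ret != SPI_OK_SELECT)
            return NIL;
        for (i = 0; i < SPI_processed; i++)
        {
            bool  isnull = false;
            Datum d = SPI_getbinval(SPI_tuptable->vals[i],
                                    SPI_tuptable->tupdesc, 1, &isnull);

            if (!isnull)
                oidlist = lappend_oid(oidlist, DatumGetObjectId(d));
        }
        return oidlist;
    }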
---
 README.md                              |   8 +-
 concourse/scripts/test_diskquota.sh    |   5 +-
 concourse/scripts/upgrade_extension.sh |   2 +-
 diskquota.h                            |   1 +
 diskquota_schedule                     |   2 +-
 diskquota_utility.c                    | 105 ++++++++++++++++++++---
 enforcement.c                          |  30 ++++++-
 expected/test_index.out                | 111 +++++++++++++++++++++++++
 expected/test_table_size.out           |   3 +-
 gp_activetable.c                       |   6 +-
 quotamodel.c                           |  34 ++++----
 sql/test_index.sql                     |  46 ++++++++++
 sql/test_table_size.sql                |   4 +-
 13 files changed, 308 insertions(+), 49 deletions(-)
 create mode 100644 expected/test_index.out
 create mode 100644 sql/test_index.sql

diff --git a/README.md b/README.md
index d980fc2a94b..188c7276799 100644
--- a/README.md
+++ b/README.md
@@ -59,9 +59,9 @@ check interval.
 Active tables are detected at Segment QE side: hooks in smgecreate(),
 smgrextend() and smgrtruncate() are used to detect active tables and store them
 (currently relfilenode) in the shared memory. Diskquota worker process will
 periodically call dispatch queries to all the segments and
-consume active tables in shared memories, convert relfilenode to relaton oid,
-and calcualte table size by calling pg_total_relation_size(), which will sum
-the size of table (including: base, vm, fsm, toast and index) in each segment.
+consume active tables in shared memories, convert relfilenode to relation oid,
+and calculate table size by calling pg_table_size(), which will sum
+the size of table (including: base, vm, fsm, toast) in each segment.
 
 ## Enforcement
 Enforcement is implemented as hooks. There are two kinds of enforcement hooks:
@@ -230,7 +230,7 @@ END;
 'Create Table As' command has the similar problem.
 One solution direction is that we calculate the additional 'uncommited data size'
-for schema and role in worker process. Since pg_total_relation_size need to hold
+for schema and role in worker process. Since pg_table_size needs to hold
 AccessShareLock to relation (And worker process don't even know this reloid
 exists), we need to skip it, and call stat() directly with tolerant to file
 unlink. Skip lock is dangerous and we plan to leave it as known issue at
 current stage.
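
To make the stat()-based idea above concrete, a minimal, hypothetical
sketch follows (not code from this patch). It assumes the
default-tablespace layout base/<dboid>/<relfilenode>, probes only the
first segment file, and treats a concurrent unlink (ENOENT) as size 0,
since the expected race is a relation file disappearing under us; a
real implementation would also sum the extra segment files (.1, .2,
...) and handle other tablespaces.

    #include <sys/stat.h>
    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical lock-free size probe: stat() the relation file
     * directly, without AccessShareLock, tolerating concurrent unlink. */
    static int64_t
    relfile_size_tolerant(const char *datadir, unsigned dboid, unsigned relfilenode)
    {
        char        path[1024];
        struct stat st;

        snprintf(path, sizeof(path), "%s/base/%u/%u", datadir, dboid, relfilenode);
        if (stat(path, &st) < 0)
            return (errno == ENOENT) ? 0 : -1; /* vanished: treat as empty */
        return (int64_t) st.st_size;
    }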
diff --git a/concourse/scripts/test_diskquota.sh b/concourse/scripts/test_diskquota.sh index 709b71b9fac..f761d58ce19 100755 --- a/concourse/scripts/test_diskquota.sh +++ b/concourse/scripts/test_diskquota.sh @@ -5,7 +5,7 @@ set -exo pipefail CWDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" TOP_DIR=${CWDIR}/../../../ GPDB_CONCOURSE_DIR=${TOP_DIR}/gpdb_src/concourse/scripts -CUT_NUMBER=5 +CUT_NUMBER=6 source "${GPDB_CONCOURSE_DIR}/common.bash" source "${TOP_DIR}/diskquota_src/concourse/scripts/test_common.sh" @@ -15,9 +15,6 @@ function _main() { time make_cluster time install_diskquota - if [ "${DISKQUOTA_OS}" == "ubuntu18.04" -o "${DISKQUOTA_OS}" == "rhel6" ]; then - CUT_NUMBER=6 - fi time test ${TOP_DIR}/diskquota_src/ true } diff --git a/concourse/scripts/upgrade_extension.sh b/concourse/scripts/upgrade_extension.sh index a867819563d..2f1e4f94f33 100755 --- a/concourse/scripts/upgrade_extension.sh +++ b/concourse/scripts/upgrade_extension.sh @@ -5,7 +5,7 @@ set -exo pipefail CWDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" TOP_DIR=${CWDIR}/../../../ GPDB_CONCOURSE_DIR=${TOP_DIR}/gpdb_src/concourse/scripts -CUT_NUMBER=5 +CUT_NUMBER=6 source "${GPDB_CONCOURSE_DIR}/common.bash" source "${TOP_DIR}/diskquota_src/concourse/scripts/test_common.sh" diff --git a/diskquota.h b/diskquota.h index f07b96ba388..696f6f18fd9 100644 --- a/diskquota.h +++ b/diskquota.h @@ -115,4 +115,5 @@ extern int diskquota_max_active_tables; extern int SEGCOUNT; extern int get_ext_major_version(void); extern void truncateStringInfo(StringInfo str, int nchars); +extern List *get_rel_oid_list(void); #endif diff --git a/diskquota_schedule b/diskquota_schedule index edbe3bd9810..068adfe7a99 100644 --- a/diskquota_schedule +++ b/diskquota_schedule @@ -4,7 +4,7 @@ test: prepare # test: test_table_size test: test_fast_disk_check #test: test_insert_after_drop -test: test_role test_schema test_drop_table test_column test_copy test_update test_toast test_truncate test_reschema test_temp_role test_rename test_delete_quota test_mistake test_tablespace_role test_tablespace_schema test_tablespace_role_perseg test_tablespace_schema_perseg +test: test_role test_schema test_drop_table test_column test_copy test_update test_toast test_truncate test_reschema test_temp_role test_rename test_delete_quota test_mistake test_tablespace_role test_tablespace_schema test_tablespace_role_perseg test_tablespace_schema_perseg test_index test: test_truncate test: test_delete_quota test: test_partition diff --git a/diskquota_utility.c b/diskquota_utility.c index eb700472632..cf709050c4b 100644 --- a/diskquota_utility.c +++ b/diskquota_utility.c @@ -70,13 +70,15 @@ static int64 get_size_in_mb(char *str); static void set_quota_config_internal(Oid targetoid, int64 quota_limit_mb, QuotaType type); static void set_target_internal(Oid primaryoid, Oid spcoid, int64 quota_limit_mb, QuotaType type); static bool generate_insert_table_size_sql(StringInfoData *buf, int extMajorVersion); +static char *convert_oidlist_to_string(List *oidlist); int get_ext_major_version(void); +List *get_rel_oid_list(void); /* ---- Help Functions to set quota limit. ---- */ /* * Initialize table diskquota.table_size. 
- * calculate table size by UDF pg_total_relation_size + * calculate table size by UDF pg_table_size * This function is called by user, errors should not * be catch, and should be sent back to user */ @@ -109,19 +111,20 @@ init_table_size_table(PG_FUNCTION_ARGS) heap_close(rel, NoLock); /* - * Why don't use insert into diskquota.table_size select from pg_total_relation_size here? + * Why don't use insert into diskquota.table_size select from pg_table_size here? * - * insert into foo select oid, pg_total_relation_size(oid), -1 from pg_class where + * insert into foo select oid, pg_table_size(oid), -1 from pg_class where * oid >= 16384 and (relkind='r' or relkind='m'); * ERROR: This query is not currently supported by GPDB. (entry db 127.0.0.1:6000 pid=61114) * * Some functions are peculiar in that they do their own dispatching. - * Such as pg_total_relation_size. + * Such as pg_table_size. * They do not work on entry db since we do not support dispatching * from entry-db currently. */ SPI_connect(); extMajorVersion = get_ext_major_version(); + char *oids = convert_oidlist_to_string(get_rel_oid_list()); /* delete all the table size info in table_size if exist. */ initStringInfo(&buf); @@ -134,13 +137,13 @@ init_table_size_table(PG_FUNCTION_ARGS) /* fetch table size for master*/ resetStringInfo(&buf); appendStringInfo(&buf, - "select oid, pg_total_relation_size(oid), -1" + "select oid, pg_table_size(oid), -1" " from pg_class" - " where oid >= %u and (relkind='r' or relkind='m')", - FirstNormalObjectId); + " where oid in (%s);", + oids); ret = SPI_execute(buf.data, false, 0); if (ret != SPI_OK_SELECT) - elog(ERROR, "cannot fetch in pg_total_relation_size. error code %d", ret); + elog(ERROR, "cannot fetch in pg_table_size. error code %d", ret); /* fill table_size table with table oid and size info for master. */ appendStringInfo(&insert_buf, @@ -149,13 +152,13 @@ init_table_size_table(PG_FUNCTION_ARGS) /* fetch table size on segments*/ resetStringInfo(&buf); appendStringInfo(&buf, - "select oid, pg_total_relation_size(oid), gp_segment_id" + "select oid, pg_table_size(oid), gp_segment_id" " from gp_dist_random('pg_class')" - " where oid >= %u and (relkind='r' or relkind='m')", - FirstNormalObjectId); + " where oid in (%s);", + oids); ret = SPI_execute(buf.data, false, 0); if (ret != SPI_OK_SELECT) - elog(ERROR, "cannot fetch in pg_total_relation_size. error code %d", ret); + elog(ERROR, "cannot fetch in pg_table_size. error code %d", ret); /* fill table_size table with table oid and size info for segments. */ insert_flag = insert_flag | generate_insert_table_size_sql(&insert_buf, extMajorVersion); @@ -1024,3 +1027,81 @@ get_ext_major_version(void) } return 0; } + +static char * +convert_oidlist_to_string(List *oidlist) +{ + StringInfoData buf; + bool hasOid = false; + ListCell *l; + initStringInfo(&buf); + + foreach(l, oidlist) + { + Oid oid = lfirst_oid(l); + appendStringInfo(&buf, "%u, ", oid); + hasOid = true; + } + if (hasOid) + truncateStringInfo(&buf, buf.len - strlen(", ")); + return buf.data; +} + +/* + * Get the list of oids of the tables which diskquota + * needs to care about in the database. + * First, collect all the table oids whose relkind is 'r' + * or 'm', excluding system tables. + * Then, fetch the indexes of those tables.
+ */ + +List * +get_rel_oid_list(void) +{ + List *oidlist = NIL; + StringInfoData buf; + int ret; + + initStringInfo(&buf); + appendStringInfo(&buf, + "select oid " + " from pg_class" + " where oid >= %u and (relkind='r' or relkind='m')", + FirstNormalObjectId); + + ret = SPI_execute(buf.data, false, 0); + if (ret != SPI_OK_SELECT) + elog(ERROR, "cannot fetch in pg_class. error code %d", ret); + TupleDesc tupdesc = SPI_tuptable->tupdesc; + for(int i = 0; i < SPI_processed; i++) + { + HeapTuple tup; + bool isnull; + Oid oid; + ListCell *l; + + tup = SPI_tuptable->vals[i]; + oid = DatumGetObjectId(SPI_getbinval(tup,tupdesc, 1, &isnull)); + if (!isnull) + { + Relation relation; + List *indexIds; + relation = try_relation_open(oid, AccessShareLock, false); + if (!relation) + continue; + + oidlist = lappend_oid(oidlist, oid); + indexIds = RelationGetIndexList(relation); + if (indexIds != NIL ) + { + foreach(l, indexIds) + { + oidlist = lappend_oid(oidlist, lfirst_oid(l)); + } + } + relation_close(relation, NoLock); + list_free(indexIds); + } + } + return oidlist; +} diff --git a/enforcement.c b/enforcement.c index 687653507dc..c1fe6af6ea6 100644 --- a/enforcement.c +++ b/enforcement.c @@ -45,10 +45,13 @@ init_disk_quota_enforcement(void) static bool quota_check_ExecCheckRTPerms(List *rangeTable, bool ereport_on_violation) { - ListCell *l; + ListCell *l; foreach(l, rangeTable) { + List *indexIds; + ListCell *oid; + Relation relation; RangeTblEntry *rte = (RangeTblEntry *) lfirst(l); /* see ExecCheckRTEPerms() */ @@ -68,6 +71,31 @@ quota_check_ExecCheckRTPerms(List *rangeTable, bool ereport_on_violation) * quota limit exceeded. */ quota_check_common(rte->relid); + /* Check the indexes of the this relation */ + relation = try_relation_open(rte->relid, AccessShareLock, false); + if (!relation) + continue; + + indexIds = RelationGetIndexList(relation); + PG_TRY(); + { + if (indexIds != NIL ) + { + foreach(oid, indexIds) + { + quota_check_common(lfirst_oid(oid)); + } + } + } + PG_CATCH(); + { + relation_close(relation, AccessShareLock); + list_free(indexIds); + PG_RE_THROW(); + } + PG_END_TRY(); + relation_close(relation, AccessShareLock); + list_free(indexIds); } return true; } diff --git a/expected/test_index.out b/expected/test_index.out new file mode 100644 index 00000000000..5455346ac82 --- /dev/null +++ b/expected/test_index.out @@ -0,0 +1,111 @@ +-- Test schema +-- start_ignore +\! mkdir /tmp/indexspc +-- end_ignore +CREATE SCHEMA indexschema1; +DROP TABLESPACE IF EXISTS indexspc; +NOTICE: tablespace "indexspc" does not exist, skipping +CREATE TABLESPACE indexspc LOCATION '/tmp/indexspc'; +SET search_path TO indexschema1; +CREATE TABLE test_index_a(i int) TABLESPACE indexspc; +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
+INSERT INTO test_index_a SELECT generate_series(1,20000); +SELECT diskquota.set_schema_tablespace_quota('indexschema1', 'indexspc','2 MB'); + set_schema_tablespace_quota +----------------------------- + +(1 row) + +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +SELECT schema_name,tablespace_name,quota_in_mb,nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name='indexschema1' and tablespace_name='indexspc'; + schema_name | tablespace_name | quota_in_mb | nspsize_tablespace_in_bytes +--------------+-----------------+-------------+----------------------------- + indexschema1 | indexspc | 2 | 1081344 +(1 row) + +SELECT size, segid FROM diskquota.table_size , pg_class where tableid=oid and relname='test_index_a' and segid=-1; + size | segid +---------+------- + 1081344 | -1 +(1 row) + +-- create index for the table, index in default tablespace +CREATE INDEX a_index ON test_index_a(i); +INSERT INTO test_index_a SELECT generate_series(1,10000); +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- expect insert success +INSERT INTO test_index_a SELECT generate_series(1,100); +SELECT schema_name,tablespace_name,quota_in_mb,nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name ='indexschema1' and tablespace_name='indexspc'; + schema_name | tablespace_name | quota_in_mb | nspsize_tablespace_in_bytes +--------------+-----------------+-------------+----------------------------- + indexschema1 | indexspc | 2 | 1441792 +(1 row) + +SELECT size, segid FROM diskquota.table_size , pg_class where tableid=oid and (relname='test_index_a' or relname='a_index') and segid=-1; + size | segid +---------+------- + 1441792 | -1 + 1212416 | -1 +(2 rows) + +-- add index to tablespace indexspc +ALTER index a_index SET TABLESPACE indexspc; +SELECT pg_sleep(20); + pg_sleep +---------- + +(1 row) + +SELECT schema_name,tablespace_name,quota_in_mb,nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name ='indexschema1' and tablespace_name='indexspc'; + schema_name | tablespace_name | quota_in_mb | nspsize_tablespace_in_bytes +--------------+-----------------+-------------+----------------------------- + indexschema1 | indexspc | 2 | 2654208 +(1 row) + +SELECT size, segid FROM diskquota.table_size , pg_class where tableid=oid and (relname='test_index_a' or relname='a_index') and segid=-1; + size | segid +---------+------- + 1441792 | -1 + 1212416 | -1 +(2 rows) + +-- expect insert fail +INSERT INTO test_index_a SELECT generate_series(1,100); +ERROR: tablespace:indexspc schema:indexschema1 diskquota exceeded +-- index tablespace quota exceeded +ALTER table test_index_a SET TABLESPACE pg_default; +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- expect insert success +INSERT INTO test_index_a SELECT generate_series(1,100); +INSERT INTO test_index_a SELECT generate_series(1,200000); +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- expect insert fail +INSERT INTO test_index_a SELECT generate_series(1,100); +ERROR: tablespace:indexspc schema:indexschema1 diskquota exceeded +RESET search_path; +DROP INDEX indexschema1.a_index; +DROP TABLE indexschema1.test_index_a; +DROP SCHEMA indexschema1; +DROP TABLESPACE indexspc; +\! 
rm -rf /tmp/indexspc diff --git a/expected/test_table_size.out b/expected/test_table_size.out index 36421dd9f19..4c96b5dc4bc 100644 --- a/expected/test_table_size.out +++ b/expected/test_table_size.out @@ -12,8 +12,7 @@ select pg_sleep(2); create table buffer(oid oid, relname name, size bigint); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'oid' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. -insert into buffer select oid, relname, pg_total_relation_size(oid) from pg_class, diskquota.table_size as dt where dt.size = oid and relname = 'a'; -insert into buffer select oid, relname, sum(pg_total_relation_size(oid)) from gp_dist_random('pg_class') where oid > 16384 and (relkind='r' or relkind='m') and relname = 'a' group by oid, relname; +insert into buffer select oid, relname, sum(pg_table_size(oid)) from gp_dist_random('pg_class') where oid > 16384 and (relkind='r' or relkind='m') and relname = 'a' group by oid, relname; select sum(buffer.size) = diskquota.table_size.size from buffer, diskquota.table_size where buffer.oid = diskquota.table_size.tableid group by diskquota.table_size.size; ?column? ---------- diff --git a/gp_activetable.c b/gp_activetable.c index b67e7960f08..805311b885d 100644 --- a/gp_activetable.c +++ b/gp_activetable.c @@ -395,7 +395,7 @@ diskquota_fetch_table_stat(PG_FUNCTION_ARGS) } /* - * Call pg_total_relation_size to calcualte the + * Call pg_table_size to calcualte the * active table size on each segments. */ static HTAB * @@ -468,8 +468,8 @@ get_active_tables_stats(ArrayType *array) */ PG_TRY(); { - /* call pg_total_relation_size to get the active table size */ - entry->tablesize = (Size) DatumGetInt64(DirectFunctionCall1(pg_total_relation_size, + /* call pg_table_size to get the active table size */ + entry->tablesize = (Size) DatumGetInt64(DirectFunctionCall1(pg_table_size, ObjectIdGetDatum(relOid))); } PG_CATCH(); diff --git a/quotamodel.c b/quotamodel.c index 2033fe62e34..1231a50d728 100644 --- a/quotamodel.c +++ b/quotamodel.c @@ -745,18 +745,15 @@ calculate_table_disk_usage(bool is_init) bool table_size_map_found; bool active_tbl_found; int64 updated_total_size; - Relation classRel; - HeapTuple tuple; - HeapScanDesc relScan; TableSizeEntry *tsentry = NULL; Oid relOid; HASH_SEQ_STATUS iter; HTAB *local_active_table_stat_map; DiskQuotaActiveTableEntry *active_table_entry; TableEntryKey key; + List *oidlist; + ListCell *l; - classRel = heap_open(RelationRelationId, AccessShareLock); - relScan = heap_beginscan_catalog(classRel, 0, NULL); /* * initialization stage all the tables are active. 
later loop, only the @@ -779,18 +776,20 @@ calculate_table_disk_usage(bool is_init) * calculate the file size for active table and update namespace_size_map * and role_size_map */ - while ((tuple = heap_getnext(relScan, ForwardScanDirection)) != NULL) + oidlist = get_rel_oid_list(); + foreach(l, oidlist) { - Form_pg_class classForm = (Form_pg_class) GETSTRUCT(tuple); + HeapTuple classTup; + Form_pg_class classForm; + relOid = lfirst_oid(l); - if (classForm->relkind != RELKIND_RELATION && - classForm->relkind != RELKIND_MATVIEW) - continue; - relOid = HeapTupleGetOid(tuple); - - /* ignore system table */ - if (relOid < FirstNormalObjectId) + classTup = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(relOid)); + if (!HeapTupleIsValid(classTup)) + { + elog(WARNING, "cache lookup failed for relation %u", relOid); continue; + } + classForm = (Form_pg_class) GETSTRUCT(classTup); /* * The segid is the same as the content id in gp_segment_configuration @@ -832,8 +831,8 @@ calculate_table_disk_usage(bool is_init) /* DirectFunctionCall1 may fail, since table maybe dropped by other backend */ PG_TRY(); { - /* call pg_total_relation_size to get the active table size */ - active_table_entry->tablesize += (Size) DatumGetInt64(DirectFunctionCall1(pg_total_relation_size, ObjectIdGetDatum(relOid))); + /* call pg_table_size to get the active table size */ + active_table_entry->tablesize += (Size) DatumGetInt64(DirectFunctionCall1(pg_table_size, ObjectIdGetDatum(relOid))); } PG_CATCH(); { @@ -922,10 +921,9 @@ calculate_table_disk_usage(bool is_init) tsentry->tablespaceoid = classForm->reltablespace; } } + heap_freetuple(classTup); } - heap_endscan(relScan); - heap_close(classRel, AccessShareLock); hash_destroy(local_active_table_stat_map); /* diff --git a/sql/test_index.sql b/sql/test_index.sql new file mode 100644 index 00000000000..4dca18d5991 --- /dev/null +++ b/sql/test_index.sql @@ -0,0 +1,46 @@ +-- Test schema +-- start_ignore +\! 
mkdir /tmp/indexspc +-- end_ignore +CREATE SCHEMA indexschema1; +DROP TABLESPACE IF EXISTS indexspc; +CREATE TABLESPACE indexspc LOCATION '/tmp/indexspc'; +SET search_path TO indexschema1; + +CREATE TABLE test_index_a(i int) TABLESPACE indexspc; +INSERT INTO test_index_a SELECT generate_series(1,20000); +SELECT diskquota.set_schema_tablespace_quota('indexschema1', 'indexspc','2 MB'); +SELECT pg_sleep(5); +SELECT schema_name,tablespace_name,quota_in_mb,nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name='indexschema1' and tablespace_name='indexspc'; +SELECT size, segid FROM diskquota.table_size , pg_class where tableid=oid and relname='test_index_a' and segid=-1; +-- create index for the table, index in default tablespace +CREATE INDEX a_index ON test_index_a(i); +INSERT INTO test_index_a SELECT generate_series(1,10000); +SELECT pg_sleep(5); +-- expect insert success +INSERT INTO test_index_a SELECT generate_series(1,100); +SELECT schema_name,tablespace_name,quota_in_mb,nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name ='indexschema1' and tablespace_name='indexspc'; +SELECT size, segid FROM diskquota.table_size , pg_class where tableid=oid and (relname='test_index_a' or relname='a_index') and segid=-1; +-- add index to tablespace indexspc +ALTER index a_index SET TABLESPACE indexspc; +SELECT pg_sleep(20); +SELECT schema_name,tablespace_name,quota_in_mb,nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name ='indexschema1' and tablespace_name='indexspc'; +SELECT size, segid FROM diskquota.table_size , pg_class where tableid=oid and (relname='test_index_a' or relname='a_index') and segid=-1; +-- expect insert fail +INSERT INTO test_index_a SELECT generate_series(1,100); + +-- index tablespace quota exceeded +ALTER table test_index_a SET TABLESPACE pg_default; +SELECT pg_sleep(5); +-- expect insert success +INSERT INTO test_index_a SELECT generate_series(1,100); +INSERT INTO test_index_a SELECT generate_series(1,200000); +SELECT pg_sleep(5); +-- expect insert fail +INSERT INTO test_index_a SELECT generate_series(1,100); +RESET search_path; +DROP INDEX indexschema1.a_index; +DROP TABLE indexschema1.test_index_a; +DROP SCHEMA indexschema1; +DROP TABLESPACE indexspc; +\! 
rm -rf /tmp/indexspc diff --git a/sql/test_table_size.sql b/sql/test_table_size.sql index aad12e837ad..eb8c54bacbf 100644 --- a/sql/test_table_size.sql +++ b/sql/test_table_size.sql @@ -7,8 +7,6 @@ insert into a select * from generate_series(1,10000); select pg_sleep(2); create table buffer(oid oid, relname name, size bigint); -insert into buffer select oid, relname, pg_total_relation_size(oid) from pg_class, diskquota.table_size as dt where dt.size = oid and relname = 'a'; - -insert into buffer select oid, relname, sum(pg_total_relation_size(oid)) from gp_dist_random('pg_class') where oid > 16384 and (relkind='r' or relkind='m') and relname = 'a' group by oid, relname; +insert into buffer select oid, relname, sum(pg_table_size(oid)) from gp_dist_random('pg_class') where oid > 16384 and (relkind='r' or relkind='m') and relname = 'a' group by oid, relname; select sum(buffer.size) = diskquota.table_size.size from buffer, diskquota.table_size where buffer.oid = diskquota.table_size.tableid group by diskquota.table_size.size; From 1f30cb65249e580de2d95ca14fda529670d93dfa Mon Sep 17 00:00:00 2001 From: Xiaoran Wang Date: Fri, 10 Sep 2021 15:19:45 +0800 Subject: [PATCH 072/330] Support downgrade diskquota extension from 2.0 to 1.0 (#66) --- Makefile | 2 +- concourse/scripts/build_diskquota.sh | 3 ++ diskquota--1.0--2.0.sql | 8 ++- diskquota--2.0--1.0.sql | 52 +++++++++++++++++++ upgrade_test/Makefile | 2 +- upgrade_test/diskquota_schedule_downgrade | 23 ++++++++ upgrade_test/expected/downgrade_extension.out | 2 + upgrade_test/sql/downgrade_extension.sql | 2 + 8 files changed, 91 insertions(+), 3 deletions(-) create mode 100644 diskquota--2.0--1.0.sql create mode 100644 upgrade_test/diskquota_schedule_downgrade create mode 100644 upgrade_test/expected/downgrade_extension.out create mode 100644 upgrade_test/sql/downgrade_extension.sql diff --git a/Makefile b/Makefile index b7ce8b4e300..fba26081290 100644 --- a/Makefile +++ b/Makefile @@ -3,7 +3,7 @@ MODULE_big = diskquota EXTENSION = diskquota -DATA = diskquota--1.0.sql diskquota--2.0.sql diskquota--1.0--2.0.sql +DATA = diskquota--1.0.sql diskquota--2.0.sql diskquota--1.0--2.0.sql diskquota--2.0--1.0.sql SRCDIR = ./ FILES = diskquota.c enforcement.c quotamodel.c gp_activetable.c diskquota_utility.c OBJS = diskquota.o enforcement.o quotamodel.o gp_activetable.o diskquota_utility.o diff --git a/concourse/scripts/build_diskquota.sh b/concourse/scripts/build_diskquota.sh index b0f60df46cf..1eea6182432 100755 --- a/concourse/scripts/build_diskquota.sh +++ b/concourse/scripts/build_diskquota.sh @@ -28,6 +28,7 @@ function pkg() { share/postgresql/extension/diskquota--1.0.sql \ share/postgresql/extension/diskquota--2.0.sql \ share/postgresql/extension/diskquota--1.0--2.0.sql \ + share/postgresql/extension/diskquota--2.0--1.0.sql \ install_gpdb_component ;; rhel7) @@ -37,6 +38,7 @@ function pkg() { share/postgresql/extension/diskquota--1.0.sql \ share/postgresql/extension/diskquota--2.0.sql \ share/postgresql/extension/diskquota--1.0--2.0.sql \ + share/postgresql/extension/diskquota--2.0--1.0.sql \ install_gpdb_component ;; ubuntu18.04) @@ -46,6 +48,7 @@ function pkg() { share/postgresql/extension/diskquota--1.0.sql \ share/postgresql/extension/diskquota--2.0.sql \ share/postgresql/extension/diskquota--1.0--2.0.sql \ + share/postgresql/extension/diskquota--2.0--1.0.sql \ install_gpdb_component ;; *) echo "Unknown OS: $OSVER"; exit 1 ;; diff --git a/diskquota--1.0--2.0.sql b/diskquota--1.0--2.0.sql index 8021f216b50..299411620b5 100644 --- 
a/diskquota--1.0--2.0.sql +++ b/diskquota--1.0--2.0.sql @@ -71,4 +71,10 @@ GROUP BY pgc.relowner, reltablespace, pgr.rolname, pgsp.spcname, qc.quotalimitMB CREATE OR REPLACE VIEW diskquota.show_fast_database_size_view AS SELECT ((SELECT SUM(pg_relation_size(oid)) FROM pg_class WHERE oid <= 16384)+ (SELECT SUM(size) FROM diskquota.table_size WHERE segid = -1)) AS dbsize; -ALTER TYPE diskquota.diskquota_active_table_type ADD ATTRIBUTE "GP_SEGMENT_ID" smallint; +-- Need to drop the old type and functions, then recreate them to make the gpdb to reload the new functions +DROP FUNCTION diskquota.diskquota_fetch_table_stat(int4, oid[]); +DROP TYPE diskquota.diskquota_active_table_type; +CREATE TYPE diskquota.diskquota_active_table_type AS ("TABLE_OID" oid, "TABLE_SIZE" int8, "GP_SEGMENT_ID" smallint); +CREATE OR REPLACE FUNCTION diskquota.diskquota_fetch_table_stat(int4, oid[]) RETURNS setof diskquota.diskquota_active_table_type +AS 'MODULE_PATHNAME', 'diskquota_fetch_table_stat' +LANGUAGE C VOLATILE; diff --git a/diskquota--2.0--1.0.sql b/diskquota--2.0--1.0.sql new file mode 100644 index 00000000000..e93cad5c1c0 --- /dev/null +++ b/diskquota--2.0--1.0.sql @@ -0,0 +1,52 @@ +DROP FUNCTION IF EXISTS diskquota.set_schema_tablespace_quota(text, text, text); + +DROP FUNCTION IF EXISTS diskquota.set_role_tablespace_quota(text, text, text); + +DROP FUNCTION IF EXISTS diskquota.set_per_segment_quota(text, float4); + + +CREATE OR REPLACE VIEW diskquota.show_fast_schema_quota_view AS +select pgns.nspname as schema_name, pgc.relnamespace as schema_oid, qc.quotalimitMB as quota_in_mb, sum(ts.size) as nspsize_in_bytes +from diskquota.table_size as ts, + pg_class as pgc, + diskquota.quota_config as qc, + pg_namespace as pgns +where ts.tableid = pgc.oid and qc.targetoid = pgc.relnamespace and pgns.oid = pgc.relnamespace +group by relnamespace, qc.quotalimitMB, pgns.nspname +order by pgns.nspname; + +CREATE OR REPLACE VIEW diskquota.show_fast_role_quota_view AS +select pgr.rolname as role_name, pgc.relowner as role_oid, qc.quotalimitMB as quota_in_mb, sum(ts.size) as rolsize_in_bytes +from diskquota.table_size as ts, + pg_class as pgc, + diskquota.quota_config as qc, + pg_roles as pgr +WHERE pgc.relowner = qc.targetoid and pgc.relowner = pgr.oid and ts.tableid = pgc.oid +GROUP BY pgc.relowner, pgr.rolname, qc.quotalimitMB; + +DROP VIEW IF EXISTS diskquota.show_fast_schema_tablespace_quota_view; +DROP VIEW IF EXISTS diskquota.show_fast_role_tablespace_quota_view; + +CREATE OR REPLACE VIEW diskquota.show_fast_database_size_view AS +SELECT ((SELECT SUM(pg_relation_size(oid)) FROM pg_class WHERE oid <= 16384)+ (SELECT SUM(size) FROM diskquota.table_size)) AS dbsize; + +-- Need to drop the old type and functions, then recreate them to make the gpdb to reload the new functions +DROP FUNCTION diskquota.diskquota_fetch_table_stat(int4, oid[]); +DROP TYPE diskquota.diskquota_active_table_type; +CREATE TYPE diskquota.diskquota_active_table_type AS ("TABLE_OID" oid, "TABLE_SIZE" int8); +CREATE OR REPLACE FUNCTION diskquota.diskquota_fetch_table_stat(int4, oid[]) RETURNS setof diskquota.diskquota_active_table_type +AS 'MODULE_PATHNAME', 'diskquota_fetch_table_stat' +LANGUAGE C VOLATILE; + +DROP TABLE IF EXISTS diskquota.target; +ALTER TABLE diskquota.quota_config DROP COLUMN segratio; +-- clean table_size and frop segid column +-- delete segments table size +DELETE FROM diskquota.table_size WHERE segid != -1; +-- delete tablespace quota config +DELETE FROM diskquota.quota_config WHERE quotatype=2 or quotatype=3; +ALTER 
TABLE diskquota.table_size DROP CONSTRAINT table_size_pkey; +ALTER TABLE diskquota.table_size SET DISTRIBUTED RANDOMLY; +ALTER TABLE diskquota.table_size DROP COLUMN segid; +ALTER TABLE diskquota.table_size SET DISTRIBUTED BY (tableid); +ALTER TABLE diskquota.table_size ADD PRIMARY KEY (tableid); diff --git a/upgrade_test/Makefile b/upgrade_test/Makefile index 7acfdda526d..5089f2d09ef 100644 --- a/upgrade_test/Makefile +++ b/upgrade_test/Makefile @@ -1,5 +1,5 @@ REGRESS = dummy -REGRESS_OPTS = --schedule=diskquota_schedule_upgrade --init-file=init_file +REGRESS_OPTS = --schedule=diskquota_schedule_upgrade --schedule=diskquota_schedule_downgrade --init-file=init_file PGXS := $(shell pg_config --pgxs) include $(PGXS) diff --git a/upgrade_test/diskquota_schedule_downgrade b/upgrade_test/diskquota_schedule_downgrade new file mode 100644 index 00000000000..a030ec15160 --- /dev/null +++ b/upgrade_test/diskquota_schedule_downgrade @@ -0,0 +1,23 @@ +# Test new version diskquota with old extension script +test: install_new_version +test: init +test: prepare +test: set_config +# execute downgrade scripts +test: downgrade_extension +test: test_role test_schema test_reschema test_temp_role test_rename test_delete_quota +test: clean + +# Test downgrade extension to old version +# run by old version diskquota +test: install_old_version +test: install_new_version +test: init +test: prepare +test: set_config +test: downgrade_extension +# downgrade diskquota to old version +test: install_old_version +# run by old version diskquota +test: test_role test_schema test_reschema test_temp_role test_rename test_delete_quota +test: clean diff --git a/upgrade_test/expected/downgrade_extension.out b/upgrade_test/expected/downgrade_extension.out new file mode 100644 index 00000000000..d10f1216c50 --- /dev/null +++ b/upgrade_test/expected/downgrade_extension.out @@ -0,0 +1,2 @@ +\set old_version `echo $OLD_VERSION` +alter extension diskquota update to :'old_version'; diff --git a/upgrade_test/sql/downgrade_extension.sql b/upgrade_test/sql/downgrade_extension.sql new file mode 100644 index 00000000000..d10f1216c50 --- /dev/null +++ b/upgrade_test/sql/downgrade_extension.sql @@ -0,0 +1,2 @@ +\set old_version `echo $OLD_VERSION` +alter extension diskquota update to :'old_version'; From 674a941ce74ac525c05c9603d02e3fe80c0377b5 Mon Sep 17 00:00:00 2001 From: Xing GUO Date: Sat, 18 Sep 2021 12:31:31 +0800 Subject: [PATCH 073/330] Remove non-sense pattern from .gitignore generated by merge conflict (#68) --- .gitignore | 1 - 1 file changed, 1 deletion(-) diff --git a/.gitignore b/.gitignore index bc12174ab3b..ffc330f1fec 100644 --- a/.gitignore +++ b/.gitignore @@ -4,7 +4,6 @@ regression.out regression.diffs /results/ -<<<<<<< HEAD .vscode upgrade_test/regression.out From 148f6653f4d51c91e00a9437af3f8db81a7b0633 Mon Sep 17 00:00:00 2001 From: Xing Guo Date: Wed, 22 Sep 2021 10:16:57 +0800 Subject: [PATCH 074/330] Fix typo: owenr -> owner. (#70) Resolve #40. --- quotamodel.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/quotamodel.c b/quotamodel.c index 1231a50d728..9c7542ca2ec 100644 --- a/quotamodel.c +++ b/quotamodel.c @@ -772,7 +772,7 @@ calculate_table_disk_usage(bool is_init) } /* - * scan pg_class to detect table event: drop, reset schema, reset owenr. + * scan pg_class to detect table event: drop, reset schema, reset owner. 
* calculate the file size for active table and update namespace_size_map * and role_size_map */ From de8418b274e7b50a84b1ae6246140347ea5c9243 Mon Sep 17 00:00:00 2001 From: Xing Guo Date: Mon, 27 Sep 2021 10:33:10 +0800 Subject: [PATCH 075/330] Add support for pausing and resuming the extension. (#69) This change adds two additional functions for user calling: 1. diskquota.pause() After this function being called, diskquota keeps counting the disk usage but doesn't emit an error when the disk usage limit is exceeded. 2. diskquota.resume() After this function being called, diskquota resumes to emit an error when the disk usage limit is exceeded. --- diskquota--1.0--2.0.sql | 10 ++++ diskquota--2.0--1.0.sql | 3 ++ diskquota--2.0.sql | 10 ++++ diskquota.c | 7 +++ diskquota.h | 2 + diskquota_schedule | 1 + diskquota_utility.c | 85 ++++++++++++++++++++++++++++++ expected/test_pause_and_resume.out | 56 ++++++++++++++++++++ quotamodel.c | 22 +++++++- sql/test_pause_and_resume.sql | 37 +++++++++++++ 10 files changed, 231 insertions(+), 2 deletions(-) create mode 100644 expected/test_pause_and_resume.out create mode 100644 sql/test_pause_and_resume.sql diff --git a/diskquota--1.0--2.0.sql b/diskquota--1.0--2.0.sql index 299411620b5..9803eeb59de 100644 --- a/diskquota--1.0--2.0.sql +++ b/diskquota--1.0--2.0.sql @@ -22,6 +22,16 @@ RETURNS void STRICT AS 'MODULE_PATHNAME' LANGUAGE C; +CREATE OR REPLACE FUNCTION diskquota.pause() +RETURNS void STRICT +AS 'MODULE_PATHNAME', 'diskquota_pause' +LANGUAGE C; + +CREATE OR REPLACE FUNCTION diskquota.resume() +RETURNS void STRICT +AS 'MODULE_PATHNAME', 'diskquota_resume' +LANGUAGE C; + ALTER TABLE diskquota.table_size ADD COLUMN segid smallint DEFAULT -1; ALTER TABLE diskquota.table_size DROP CONSTRAINT table_size_pkey; ALTER TABLE diskquota.table_size ADD PRIMARY KEY (tableid,segid); diff --git a/diskquota--2.0--1.0.sql b/diskquota--2.0--1.0.sql index e93cad5c1c0..c79c00c18de 100644 --- a/diskquota--2.0--1.0.sql +++ b/diskquota--2.0--1.0.sql @@ -4,6 +4,9 @@ DROP FUNCTION IF EXISTS diskquota.set_role_tablespace_quota(text, text, text); DROP FUNCTION IF EXISTS diskquota.set_per_segment_quota(text, float4); +DROP FUNCTION IF EXISTS diskquota.pause(); + +DROP FUNCTION IF EXISTS diskquota.resume(); CREATE OR REPLACE VIEW diskquota.show_fast_schema_quota_view AS select pgns.nspname as schema_name, pgc.relnamespace as schema_oid, qc.quotalimitMB as quota_in_mb, sum(ts.size) as nspsize_in_bytes diff --git a/diskquota--2.0.sql b/diskquota--2.0.sql index 4a4c4b0f700..e98ad3f3ac5 100644 --- a/diskquota--2.0.sql +++ b/diskquota--2.0.sql @@ -64,6 +64,16 @@ RETURNS void STRICT AS 'MODULE_PATHNAME' LANGUAGE C; +CREATE OR REPLACE FUNCTION diskquota.pause() +RETURNS void STRICT +AS 'MODULE_PATHNAME', 'diskquota_pause' +LANGUAGE C; + +CREATE OR REPLACE FUNCTION diskquota.resume() +RETURNS void STRICT +AS 'MODULE_PATHNAME', 'diskquota_resume' +LANGUAGE C; + CREATE VIEW diskquota.show_fast_schema_quota_view AS select pgns.nspname as schema_name, pgc.relnamespace as schema_oid, qc.quotalimitMB as quota_in_mb, sum(ts.size) as nspsize_in_bytes from diskquota.table_size as ts, diff --git a/diskquota.c b/diskquota.c index a8b29d8d3d0..c7e05544e3b 100644 --- a/diskquota.c +++ b/diskquota.c @@ -84,6 +84,13 @@ ExtensionDDLMessage *extension_ddl_message = NULL; static HTAB *disk_quota_worker_map = NULL; static int num_db = 0; +/* + * diskquota_paused is a flag used to pause the extension (when the flag is + * enabled, the extension keeps counting the disk usage but doesn't emit an + * 
error when the disk usage limit is exceeded). + */ +bool *diskquota_paused = NULL; + /* functions of disk quota*/ void _PG_init(void); void _PG_fini(void); diff --git a/diskquota.h b/diskquota.h index 696f6f18fd9..c19a5a068e8 100644 --- a/diskquota.h +++ b/diskquota.h @@ -36,6 +36,7 @@ struct DiskQuotaLocks LWLock *extension_ddl_message_lock; LWLock *extension_ddl_lock; /* ensure create diskquota extension serially */ LWLock *monitoring_dbid_cache_lock; + LWLock *paused_lock; }; typedef struct DiskQuotaLocks DiskQuotaLocks; @@ -90,6 +91,7 @@ typedef enum MessageResult MessageResult; extern DiskQuotaLocks diskquota_locks; extern ExtensionDDLMessage *extension_ddl_message; +extern bool *diskquota_paused; /* drop extension hook */ extern void register_diskquota_object_access_hook(void); diff --git a/diskquota_schedule b/diskquota_schedule index 068adfe7a99..7e968f0b493 100644 --- a/diskquota_schedule +++ b/diskquota_schedule @@ -12,4 +12,5 @@ test: test_vacuum test: test_primary_failure test: test_extension test: test_manytable +test: test_pause_and_resume test: clean diff --git a/diskquota_utility.c b/diskquota_utility.c index cf709050c4b..fabfd1324a6 100644 --- a/diskquota_utility.c +++ b/diskquota_utility.c @@ -40,9 +40,12 @@ #include "utils/memutils.h" #include "utils/numeric.h" #include "utils/snapmgr.h" +#include "libpq-fe.h" #include #include +#include +#include #include "diskquota.h" #include "gp_activetable.h" @@ -51,6 +54,8 @@ PG_FUNCTION_INFO_V1(init_table_size_table); PG_FUNCTION_INFO_V1(diskquota_start_worker); +PG_FUNCTION_INFO_V1(diskquota_pause); +PG_FUNCTION_INFO_V1(diskquota_resume); PG_FUNCTION_INFO_V1(set_schema_quota); PG_FUNCTION_INFO_V1(set_role_quota); PG_FUNCTION_INFO_V1(set_schema_tablespace_quota); @@ -289,6 +294,86 @@ diskquota_start_worker(PG_FUNCTION_ARGS) PG_RETURN_VOID(); } +/* + * Dispatch pausing/resuming command to segments. + */ +static void +dispatch_pause_or_resume_command(bool pause_extension) +{ + CdbPgResults cdb_pgresults = {NULL, 0}; + int i; + StringInfoData sql; + + initStringInfo(&sql); + appendStringInfo(&sql, "SELECT diskquota.%s", pause_extension ? "pause()" : "resume()"); + CdbDispatchCommand(sql.data, DF_NONE, &cdb_pgresults); + + for (i = 0; i < cdb_pgresults.numResults; ++i) + { + PGresult *pgresult = cdb_pgresults.pg_results[i]; + if (PQresultStatus(pgresult) != PGRES_TUPLES_OK) + { + cdbdisp_clearCdbPgResults(&cdb_pgresults); + ereport(ERROR, + (errmsg("[diskquota] %s extension on segments, encounter unexpected result from segment: %d", + pause_extension ? "pausing" : "resuming", + PQresultStatus(pgresult)))); + } + } + cdbdisp_clearCdbPgResults(&cdb_pgresults); +} + +/* + * Set diskquota_paused to true. + * This function is called by user. After this function being called, diskquota + * keeps counting the disk usage but doesn't emit an error when the disk usage + * limit is exceeded. + */ +Datum +diskquota_pause(PG_FUNCTION_ARGS) +{ + if (!superuser()) + { + ereport(ERROR, + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + errmsg("must be superuser to pause diskquota"))); + } + + LWLockAcquire(diskquota_locks.paused_lock, LW_EXCLUSIVE); + *diskquota_paused = true; + LWLockRelease(diskquota_locks.paused_lock); + + if (IS_QUERY_DISPATCHER()) + dispatch_pause_or_resume_command(true /* pause_extension */); + + PG_RETURN_VOID(); +} + +/* + * Set diskquota_paused to false. + * This function is called by user. After this function being called, diskquota + * resume to emit an error when the disk usage limit is exceeded. 
+ */ +Datum +diskquota_resume(PG_FUNCTION_ARGS) +{ + if (!superuser()) + { + ereport(ERROR, + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + errmsg("must be superuser to resume diskquota"))); + } + + LWLockAcquire(diskquota_locks.paused_lock, LW_EXCLUSIVE); + *diskquota_paused = false; + LWLockRelease(diskquota_locks.paused_lock); + + if (IS_QUERY_DISPATCHER()) + dispatch_pause_or_resume_command(false /* pause_extension */); + + PG_RETURN_VOID(); +} + /* * Check whether database is empty (no user table created) */ diff --git a/expected/test_pause_and_resume.out b/expected/test_pause_and_resume.out new file mode 100644 index 00000000000..f0cee34e2a6 --- /dev/null +++ b/expected/test_pause_and_resume.out @@ -0,0 +1,56 @@ +-- Test pause and resume. +CREATE SCHEMA s1; +SET search_path TO s1; +CREATE TABLE a(i int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +CREATE TABLE b(i int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +-- expect insert succeed +INSERT INTO a SELECT generate_series(1,100000); +SELECT diskquota.set_schema_quota('s1', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- expect insert fail +INSERT INTO a SELECT generate_series(1,100); +ERROR: schema's disk space quota exceeded with name:s1 +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); +ERROR: schema's disk space quota exceeded with name:s1 +-- pause extension +SELECT diskquota.pause(); + pause +------- + +(1 row) + +-- expect insert succeed +INSERT INTO a SELECT generate_series(1,100); +-- expect insert succeed +INSERT INTO b SELECT generate_series(1,100); +-- resume extension +SELECT diskquota.resume(); + resume +-------- + +(1 row) + +-- expect insert fail +INSERT INTO a SELECT generate_series(1,100); +ERROR: schema's disk space quota exceeded with name:s1 +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); +ERROR: schema's disk space quota exceeded with name:s1 +RESET search_path; +DROP TABLE s1.a, s1.b; +DROP SCHEMA s1; diff --git a/quotamodel.c b/quotamodel.c index 9c7542ca2ec..e63762b21a3 100644 --- a/quotamodel.c +++ b/quotamodel.c @@ -402,10 +402,11 @@ disk_quota_shmem_startup(void) init_lwlocks(); /* - * Three shared memory data. extension_ddl_message is used to handle + * Four shared memory data. extension_ddl_message is used to handle * diskquota extension create/drop command. disk_quota_black_map is used * to store out-of-quota blacklist. active_tables_map is used to store - * active tables whose disk usage is changed. + * active tables whose disk usage is changed. diskquota_paused is a flag + * used to pause the extension. 
*/ extension_ddl_message = ShmemInitStruct("disk_quota_extension_ddl_message", sizeof(ExtensionDDLMessage), @@ -437,6 +438,12 @@ disk_quota_shmem_startup(void) &hash_ctl, HASH_ELEM | HASH_FUNCTION); + diskquota_paused = ShmemInitStruct("diskquota_paused", + sizeof(bool), + &found); + if (!found) + memset((void *) diskquota_paused, 0, sizeof(bool)); + LWLockRelease(AddinShmemInitLock); } @@ -458,6 +465,7 @@ init_lwlocks(void) diskquota_locks.extension_ddl_message_lock = LWLockAssign(); diskquota_locks.extension_ddl_lock = LWLockAssign(); diskquota_locks.monitoring_dbid_cache_lock = LWLockAssign(); + diskquota_locks.paused_lock = LWLockAssign(); } /* @@ -473,6 +481,7 @@ DiskQuotaShmemSize(void) size = add_size(size, hash_estimate_size(MAX_DISK_QUOTA_BLACK_ENTRIES, sizeof(GlobalBlackMapEntry))); size = add_size(size, hash_estimate_size(diskquota_max_active_tables, sizeof(DiskQuotaActiveTableEntry))); size = add_size(size, hash_estimate_size(MAX_NUM_MONITORED_DB, sizeof(Oid))); + size += sizeof(bool); /* sizeof(*diskquota_paused) */ return size; } @@ -1326,6 +1335,7 @@ quota_check_common(Oid reloid) Oid nsOid = InvalidOid; Oid tablespaceoid = InvalidOid; bool found; + bool paused; BlackMapEntry keyitem; GlobalBlackMapEntry *entry; @@ -1339,6 +1349,14 @@ quota_check_common(Oid reloid) { return true; } + + LWLockAcquire(diskquota_locks.paused_lock, LW_SHARED); + paused = *diskquota_paused; + LWLockRelease(diskquota_locks.paused_lock); + + if (paused) + return true; + LWLockAcquire(diskquota_locks.black_map_lock, LW_SHARED); for (QuotaType type = 0; type < NUM_QUOTA_TYPES; ++type) { diff --git a/sql/test_pause_and_resume.sql b/sql/test_pause_and_resume.sql new file mode 100644 index 00000000000..437146e71cb --- /dev/null +++ b/sql/test_pause_and_resume.sql @@ -0,0 +1,37 @@ +-- Test pause and resume. +CREATE SCHEMA s1; +SET search_path TO s1; + +CREATE TABLE a(i int); +CREATE TABLE b(i int); + +-- expect insert succeed +INSERT INTO a SELECT generate_series(1,100000); + +SELECT diskquota.set_schema_quota('s1', '1 MB'); +SELECT pg_sleep(5); +-- expect insert fail +INSERT INTO a SELECT generate_series(1,100); +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); + +-- pause extension +SELECT diskquota.pause(); + +-- expect insert succeed +INSERT INTO a SELECT generate_series(1,100); +-- expect insert succeed +INSERT INTO b SELECT generate_series(1,100); + +-- resume extension +SELECT diskquota.resume(); + +-- expect insert fail +INSERT INTO a SELECT generate_series(1,100); +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); + +RESET search_path; +DROP TABLE s1.a, s1.b; +DROP SCHEMA s1; + From ab3df5c4fd288f7e2fd37acb0473274a0694a8cc Mon Sep 17 00:00:00 2001 From: Xing Guo Date: Tue, 28 Sep 2021 21:31:30 +0800 Subject: [PATCH 076/330] Fix incorrect number of locks in diskquota. (#71) Currently, we use DiskQuotaLocksItemNumber to count the number of locks. In de8418b, we forget to update it and it's causing memory issues. To avoid such issues in future, we use sizeof(DiskQuotaLocks) / sizeof(void*) to calculate it. 
Co-authored-by: Hao Zhang --- diskquota.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/diskquota.h b/diskquota.h index c19a5a068e8..ca644be0ba8 100644 --- a/diskquota.h +++ b/diskquota.h @@ -28,7 +28,6 @@ typedef enum DISKQUOTA_READY_STATE } DiskQuotaState; -#define DiskQuotaLocksItemNumber (5) struct DiskQuotaLocks { LWLock *active_table_lock; @@ -39,6 +38,7 @@ struct DiskQuotaLocks LWLock *paused_lock; }; typedef struct DiskQuotaLocks DiskQuotaLocks; +#define DiskQuotaLocksItemNumber (sizeof(DiskQuotaLocks) / sizeof(void*)) /* * MessageBox is used to store a message for communication between From e6334cb5e762f98656ea1d5c6ca690c4835fef27 Mon Sep 17 00:00:00 2001 From: Xing Guo Date: Thu, 14 Oct 2021 15:14:57 +0800 Subject: [PATCH 077/330] Make NUM_ATTRS into a macro NUM_QUOTA_CONFIG_ATTRS. (#74) This change helps making NUM_ATTRS into a macro to improve the readability. NFCI. --- quotamodel.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/quotamodel.c b/quotamodel.c index e63762b21a3..ac3f5273b57 100644 --- a/quotamodel.c +++ b/quotamodel.c @@ -59,6 +59,8 @@ /* per database level max size of black list */ #define MAX_LOCAL_DISK_QUOTA_BLACK_ENTRIES 8192 #define MAX_NUM_KEYS_QUOTA_MAP 8 +/* Number of attributes in quota configuration records. */ +#define NUM_QUOTA_CONFIG_ATTRS 5 typedef struct TableSizeEntry TableSizeEntry; typedef struct NamespaceSizeEntry NamespaceSizeEntry; @@ -1209,7 +1211,6 @@ do_load_quotas(void) * config change. */ clear_all_quota_maps(); - const unsigned int NUM_ATTRIBUTES = 5; extMajorVersion = get_ext_major_version(); /* @@ -1249,7 +1250,7 @@ do_load_quotas(void) errmsg("[diskquota] load_quotas SPI_execute failed: error code %d", ret))); tupdesc = SPI_tuptable->tupdesc; - if (tupdesc->natts != NUM_ATTRIBUTES || + if (tupdesc->natts != NUM_QUOTA_CONFIG_ATTRS || ((tupdesc)->attrs[0])->atttypid != OIDOID || ((tupdesc)->attrs[1])->atttypid != INT4OID || ((tupdesc)->attrs[2])->atttypid != INT8OID) @@ -1264,10 +1265,10 @@ do_load_quotas(void) for (i = 0; i < SPI_processed; i++) { HeapTuple tup = SPI_tuptable->vals[i]; - Datum vals[NUM_ATTRIBUTES]; - bool isnull[NUM_ATTRIBUTES]; + Datum vals[NUM_QUOTA_CONFIG_ATTRS]; + bool isnull[NUM_QUOTA_CONFIG_ATTRS]; - for (int i = 0; i < NUM_ATTRIBUTES; ++i) + for (int i = 0; i < NUM_QUOTA_CONFIG_ATTRS; ++i) { vals[i] = SPI_getbinval(tup, tupdesc, i + 1, &(isnull[i])); if (i <= 2 && isnull[i]) From 8e0cae7fd2edbf8e29e6917f3fee8a1cb2ce2485 Mon Sep 17 00:00:00 2001 From: Xing Guo Date: Thu, 14 Oct 2021 15:15:14 +0800 Subject: [PATCH 078/330] Create database 'diskquota' before running regression tests. (#75) We have to create database 'diskquota' manually before running regression tests. This change helps resolve it by creating the database in the init stage. --- sql/init.sql | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/sql/init.sql b/sql/init.sql index e8b1d49854f..921be5295db 100644 --- a/sql/init.sql +++ b/sql/init.sql @@ -1,3 +1,7 @@ +-- start_ignore +CREATE DATABASE diskquota; +-- end_ignore + -- start_ignore \! 
gpconfig -c shared_preload_libraries -v diskquota > /dev/null -- end_ignore From 591879bc2ea112995d1afe472109dded686c1553 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Xuebin=20Su=20=28=E8=8B=8F=E5=AD=A6=E6=96=8C=29?= <12034000+xuebinsu@users.noreply.github.com> Date: Thu, 4 Nov 2021 10:16:52 +0800 Subject: [PATCH 079/330] Add RFC for hard limit (#76) --- doc/rfc_001_hard_limit.md | 103 ++++++++++++++++++++++++++++++++ 1 file changed, 103 insertions(+) create mode 100644 doc/rfc_001_hard_limit.md diff --git a/doc/rfc_001_hard_limit.md b/doc/rfc_001_hard_limit.md new file mode 100644 index 00000000000..de8357bbe20 --- /dev/null +++ b/doc/rfc_001_hard_limit.md @@ -0,0 +1,103 @@ +# [RFC 001] Hard Limit for Diskquota + +This document describes the design of the hard limit feature for Diskquota 2.0. + +## Motivation + +Diskquota 1.0 only supports a so-called "soft limit", meaning that Diskquota will not interrupt any running query even if the amount of data the query writes exceeds some quota. + +Common types of queries that can write a large amount of data include +- `CREATE TABLE AS` +- `CREATE INDEX` +- `VACUUM FULL` + +Running a single query of these types can take up all the space of a disk, which can cause issues, such as a [Disk Full Failure](https://www.postgresql.org/docs/current/disk-full.html) that crashes the whole database system at worst. + +Therefore, to mitigate the risk of having disk full issues, we plan to introduce "hard limit" in Diskquota 2.0, which enables Diskquota to terminate an in-progress query if the amount of data it writes exceeds some quota. + +Due to the difficulty of observing the intermediate states of an in-progress query in Greenplum, implementing hard limit is not easy. Specifically, there are two major challenges in the way: +1. Observing intermediate states of a query under Greenplum's MVCC mechanism. +2. Ensuring data consistency after seeing uncommitted changes. + +The rest of this doc will analyze the challenges, propose possible approaches to tackle them, and introduce the design decisions with the rationales behind them. + +## Challenge 1: Observing Intermediate States + +Diskquota cares about the relations, including tables, indexes, and more, that receive new data. Those relations are called "**active**" relations. Diskquota uses background workers (bgworkers) to collect active relations periodically and then calculates their sizes using an OS system call like `stat()`. + +Active relations can be produced in two ways: +- Case 1: By writing new data to existing relations, e.g., using `INSERT` or `COPY FROM`. In this case, Diskquota does not need to observe any intermediate state during execution because the information of the active relations is committed and is visible to the background worker. +- Case 2: By creating new relations with data, e.g., using `CREATE TABLE AS` or `CREATE INDEX`. This is the hard part. In this case, the information of the active relations has not been committed yet during execution. Therefore, the information is not visible to the bgworkers when they scan the catalog tables under MVCC. + +For Case 2, to enable the bgworkers to observe the active relations created by an in-progress query, there are two options: +1. **The `SNAPSHOT_DIRTY` approach:** Disregarding MVCC and scanning the catalog tables using `SNAPSHOT_DIRTY`. In this way, the bgworkers can see uncommitted information of the active relations by doing a table scan. +2.
**The pub-sub approach:** Publishing the information of newly created active relations to a shared memory area using hooks when executing a query. For example, we can use the `object_access_hook` to write the information in the relation cache of a relation that is being created to the shared memory area. The bgworkers can then retrieve the information from the shared memory area periodically. + +## Challenge 2: Ensuring Data Consistency + +Since bgworkers are allowed to observe uncommitted states, extra work is required to ensure the bgworkers will never see inconsistent snapshots for both options. +- For the `SNAPSHOT_DIRTY` approach, it is required to determine which version should take effect given that there may be multiple versions for one tuple, including the versions created by aborted transactions. +- For the pub-sub approach, it is required to sync the information in the shared memory area against the latest committed version of the catalogs. + +The `SNAPSHOT_DIRTY` approach is more complicated and more error-prone than the pub-sub approach since it requires Diskquota to do visibility checks on its own. Therefore, we choose the pub-sub approach to implement hard limit. + +Even though taking the pub-sub approach frees us from the complicated visibility check process, keeping the shared memory area and the catalogs in sync is still non-trivial. Note that the information of a relation in the catalogs can either be updated by altering the relation, or be deleted by dropping the relation. A natural idea is to monitor each of these operations, e.g., using the `object_access_hook`, and replay it to the shared memory area. However, this does not solve the consistency issue because these operations can be aborted. Due to the MVCC mechanism, nothing needs to be done to the catalogs when aborting such operations and no hook can be used to roll back the changes to the shared memory area at that time. + +### Aligning with the Catalogs + +Given that it is useless to replay each modification operation to the shared memory area, we choose not to replay any operation at all but to align the entries in the shared memory area against tuples in the catalogs. + +Specifically, for each entry in the shared memory area, search the catalogs for the tuple with the same key under MVCC, then +- if a tuple is found in the catalogs, that tuple must be written by the latest committed transaction and therefore must be no later than the transaction that writes the entry to the shared memory area. Therefore, the tuple in the catalogs prevails and the shared memory entry is deleted. +- otherwise, there are still two cases: + 1. **Tuple Uncommitted:** the transaction that writes the entry to the shared memory area is the latest one and has not yet been committed. In this case, Diskquota should use the information in the shared memory entry since it is the only source. + 2. **Tuple Deleted:** the tuple in the catalogs has been deleted by a committed transaction and the shared memory area has not been cleaned. We must prevent this case from happening because it is hard to distinguish it from the uncommitted case. Fortunately, Greenplum provides an `unlink` hook that gets called at the end of a transaction to delete files of relations. Diskquota can use the `unlink` hook to delete entries corresponding to relations to be deleted from the shared memory area.
+ +The alignment process is summarized as the following two pieces of pseudo code: +- Each time the Diskquota bgworker retrieves information of active relations, do + ```python + for entry in shared memory area: + tuple = SearchSysCache(entry.key) + if HeapTupleIsValid(tuple): + del entry from shared memory area + ``` +- Each time the `unlink` hook gets called for a `relfilenode`, do + ```python + entry = Search shared memory area by relfilenode + del entry from shared memory area + ``` + +With alignment, entries in the shared memory area only represent uncommitted relations, and tuples in the catalogs are used for committed relations. There is no intersection between the two sets, which guarantees that the Diskquota bgworker will always see a consistent snapshot. + +### Limitations and Workarounds + +While the pub-sub approach with alignment enables Diskquota to observe uncommitted active relations and guarantees data consistency, it does have some inherent limitations. + +One of the most notable limitations is that it does not support hard limit for any operation that modifies existing tuples in the catalogs, such as +- `ALTER TABLE` +- `DROP TABLE` +- `TRUNCATE` + +Such operations will not be visible to Diskquota until the transaction is committed. For example, if a user changes the tablespace of a table `t` using +```sql +ALTER TABLE t SET TABLESPACE new_tablespace; +``` + +From Diskquota's perspective, table `t` still belongs to the old tablespace while it is being copied to the new tablespace. As a result, the size of table `t` will be limited by the quota on the *old* tablespace instead of the *new* tablespace until the `ALTER TABLE` command is completed. + +The root cause of this limitation is that such modification operations will not take effect until the transaction is committed. Specifically, +- Due to MVCC, they will not take effect **in the catalogs** until committed. +- Due to the alignment mechanism, they will not take effect **in the shared memory area** either, given that table `t` is already visible from the catalogs to Diskquota and the corresponding shared memory entry will be deleted when the bgworker retrieves active relations. + +One way to overcome this limitation is to enhance the **soft limit** mechanism to calculate the resulting quota usage of such catalog modification operations and reject, before execution, those that would cause the quota to be exceeded. This is also not trivial to implement but is in our plan. + +For now, as a workaround, in order to make the catalog modification operations hard-limited based on the new information of relations instead of the old information, the user can use the `CREATE AS` command to create a new relation with the new information and then drop the old one. Because Diskquota can see relations that have not yet been committed, the `CREATE AS` command can be, and will be, hard-limited based on the new information.
+ +In the above example of changing the tablespace, in order to count the size of table `t` in the quota usage of the new tablespace, the user can replace the `ALTER TABLE` command with the following `CREATE`-`DROP`-`RENAME` transaction: +```sql +BEGIN; +CREATE TABLE t_1 TABLESPACE new_tablespace AS SELECT * FROM t; +DROP TABLE t; +ALTER TABLE t_1 RENAME TO t; +COMMIT; +``` From 649e21ab609211535d6130a03687982b8b19e48c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Xuebin=20Su=20=28=E8=8B=8F=E5=AD=A6=E6=96=8C=29?= <12034000+xuebinsu@users.noreply.github.com> Date: Fri, 12 Nov 2021 15:40:54 +0800 Subject: [PATCH 080/330] Fix stack full issue when flush to table_size (#81) * Fix stack full issue when flush to table_size * Restore optimizer after flushing * Resolve deadlock and add expected test result --- diskquota_schedule | 1 + diskquota_utility.c | 2 +- expected/test_many_active_tables.out | 1030 ++++++++++++++++++++++++++ quotamodel.c | 54 +- sql/test_many_active_tables.sql | 17 + 5 files changed, 1081 insertions(+), 23 deletions(-) create mode 100644 expected/test_many_active_tables.out create mode 100644 sql/test_many_active_tables.sql diff --git a/diskquota_schedule b/diskquota_schedule index 7e968f0b493..7941611c386 100644 --- a/diskquota_schedule +++ b/diskquota_schedule @@ -13,4 +13,5 @@ test: test_primary_failure test: test_extension test: test_manytable test: test_pause_and_resume +test: test_many_active_tables test: clean diff --git a/diskquota_utility.c b/diskquota_utility.c index fabfd1324a6..e0ef8542d5c 100644 --- a/diskquota_utility.c +++ b/diskquota_utility.c @@ -1184,7 +1184,7 @@ get_rel_oid_list(void) oidlist = lappend_oid(oidlist, lfirst_oid(l)); } } - relation_close(relation, NoLock); + relation_close(relation, AccessShareLock); list_free(indexIds); } } diff --git a/expected/test_many_active_tables.out b/expected/test_many_active_tables.out new file mode 100644 index 00000000000..be8da723287 --- /dev/null +++ b/expected/test_many_active_tables.out @@ -0,0 +1,1030 @@ +CREATE TABLE t1 (pk int, val int) +DISTRIBUTED BY (pk) +PARTITION BY RANGE (pk) (START (1) END (1000) EVERY (1)); +NOTICE: CREATE TABLE will create partition "t1_1_prt_1" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_2" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_3" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_4" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_5" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_6" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_7" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_8" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_9" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_10" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_11" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_12" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_13" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_14" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_15" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_16" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_17" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_18" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_19" for table "t1" +NOTICE: CREATE TABLE will create partition
"t1_1_prt_20" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_21" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_22" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_23" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_24" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_25" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_26" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_27" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_28" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_29" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_30" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_31" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_32" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_33" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_34" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_35" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_36" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_37" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_38" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_39" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_40" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_41" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_42" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_43" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_44" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_45" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_46" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_47" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_48" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_49" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_50" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_51" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_52" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_53" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_54" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_55" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_56" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_57" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_58" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_59" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_60" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_61" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_62" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_63" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_64" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_65" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_66" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_67" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_68" for table "t1" +NOTICE: CREATE TABLE 
will create partition "t1_1_prt_69" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_70" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_71" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_72" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_73" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_74" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_75" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_76" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_77" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_78" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_79" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_80" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_81" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_82" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_83" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_84" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_85" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_86" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_87" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_88" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_89" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_90" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_91" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_92" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_93" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_94" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_95" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_96" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_97" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_98" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_99" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_100" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_101" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_102" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_103" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_104" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_105" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_106" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_107" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_108" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_109" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_110" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_111" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_112" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_113" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_114" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_115" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_116" for table "t1" +NOTICE: CREATE TABLE will create partition 
"t1_1_prt_117" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_118" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_119" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_120" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_121" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_122" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_123" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_124" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_125" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_126" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_127" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_128" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_129" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_130" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_131" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_132" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_133" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_134" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_135" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_136" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_137" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_138" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_139" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_140" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_141" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_142" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_143" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_144" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_145" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_146" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_147" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_148" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_149" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_150" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_151" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_152" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_153" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_154" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_155" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_156" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_157" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_158" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_159" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_160" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_161" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_162" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_163" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_164" for table "t1" +NOTICE: CREATE TABLE will create partition 
"t1_1_prt_165" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_166" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_167" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_168" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_169" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_170" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_171" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_172" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_173" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_174" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_175" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_176" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_177" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_178" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_179" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_180" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_181" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_182" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_183" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_184" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_185" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_186" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_187" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_188" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_189" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_190" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_191" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_192" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_193" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_194" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_195" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_196" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_197" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_198" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_199" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_200" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_201" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_202" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_203" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_204" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_205" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_206" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_207" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_208" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_209" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_210" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_211" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_212" for table "t1" +NOTICE: CREATE TABLE will create partition 
"t1_1_prt_213" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_214" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_215" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_216" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_217" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_218" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_219" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_220" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_221" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_222" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_223" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_224" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_225" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_226" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_227" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_228" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_229" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_230" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_231" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_232" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_233" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_234" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_235" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_236" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_237" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_238" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_239" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_240" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_241" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_242" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_243" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_244" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_245" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_246" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_247" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_248" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_249" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_250" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_251" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_252" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_253" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_254" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_255" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_256" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_257" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_258" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_259" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_260" for table "t1" +NOTICE: CREATE TABLE will create partition 
"t1_1_prt_261" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_262" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_263" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_264" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_265" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_266" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_267" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_268" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_269" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_270" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_271" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_272" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_273" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_274" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_275" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_276" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_277" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_278" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_279" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_280" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_281" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_282" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_283" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_284" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_285" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_286" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_287" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_288" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_289" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_290" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_291" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_292" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_293" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_294" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_295" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_296" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_297" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_298" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_299" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_300" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_301" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_302" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_303" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_304" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_305" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_306" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_307" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_308" for table "t1" +NOTICE: CREATE TABLE will create partition 
"t1_1_prt_309" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_310" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_311" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_312" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_313" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_314" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_315" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_316" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_317" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_318" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_319" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_320" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_321" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_322" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_323" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_324" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_325" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_326" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_327" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_328" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_329" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_330" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_331" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_332" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_333" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_334" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_335" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_336" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_337" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_338" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_339" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_340" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_341" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_342" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_343" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_344" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_345" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_346" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_347" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_348" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_349" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_350" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_351" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_352" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_353" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_354" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_355" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_356" for table "t1" +NOTICE: CREATE TABLE will create partition 
"t1_1_prt_357" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_358" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_359" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_360" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_361" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_362" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_363" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_364" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_365" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_366" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_367" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_368" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_369" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_370" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_371" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_372" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_373" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_374" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_375" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_376" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_377" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_378" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_379" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_380" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_381" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_382" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_383" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_384" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_385" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_386" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_387" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_388" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_389" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_390" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_391" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_392" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_393" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_394" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_395" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_396" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_397" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_398" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_399" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_400" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_401" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_402" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_403" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_404" for table "t1" +NOTICE: CREATE TABLE will create partition 
"t1_1_prt_405" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_406" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_407" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_408" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_409" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_410" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_411" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_412" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_413" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_414" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_415" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_416" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_417" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_418" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_419" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_420" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_421" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_422" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_423" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_424" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_425" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_426" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_427" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_428" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_429" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_430" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_431" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_432" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_433" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_434" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_435" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_436" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_437" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_438" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_439" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_440" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_441" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_442" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_443" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_444" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_445" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_446" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_447" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_448" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_449" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_450" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_451" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_452" for table "t1" +NOTICE: CREATE TABLE will create partition 
"t1_1_prt_453" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_454" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_455" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_456" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_457" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_458" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_459" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_460" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_461" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_462" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_463" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_464" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_465" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_466" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_467" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_468" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_469" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_470" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_471" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_472" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_473" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_474" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_475" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_476" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_477" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_478" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_479" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_480" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_481" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_482" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_483" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_484" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_485" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_486" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_487" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_488" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_489" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_490" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_491" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_492" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_493" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_494" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_495" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_496" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_497" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_498" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_499" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_500" for table "t1" +NOTICE: CREATE TABLE will create partition 
"t1_1_prt_501" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_502" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_503" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_504" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_505" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_506" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_507" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_508" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_509" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_510" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_511" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_512" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_513" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_514" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_515" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_516" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_517" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_518" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_519" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_520" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_521" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_522" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_523" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_524" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_525" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_526" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_527" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_528" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_529" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_530" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_531" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_532" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_533" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_534" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_535" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_536" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_537" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_538" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_539" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_540" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_541" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_542" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_543" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_544" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_545" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_546" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_547" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_548" for table "t1" +NOTICE: CREATE TABLE will create partition 
"t1_1_prt_549" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_550" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_551" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_552" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_553" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_554" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_555" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_556" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_557" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_558" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_559" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_560" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_561" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_562" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_563" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_564" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_565" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_566" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_567" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_568" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_569" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_570" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_571" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_572" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_573" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_574" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_575" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_576" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_577" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_578" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_579" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_580" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_581" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_582" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_583" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_584" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_585" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_586" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_587" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_588" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_589" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_590" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_591" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_592" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_593" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_594" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_595" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_596" for table "t1" +NOTICE: CREATE TABLE will create partition 
"t1_1_prt_597" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_598" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_599" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_600" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_601" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_602" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_603" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_604" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_605" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_606" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_607" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_608" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_609" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_610" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_611" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_612" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_613" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_614" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_615" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_616" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_617" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_618" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_619" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_620" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_621" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_622" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_623" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_624" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_625" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_626" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_627" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_628" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_629" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_630" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_631" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_632" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_633" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_634" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_635" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_636" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_637" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_638" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_639" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_640" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_641" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_642" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_643" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_644" for table "t1" +NOTICE: CREATE TABLE will create partition 
"t1_1_prt_645" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_646" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_647" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_648" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_649" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_650" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_651" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_652" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_653" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_654" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_655" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_656" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_657" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_658" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_659" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_660" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_661" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_662" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_663" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_664" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_665" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_666" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_667" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_668" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_669" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_670" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_671" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_672" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_673" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_674" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_675" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_676" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_677" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_678" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_679" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_680" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_681" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_682" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_683" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_684" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_685" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_686" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_687" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_688" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_689" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_690" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_691" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_692" for table "t1" +NOTICE: CREATE TABLE will create partition 
"t1_1_prt_693" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_694" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_695" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_696" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_697" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_698" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_699" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_700" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_701" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_702" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_703" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_704" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_705" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_706" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_707" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_708" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_709" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_710" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_711" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_712" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_713" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_714" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_715" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_716" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_717" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_718" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_719" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_720" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_721" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_722" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_723" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_724" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_725" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_726" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_727" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_728" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_729" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_730" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_731" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_732" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_733" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_734" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_735" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_736" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_737" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_738" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_739" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_740" for table "t1" +NOTICE: CREATE TABLE will create partition 
"t1_1_prt_741" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_742" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_743" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_744" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_745" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_746" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_747" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_748" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_749" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_750" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_751" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_752" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_753" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_754" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_755" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_756" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_757" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_758" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_759" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_760" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_761" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_762" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_763" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_764" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_765" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_766" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_767" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_768" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_769" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_770" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_771" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_772" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_773" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_774" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_775" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_776" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_777" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_778" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_779" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_780" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_781" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_782" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_783" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_784" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_785" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_786" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_787" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_788" for table "t1" +NOTICE: CREATE TABLE will create partition 
"t1_1_prt_789" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_790" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_791" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_792" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_793" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_794" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_795" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_796" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_797" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_798" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_799" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_800" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_801" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_802" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_803" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_804" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_805" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_806" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_807" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_808" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_809" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_810" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_811" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_812" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_813" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_814" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_815" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_816" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_817" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_818" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_819" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_820" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_821" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_822" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_823" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_824" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_825" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_826" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_827" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_828" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_829" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_830" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_831" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_832" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_833" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_834" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_835" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_836" for table "t1" +NOTICE: CREATE TABLE will create partition 
"t1_1_prt_837" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_838" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_839" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_840" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_841" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_842" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_843" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_844" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_845" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_846" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_847" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_848" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_849" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_850" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_851" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_852" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_853" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_854" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_855" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_856" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_857" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_858" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_859" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_860" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_861" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_862" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_863" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_864" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_865" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_866" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_867" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_868" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_869" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_870" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_871" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_872" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_873" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_874" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_875" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_876" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_877" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_878" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_879" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_880" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_881" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_882" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_883" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_884" for table "t1" +NOTICE: CREATE TABLE will create partition 
"t1_1_prt_885" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_886" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_887" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_888" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_889" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_890" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_891" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_892" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_893" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_894" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_895" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_896" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_897" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_898" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_899" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_900" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_901" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_902" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_903" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_904" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_905" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_906" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_907" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_908" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_909" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_910" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_911" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_912" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_913" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_914" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_915" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_916" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_917" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_918" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_919" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_920" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_921" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_922" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_923" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_924" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_925" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_926" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_927" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_928" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_929" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_930" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_931" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_932" for table "t1" +NOTICE: CREATE TABLE will create partition 
"t1_1_prt_933" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_934" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_935" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_936" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_937" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_938" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_939" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_940" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_941" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_942" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_943" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_944" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_945" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_946" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_947" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_948" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_949" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_950" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_951" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_952" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_953" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_954" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_955" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_956" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_957" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_958" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_959" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_960" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_961" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_962" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_963" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_964" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_965" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_966" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_967" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_968" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_969" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_970" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_971" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_972" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_973" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_974" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_975" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_976" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_977" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_978" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_979" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_980" for table "t1" +NOTICE: CREATE TABLE will create partition 
"t1_1_prt_981" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_982" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_983" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_984" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_985" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_986" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_987" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_988" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_989" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_990" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_991" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_992" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_993" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_994" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_995" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_996" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_997" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_998" for table "t1" +NOTICE: CREATE TABLE will create partition "t1_1_prt_999" for table "t1" +INSERT INTO t1 +SELECT pk, val +FROM generate_series(1, 10000) AS val, generate_series(1, 999) AS pk; +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +SELECT count(*) >= 999 FROM diskquota.table_size WHERE size > 0; + ?column? +---------- + t +(1 row) + +DROP TABLE t1; +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +SELECT count(*) < 999 FROM diskquota.table_size WHERE size > 0; + ?column? +---------- + t +(1 row) + diff --git a/quotamodel.c b/quotamodel.c index ac3f5273b57..919a0dfd9c7 100644 --- a/quotamodel.c +++ b/quotamodel.c @@ -37,6 +37,7 @@ #include "storage/lwlock.h" #include "storage/shmem.h" #include "utils/builtins.h" +#include "utils/guc.h" #include "utils/fmgroids.h" #include "utils/lsyscache.h" #include "utils/snapmgr.h" @@ -688,7 +689,6 @@ refresh_disk_quota_usage(bool is_init) bool pushed_active_snap = false; bool ret = true; - elog(LOG, "refresh diskquota usage..."); StartTransactionCommand(); /* @@ -967,6 +967,7 @@ flush_to_table_size(void) TableSizeEntry *tsentry = NULL; StringInfoData delete_statement; StringInfoData insert_statement; + StringInfoData deleted_table_expr; bool delete_statement_flag = false; bool insert_statement_flag = false; int ret; @@ -974,21 +975,12 @@ flush_to_table_size(void) /* TODO: Add flush_size_interval to avoid flushing size info in every loop */ - /* concatenate all the need_to_flush table to SQL string */ - initStringInfo(&delete_statement); - switch (extMajorVersion) - { - case 1: - appendStringInfo(&delete_statement, "delete from diskquota.table_size where tableid in ( "); - break; - case 2: - appendStringInfo(&delete_statement, "delete from diskquota.table_size where (tableid, segid) in ( "); - break; - default: - ereport(ERROR, - (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("[diskquota] unknown diskquota extension version: %d", extMajorVersion))); - } + /* Disable ORCA since it does not support non-scalar subqueries. 
+	bool		old_optimizer = optimizer;
+	optimizer = false;
+
+	initStringInfo(&deleted_table_expr);
+	appendStringInfo(&deleted_table_expr, "WITH deleted_table AS ( VALUES ");
 
 	initStringInfo(&insert_statement);
 	appendStringInfo(&insert_statement, "insert into diskquota.table_size values ");
@@ -1001,10 +993,10 @@
 			switch (extMajorVersion)
 			{
 				case 1:
-					appendStringInfo(&delete_statement, "%u, ", tsentry->reloid);
+					appendStringInfo(&deleted_table_expr, "%u, ", tsentry->reloid);
 					break;
 				case 2:
-					appendStringInfo(&delete_statement, "(%u,%d), ", tsentry->reloid, tsentry->segid);
+					appendStringInfo(&deleted_table_expr, "(%u,%d), ", tsentry->reloid, tsentry->segid);
 					break;
 				default:
 					ereport(ERROR,
@@ -1026,14 +1018,14 @@
 				case 1:
 					if (tsentry->segid == -1)
 					{
-						appendStringInfo(&delete_statement, "%u, ", tsentry->reloid);
+						appendStringInfo(&deleted_table_expr, "%u, ", tsentry->reloid);
 						appendStringInfo(&insert_statement, "(%u,%ld), ", tsentry->reloid, tsentry->totalsize);
 						delete_statement_flag = true;
 						insert_statement_flag = true;
 					}
 					break;
 				case 2:
-					appendStringInfo(&delete_statement, "(%u,%d), ", tsentry->reloid, tsentry->segid);
+					appendStringInfo(&deleted_table_expr, "(%u,%d), ", tsentry->reloid, tsentry->segid);
 					appendStringInfo(&insert_statement, "(%u,%ld,%d), ", tsentry->reloid, tsentry->totalsize, tsentry->segid);
 					delete_statement_flag = true;
 					insert_statement_flag = true;
@@ -1045,13 +1037,29 @@
 		}
 	}
-	truncateStringInfo(&delete_statement, delete_statement.len - strlen(", "));
+	truncateStringInfo(&deleted_table_expr, deleted_table_expr.len - strlen(", "));
 	truncateStringInfo(&insert_statement, insert_statement.len - strlen(", "));
-	appendStringInfo(&delete_statement, ");");
+	appendStringInfo(&deleted_table_expr, ")");
 	appendStringInfo(&insert_statement, ";");
 
 	if (delete_statement_flag)
 	{
+		/* concatenate all the need_to_flush table to SQL string */
+		initStringInfo(&delete_statement);
+		appendStringInfoString(&delete_statement, (const char *) deleted_table_expr.data);
+		switch (extMajorVersion)
+		{
+			case 1:
+				appendStringInfo(&delete_statement, "delete from diskquota.table_size where tableid in ( SELECT * FROM deleted_table );");
+				break;
+			case 2:
+				appendStringInfo(&delete_statement, "delete from diskquota.table_size where (tableid, segid) in ( SELECT * FROM deleted_table );");
+				break;
+			default:
+				ereport(ERROR,
+						(errcode(ERRCODE_INTERNAL_ERROR),
+						 errmsg("[diskquota] unknown diskquota extension version: %d", extMajorVersion)));
+		}
 		ret = SPI_execute(delete_statement.data, false, 0);
 		if (ret != SPI_OK_DELETE)
 			ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR),
@@ -1064,6 +1072,8 @@
 			ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR),
 							errmsg("[diskquota] flush_to_table_size SPI_execute failed: error code %d", ret)));
 	}
+
+	optimizer = old_optimizer;
 }
 
 /*
diff --git a/sql/test_many_active_tables.sql b/sql/test_many_active_tables.sql
new file mode 100644
index 00000000000..36e7d4f5dc2
--- /dev/null
+++ b/sql/test_many_active_tables.sql
@@ -0,0 +1,17 @@
+CREATE TABLE t1 (pk int, val int)
+DISTRIBUTED BY (pk)
+PARTITION BY RANGE (pk) (START (1) END (1000) EVERY (1));
+
+INSERT INTO t1
+SELECT pk, val
+FROM generate_series(1, 10000) AS val, generate_series(1, 999) AS pk;
+
+SELECT pg_sleep(5);
+
+SELECT count(*) >= 999 FROM diskquota.table_size WHERE size > 0;
+
+DROP TABLE t1;
+
+SELECT pg_sleep(5);
+
+SELECT count(*) < 999 FROM diskquota.table_size WHERE size > 0;
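To make the new statement shape concrete: instead of interpolating the key list directly into an IN clause, flush_to_table_size() now collects every dirty (tableid, segid) key into one VALUES list and prepends it to the DELETE as a common table expression. For extension version 2 the statement sent through SPI_execute() has this shape (the oid/segid pairs below are invented for illustration, not taken from any test):

WITH deleted_table AS ( VALUES (16385,-1), (16385,0), (16391,-1) )
delete from diskquota.table_size where (tableid, segid) in ( SELECT * FROM deleted_table );

ORCA cannot plan this kind of non-scalar subquery, which is why the optimizer GUC is switched off before the statement runs and the saved value is restored at the end of the function.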
From fb2183aae0e16d31a3feb2b4f76cbb0027af4933 Mon Sep 17 00:00:00 2001
From: Xing Guo
Date: Fri, 19 Nov 2021 16:26:06 +0800
Subject: [PATCH 081/330] Improve error handling in diskquota_fetch_table_stat(). (#82)

Historically, diskquota fetches tables' size by applying pg_table_size()
on a given oid list. It may invoke pg_table_size() several times in one
transaction. In order to tolerate exceptions, we wrap it with PG_TRY()
and consume any errors thrown by pg_table_size() using FlushErrorState().
It has the following 2 potential risks.

1. Deadlock: Consider a table 't1' (reloid=oid1) that has a partition
't1_1_prt_11' (reloid=oid2). When diskquota_fetch_table_stat(1,
'{oid2, oid1}'::oid[]) is called in session 'A', this UDF will invoke
pg_table_size() on 't1_1_prt_11' and 't1' respectively. When
pg_table_size('t1_1_prt_11') throws an exception, the AccessShareLock
acquired by session 'A' will be held until the whole transaction ends.
At the same time, when another session 'B' is performing 'ALTER TABLE t1',
it will acquire the AccessExclusiveLock on 't1' and 't1_1_prt_11'
respectively. As a result, session 'A' is holding the AccessShareLock on
't1_1_prt_11' and is waiting for the AccessShareLock on 't1', while
session 'B' is holding the AccessExclusiveLock on 't1' and is waiting for
the AccessExclusiveLock on 't1_1_prt_11'.

2. The error message is missing. We should preserve the error message by
calling CopyErrorData() before FlushErrorState() and
elog(WARNING, error_message).

Co-Authored-by: Xuebin Su
Co-Authored-by: Hao Zhang
---
 Makefile                           | 13 +++++++++
 diskquota_schedule                 |  1 +
 expected/test_fetch_table_stat.out | 38 ++++++++++++++++++++++++++
 gp_activetable.c                   | 43 ++++++++++++++++++++++++++++--
 init_file                          |  5 ++++
 sql/test_fetch_table_stat.sql      | 24 +++++++++++++++++
 6 files changed, 122 insertions(+), 2 deletions(-)
 create mode 100644 expected/test_fetch_table_stat.out
 create mode 100644 sql/test_fetch_table_stat.sql

diff --git a/Makefile b/Makefile
index fba26081290..920a1456dc9 100644
--- a/Makefile
+++ b/Makefile
@@ -16,5 +16,18 @@ REGRESS_OPTS = --schedule=diskquota_schedule_int --init-file=init_file
 else
 REGRESS_OPTS = --schedule=diskquota_schedule --init-file=init_file
 endif
+
+# FIXME: This check is hacky, since test_fetch_table_stat relies on the
+# gp_inject_fault extension, we detect if the extension is built with
+# greenplum by checking the output of the command 'pg_config --configure'.
+# In the future, if the diskquota is built with GPDB7, or we backport the
+# commit below to 6X_STABLE, we don't need this check.
+# https://github.com/greenplum-db/gpdb/commit/8b897b12f6cb13753985faacab8e4053bf797a8b
+ifneq (,$(findstring '--enable-debug-extensions',$(shell pg_config --configure)))
+REGRESS_OPTS += --load-extension=gp_inject_fault
+else
+REGRESS_OPTS += --exclude-tests=test_fetch_table_stat
+endif
+
 PGXS := $(shell pg_config --pgxs)
 include $(PGXS)
diff --git a/diskquota_schedule b/diskquota_schedule
index 7941611c386..8c5594ad9f3 100644
--- a/diskquota_schedule
+++ b/diskquota_schedule
@@ -14,4 +14,5 @@ test: test_extension
 test: test_manytable
 test: test_pause_and_resume
 test: test_many_active_tables
+test: test_fetch_table_stat
 test: clean
diff --git a/expected/test_fetch_table_stat.out b/expected/test_fetch_table_stat.out
new file mode 100644
index 00000000000..477bc264d06
--- /dev/null
+++ b/expected/test_fetch_table_stat.out
@@ -0,0 +1,38 @@
+--
+-- 1. Test that when an error occurs in diskquota_fetch_table_stat
+-- the error message is preserved for us to debug.
+--
+CREATE TABLE t_error_handling (i int);
+NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table.
+HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
+-- Inject an error to a segment server, since this UDF is only called on segments.
+SELECT gp_inject_fault_infinite('diskquota_fetch_table_stat', 'error', dbid)
+  FROM gp_segment_configuration WHERE role='p' AND content=0;
+ gp_inject_fault_infinite
+--------------------------
+ Success:
+(1 row)
+
+-- Dispatch diskquota_fetch_table_stat to segments.
+-- There should be a warning message from segment server saying:
+-- fault triggered, fault name:'diskquota_fetch_table_stat' fault type:'error'
+-- We're not interested in the oid here, we aggregate the result by COUNT(*).
+SELECT COUNT(*)
+  FROM (SELECT diskquota.diskquota_fetch_table_stat(1, array[(SELECT oid FROM pg_class WHERE relname='t_error_handling')])
+          FROM gp_dist_random('gp_id') WHERE gp_segment_id=0) AS count;
+WARNING: fault triggered, fault name:'diskquota_fetch_table_stat' fault type:'error'
+ count
+-------
+     1
+(1 row)
+
+-- Reset the fault injector to prevent future failure.
+SELECT gp_inject_fault_infinite('diskquota_fetch_table_stat', 'reset', dbid)
+  FROM gp_segment_configuration WHERE role='p' AND content=0;
+ gp_inject_fault_infinite
+--------------------------
+ Success:
+(1 row)
+
+-- Do some clean-ups.
+DROP TABLE t_error_handling;
diff --git a/gp_activetable.c b/gp_activetable.c
index 805311b885d..61a7ac4f038 100644
--- a/gp_activetable.c
+++ b/gp_activetable.c
@@ -16,6 +16,7 @@
 #include "postgres.h"
 
 #include "access/htup_details.h"
+#include "access/xact.h"
 #include "catalog/indexing.h"
 #include "catalog/pg_class.h"
 #include "catalog/pg_type.h"
@@ -34,6 +35,7 @@
 #include "storage/smgr.h"
 #include "utils/array.h"
 #include "utils/builtins.h"
+#include "utils/faultinjector.h"
 #include "utils/fmgroids.h"
 #include "utils/lsyscache.h"
 #include "utils/relfilenodemap.h"
@@ -453,6 +455,9 @@ get_active_tables_stats(ArrayType *array)
 	}
 	else
 	{
+		MemoryContext oldcontext;
+		ResourceOwner oldowner;
+
 		relOid = DatumGetObjectId(fetch_att(ptr, typbyval, typlen));
 		segId = GpIdentity.segindex;
 		key.reloid = relOid;
@@ -463,18 +468,52 @@ get_active_tables_stats(ArrayType *array)
 		entry->segid = segId;
 
 		/*
-		 * avoid to generate ERROR if relOid is not existed (i.e. table
-		 * has been droped)
+		 * pg_table_size() may throw exceptions, in order not to abort the top level
+		 * transaction, we start a subtransaction for it. This operation is expensive,
+		 * but there're good reasons. E.g.,
+		 * When the subtransaction is aborted, the resources (e.g., locks) acquired
+		 * in pg_table_size() are released in time. We can avoid potential deadlock
+		 * risks by doing this.
 		 */
+		oldcontext = CurrentMemoryContext;
+		oldowner = CurrentResourceOwner;
+
+		BeginInternalSubTransaction(NULL /* save point name */);
+		/* Run inside the function's memory context. */
+		MemoryContextSwitchTo(oldcontext);
 		PG_TRY();
 		{
 			/* call pg_table_size to get the active table size */
 			entry->tablesize = (Size) DatumGetInt64(DirectFunctionCall1(pg_table_size, ObjectIdGetDatum(relOid)));
+
+#ifdef FAULT_INJECTOR
+			SIMPLE_FAULT_INJECTOR("diskquota_fetch_table_stat");
+#endif
+			/* Commit the subtransaction. */
+			ReleaseCurrentSubTransaction();
+			MemoryContextSwitchTo(oldcontext);
+			CurrentResourceOwner = oldowner;
 		}
 		PG_CATCH();
 		{
+			ErrorData  *edata;
+
+			/*
+			 * Save the error information, or we have no idea what is causing the
+			 * exception.
+			 */
+			MemoryContextSwitchTo(oldcontext);
+			edata = CopyErrorData();
 			FlushErrorState();
+
+			/* Abort the subtransaction and rollback. */
+			RollbackAndReleaseCurrentSubTransaction();
+			MemoryContextSwitchTo(oldcontext);
+			CurrentResourceOwner = oldowner;
+
+			elog(WARNING, "%s", edata->message);
+			FreeErrorData(edata);
+
 			entry->tablesize = 0;
 		}
 		PG_END_TRY();
diff --git a/init_file b/init_file
index 4f7aa9851f6..874fc9ec888 100644
--- a/init_file
+++ b/init_file
@@ -11,4 +11,9 @@ m/diskquota_utility.c:\d+\)/
 s/diskquota_utility.c:\d+\)/diskquota_utility.c:xxx/
 m/^CONTEXT:*/
 s/^CONTEXT:/DETAIL:/
+
+# Remove segment identifiers from error message.
+# E.g., (slice1 XXX.XXX.XXX.XXX:XXXX pid=XXXX)
+m/(slice\d+ [0-9.]+:\d+ pid=\d+)/
+s/(slice\d+ [0-9.]+:\d+ pid=\d+)//
 -- end_matchsubs
diff --git a/sql/test_fetch_table_stat.sql b/sql/test_fetch_table_stat.sql
new file mode 100644
index 00000000000..65cbfeb56dd
--- /dev/null
+++ b/sql/test_fetch_table_stat.sql
@@ -0,0 +1,24 @@
+--
+-- 1. Test that when an error occurs in diskquota_fetch_table_stat
+-- the error message is preserved for us to debug.
+--
+
+CREATE TABLE t_error_handling (i int);
+-- Inject an error to a segment server, since this UDF is only called on segments.
+SELECT gp_inject_fault_infinite('diskquota_fetch_table_stat', 'error', dbid)
+  FROM gp_segment_configuration WHERE role='p' AND content=0;
+
+-- Dispatch diskquota_fetch_table_stat to segments.
+-- There should be a warning message from segment server saying:
+-- fault triggered, fault name:'diskquota_fetch_table_stat' fault type:'error'
+-- We're not interested in the oid here, we aggregate the result by COUNT(*).
+SELECT COUNT(*)
+  FROM (SELECT diskquota.diskquota_fetch_table_stat(1, array[(SELECT oid FROM pg_class WHERE relname='t_error_handling')])
+          FROM gp_dist_random('gp_id') WHERE gp_segment_id=0) AS count;
+
+-- Reset the fault injector to prevent future failure.
+SELECT gp_inject_fault_infinite('diskquota_fetch_table_stat', 'reset', dbid)
+  FROM gp_segment_configuration WHERE role='p' AND content=0;
+
+-- Do some clean-ups.
+DROP TABLE t_error_handling;
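Distilled from the gp_activetable.c change above, the guard generalizes to any PG_TRY-protected call that might error out while holding locks. The following is a minimal sketch only: the wrapper name size_or_zero and its standalone form are invented here for illustration (the patch inlines this logic in a loop inside get_active_tables_stats()), and the usual headers from the patch (access/xact.h, utils/builtins.h, etc.) are assumed.

static int64
size_or_zero(Oid relOid)
{
	MemoryContext oldcontext = CurrentMemoryContext;
	ResourceOwner oldowner = CurrentResourceOwner;
	int64		size = 0;

	/* Start a subtransaction so a failing call releases its locks promptly. */
	BeginInternalSubTransaction(NULL);
	MemoryContextSwitchTo(oldcontext);
	PG_TRY();
	{
		size = DatumGetInt64(DirectFunctionCall1(pg_table_size,
												 ObjectIdGetDatum(relOid)));
		/* Success: commit the subtransaction. */
		ReleaseCurrentSubTransaction();
	}
	PG_CATCH();
	{
		ErrorData  *edata;

		/* Copy the error before flushing it, so the message survives. */
		MemoryContextSwitchTo(oldcontext);
		edata = CopyErrorData();
		FlushErrorState();
		/* Roll back the subtransaction; this drops the locks it took. */
		RollbackAndReleaseCurrentSubTransaction();
		elog(WARNING, "%s", edata->message);
		FreeErrorData(edata);
	}
	PG_END_TRY();
	/* Restore the caller's memory context and resource owner. */
	MemoryContextSwitchTo(oldcontext);
	CurrentResourceOwner = oldowner;
	return size;
}

Note the ordering: CopyErrorData() must run before FlushErrorState(), and the rollback must happen before any further database access, otherwise the very deadlock described in the commit message can still occur.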
From 62d7afc9ae18014c66b73acc6d12f606ccd0c1c2 Mon Sep 17 00:00:00 2001
From: Xing Guo
Date: Tue, 23 Nov 2021 10:53:42 +0800
Subject: [PATCH 082/330] Add test case for appendonly relations. (#85)

Currently, tests for appendonly relations are missing. This patch adds
one test for that.

Co-Authored-by: Xuebin Su
Co-Authored-by: Hao Zhang
---
 diskquota_schedule           |  1 +
 expected/test_appendonly.out | 76 ++++++++++++++++++++++++++++++++++++
 sql/test_appendonly.sql      | 47 ++++++++++++++++++++++
 3 files changed, 124 insertions(+)
 create mode 100644 expected/test_appendonly.out
 create mode 100644 sql/test_appendonly.sql

diff --git a/diskquota_schedule b/diskquota_schedule
index 8c5594ad9f3..0caef975e3e 100644
--- a/diskquota_schedule
+++ b/diskquota_schedule
@@ -15,4 +15,5 @@ test: test_manytable
 test: test_pause_and_resume
 test: test_many_active_tables
 test: test_fetch_table_stat
+test: test_appendonly
 test: clean
diff --git a/expected/test_appendonly.out b/expected/test_appendonly.out
new file mode 100644
index 00000000000..4cdcc66d589
--- /dev/null
+++ b/expected/test_appendonly.out
@@ -0,0 +1,76 @@
+-- Create new schema for running tests.
+CREATE SCHEMA s_appendonly;
+SET search_path TO s_appendonly;
+CREATE TABLE t_ao(i int) WITH (appendonly=true);
+NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table.
+HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
+CREATE TABLE t_aoco(i int) WITH (appendonly=true, orientation=column);
+NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table.
+HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
+-- Create an index on t_ao so that there will be pg_aoblkdir_XXX relations.
+CREATE INDEX index_t ON t_ao(i);
+CREATE INDEX index_t2 ON t_aoco(i);
+-- 1. Show that the relation's size in diskquota.table_size
+-- is identical to the result of pg_table_size().
+INSERT INTO t_ao SELECT generate_series(1, 100);
+INSERT INTO t_aoco SELECT generate_series(1, 100);
+SELECT pg_sleep(5);
+ pg_sleep
+----------
+
+(1 row)
+
+-- Query the size of t_ao.
+SELECT tableid::regclass, size
+  FROM diskquota.table_size
+  WHERE tableid=(SELECT oid FROM pg_class WHERE relname='t_ao') and segid=-1;
+ tableid |  size
+---------+--------
+ t_ao    | 590936
+(1 row)
+
+SELECT pg_table_size('t_ao');
+ pg_table_size
+---------------
+        590936
+(1 row)
+
+-- Query the size of t_aoco.
+SELECT tableid::regclass, size
+  FROM diskquota.table_size
+  WHERE tableid=(SELECT oid FROM pg_class WHERE relname='t_aoco') and segid=-1;
+ tableid |  size
+---------+--------
+ t_aoco  | 590352
+(1 row)
+
+SELECT pg_table_size('t_aoco');
+ pg_table_size
+---------------
+        590352
+(1 row)
+
+-- 2. Test that we are able to perform quota limit on appendonly tables.
+SELECT diskquota.set_schema_quota('s_appendonly', '1 MB');
+ set_schema_quota
+------------------
+
+(1 row)
+
+-- expect success.
+INSERT INTO t_ao SELECT generate_series(1, 1000);
+SELECT pg_sleep(5);
+ pg_sleep
+----------
+
+(1 row)
+
+-- expect fail.
+INSERT INTO t_ao SELECT generate_series(1, 10);
+ERROR: schema's disk space quota exceeded with name:s_appendonly
+INSERT INTO t_aoco SELECT generate_series(1, 10);
+ERROR: schema's disk space quota exceeded with name:s_appendonly
+DROP TABLE t_ao;
+DROP TABLE t_aoco;
+SET search_path TO DEFAULT;
+DROP SCHEMA s_appendonly;
diff --git a/sql/test_appendonly.sql b/sql/test_appendonly.sql
new file mode 100644
index 00000000000..020fbd89997
--- /dev/null
+++ b/sql/test_appendonly.sql
@@ -0,0 +1,47 @@
+-- Create new schema for running tests.
+CREATE SCHEMA s_appendonly;
+SET search_path TO s_appendonly;
+
+CREATE TABLE t_ao(i int) WITH (appendonly=true);
+CREATE TABLE t_aoco(i int) WITH (appendonly=true, orientation=column);
+-- Create an index on t_ao so that there will be pg_aoblkdir_XXX relations.
+CREATE INDEX index_t ON t_ao(i);
+CREATE INDEX index_t2 ON t_aoco(i);
+
+-- 1. Show that the relation's size in diskquota.table_size
+-- is identical to the result of pg_table_size().
+INSERT INTO t_ao SELECT generate_series(1, 100);
+INSERT INTO t_aoco SELECT generate_series(1, 100);
+
+SELECT pg_sleep(5);
+
+-- Query the size of t_ao.
+SELECT tableid::regclass, size
+  FROM diskquota.table_size
+  WHERE tableid=(SELECT oid FROM pg_class WHERE relname='t_ao') and segid=-1;
+
+SELECT pg_table_size('t_ao');
+
+-- Query the size of t_aoco.
+SELECT tableid::regclass, size
+  FROM diskquota.table_size
+  WHERE tableid=(SELECT oid FROM pg_class WHERE relname='t_aoco') and segid=-1;
+
+SELECT pg_table_size('t_aoco');
+
+-- 2. Test that we are able to perform quota limit on appendonly tables.
+SELECT diskquota.set_schema_quota('s_appendonly', '1 MB');
+-- expect success.
+INSERT INTO t_ao SELECT generate_series(1, 1000);
+
+SELECT pg_sleep(5);
+
+-- expect fail.
+INSERT INTO t_ao SELECT generate_series(1, 10);
+INSERT INTO t_aoco SELECT generate_series(1, 10);
+
+DROP TABLE t_ao;
+DROP TABLE t_aoco;
+
+SET search_path TO DEFAULT;
+DROP SCHEMA s_appendonly;
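The next patch sizes relations by stat()ing their data files from the (reltablespace, relfilenode) pair instead of going through pg_table_size(). For orientation (a quick query, not part of the patch; the returned path depends on your cluster layout), the file a segment would stat can be inspected with the standard catalog helper:

SELECT relfilenode, reltablespace FROM pg_class WHERE relname = 't2';
SELECT pg_relation_filepath('t2');

Relations larger than 1 GB are split into additional files suffixed .1, .2, and so on, which is why the new calculate_relation_size_all_forks() below keeps stat()ing "<path>.<segcount>" until it hits ENOENT.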
"catalog/pg_tablespace.h" #include "catalog/pg_type.h" #include "commands/dbcommands.h" #include "commands/extension.h" @@ -62,6 +64,7 @@ PG_FUNCTION_INFO_V1(set_schema_tablespace_quota); PG_FUNCTION_INFO_V1(set_role_tablespace_quota); PG_FUNCTION_INFO_V1(update_diskquota_db_list); PG_FUNCTION_INFO_V1(set_per_segment_quota); +PG_FUNCTION_INFO_V1(relation_size_local); /* timeout count to wait response from launcher process, in 1/10 sec */ #define WAIT_TIME_COUNT 1200 @@ -76,6 +79,7 @@ static void set_quota_config_internal(Oid targetoid, int64 quota_limit_mb, Quota static void set_target_internal(Oid primaryoid, Oid spcoid, int64 quota_limit_mb, QuotaType type); static bool generate_insert_table_size_sql(StringInfoData *buf, int extMajorVersion); static char *convert_oidlist_to_string(List *oidlist); +static int64 calculate_relation_size_all_forks(RelFileNodeBackend *rnode); int get_ext_major_version(void); List *get_rel_oid_list(void); @@ -1190,3 +1194,85 @@ get_rel_oid_list(void) } return oidlist; } + +/* + * calculate size of (all forks of) a relation in transaction + * This function is following calculate_relation_size() + */ +static int64 +calculate_relation_size_all_forks(RelFileNodeBackend *rnode) +{ + int64 totalsize = 0; + ForkNumber forkNum; + int64 size = 0; + char *relationpath; + char pathname[MAXPGPATH]; + unsigned int segcount = 0; + + PG_TRY(); + { + for (forkNum = 0; forkNum <= MAX_FORKNUM; forkNum++) + { + relationpath = relpathbackend(rnode->node, rnode->backend, forkNum); + size = 0; + + for (segcount = 0;; segcount++) + { + struct stat fst; + + CHECK_FOR_INTERRUPTS(); + + if (segcount == 0) + snprintf(pathname, MAXPGPATH, "%s", + relationpath); + else + snprintf(pathname, MAXPGPATH, "%s.%u", + relationpath, segcount); + + if (stat(pathname, &fst) < 0) + { + if (errno == ENOENT) + break; + else + /* TODO: Do we need this? */ + ereport(ERROR, + (errcode_for_file_access(), + errmsg("[diskquota] could not stat file %s: %m", pathname))); + } + size += fst.st_size; + } + + totalsize += size; + } + } + PG_CATCH(); + { + /* TODO: Record the error message to pg_log */ + HOLD_INTERRUPTS(); + FlushErrorState(); + RESUME_INTERRUPTS(); + } + PG_END_TRY(); + + return totalsize; +} + +Datum +relation_size_local(PG_FUNCTION_ARGS) +{ + Oid reltablespace = PG_GETARG_OID(0); + Oid relfilenode = PG_GETARG_OID(1); + int backend = PG_GETARG_BOOL(2) ? -2 : -1; + RelFileNodeBackend rnode = {0}; + int64 size = 0; + + rnode.node.dbNode = MyDatabaseId; + rnode.node.relNode = relfilenode; + rnode.node.spcNode = OidIsValid(reltablespace) ? + reltablespace : MyDatabaseTableSpace; + rnode.backend = backend; + + size = calculate_relation_size_all_forks(&rnode); + + PG_RETURN_INT64(size); +} diff --git a/expected/test_relation_size.out b/expected/test_relation_size.out new file mode 100644 index 00000000000..a0485b7a5c7 --- /dev/null +++ b/expected/test_relation_size.out @@ -0,0 +1,69 @@ +CREATE TEMP TABLE t1(i int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
+INSERT INTO t1 SELECT generate_series(1, 10000); +SELECT diskquota.relation_size('t1', true); + relation_size +--------------- + 688128 +(1 row) + +SELECT pg_table_size('t1'); + pg_table_size +--------------- + 688128 +(1 row) + +CREATE TABLE t2(i int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +INSERT INTO t2 SELECT generate_series(1, 10000); +SELECT diskquota.relation_size('t2', false); + relation_size +--------------- + 688128 +(1 row) + +SELECT pg_table_size('t2'); + pg_table_size +--------------- + 688128 +(1 row) + +-- start_ignore +\! mkdir /tmp/test_spc +-- end_ignore +DROP TABLESPACE IF EXISTS test_spc; +NOTICE: tablespace "test_spc" does not exist, skipping +CREATE TABLESPACE test_spc LOCATION '/tmp/test_spc'; +ALTER TABLE t1 SET TABLESPACE test_spc; +INSERT INTO t1 SELECT generate_series(1, 10000); +SELECT diskquota.relation_size('t1', true); + relation_size +--------------- + 1081344 +(1 row) + +SELECT pg_table_size('t1'); + pg_table_size +--------------- + 1081344 +(1 row) + +ALTER TABLE t2 SET TABLESPACE test_spc; +INSERT INTO t2 SELECT generate_series(1, 10000); +SELECT diskquota.relation_size('t2', false); + relation_size +--------------- + 1081344 +(1 row) + +SELECT pg_table_size('t2'); + pg_table_size +--------------- + 1081344 +(1 row) + +DROP TABLE t1, t2; +DROP TABLESPACE test_spc; +\! rm -rf /tmp/test_spc diff --git a/sql/test_relation_size.sql b/sql/test_relation_size.sql new file mode 100644 index 00000000000..d52f0b95e49 --- /dev/null +++ b/sql/test_relation_size.sql @@ -0,0 +1,30 @@ +CREATE TEMP TABLE t1(i int); +INSERT INTO t1 SELECT generate_series(1, 10000); +SELECT diskquota.relation_size('t1', true); +SELECT pg_table_size('t1'); + +CREATE TABLE t2(i int); +INSERT INTO t2 SELECT generate_series(1, 10000); +SELECT diskquota.relation_size('t2', false); +SELECT pg_table_size('t2'); + +-- start_ignore +\! mkdir /tmp/test_spc +-- end_ignore +DROP TABLESPACE IF EXISTS test_spc; +CREATE TABLESPACE test_spc LOCATION '/tmp/test_spc'; + +ALTER TABLE t1 SET TABLESPACE test_spc; +INSERT INTO t1 SELECT generate_series(1, 10000); +SELECT diskquota.relation_size('t1', true); +SELECT pg_table_size('t1'); + +ALTER TABLE t2 SET TABLESPACE test_spc; +INSERT INTO t2 SELECT generate_series(1, 10000); +SELECT diskquota.relation_size('t2', false); +SELECT pg_table_size('t2'); + + +DROP TABLE t1, t2; +DROP TABLESPACE test_spc; +\! rm -rf /tmp/test_spc From f8f8a08db697a6905b7d2240ad9cddbe2e1c9d96 Mon Sep 17 00:00:00 2001 From: Xing GUO Date: Mon, 29 Nov 2021 14:28:15 +0800 Subject: [PATCH 084/330] Fix testing failure. NFC. Co-authored-by: hzhang2 --- expected/test_relation_size.out | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/expected/test_relation_size.out b/expected/test_relation_size.out index a0485b7a5c7..4828b10d74d 100644 --- a/expected/test_relation_size.out +++ b/expected/test_relation_size.out @@ -33,7 +33,7 @@ SELECT pg_table_size('t2'); -- start_ignore \! 
mkdir /tmp/test_spc -- end_ignore -DROP TABLESPACE IF EXISTS test_spc; +DROP TABLESPACE IF EXISTS test_spc; NOTICE: tablespace "test_spc" does not exist, skipping CREATE TABLESPACE test_spc LOCATION '/tmp/test_spc'; ALTER TABLE t1 SET TABLESPACE test_spc; From be0c971667911af11fd4c2fc2cb77c36ee5f34a9 Mon Sep 17 00:00:00 2001 From: Xing Guo Date: Mon, 29 Nov 2021 15:39:40 +0800 Subject: [PATCH 085/330] Introduce isolation2 test framework to diskquota. (#92) This patch introduces isolation2 test framework to diskquota. We are able to write concurrent test cases for diskquota in future. --- Makefile | 24 ++-------- diskquota_utility.c | 2 + tests/Makefile | 14 ++++++ init_file => tests/init_file | 0 tests/isolation2/.gitignore | 2 + tests/isolation2/Makefile | 12 +++++ .../isolation2/expected/cleanup.out | 0 tests/isolation2/expected/init.out | 16 +++++++ .../isolation2/expected/prepare.out | 0 .../expected/test_relation_size.out | 48 +++++++++++++++++++ tests/isolation2/isolation2_schedule | 4 ++ tests/isolation2/sql/cleanup.sql | 3 ++ tests/isolation2/sql/init.sql | 23 +++++++++ tests/isolation2/sql/prepare.sql | 4 ++ tests/isolation2/sql/test_relation_size.sql | 25 ++++++++++ tests/regress/.gitignore | 1 + tests/regress/Makefile | 21 ++++++++ {data => tests/regress/data}/csmall.txt | 0 .../regress/diskquota_schedule | 0 .../regress/diskquota_schedule_int | 0 .../regress/expected}/clean.out | 0 tests/regress/expected/dummy.out | 0 {expected => tests/regress/expected}/init.out | 0 .../regress/expected}/prepare.out | 0 .../regress/expected}/test_appendonly.out | 0 .../regress/expected}/test_column.out | 0 .../regress/expected}/test_copy.out | 0 .../regress/expected}/test_delete_quota.out | 0 .../regress/expected}/test_drop_table.out | 0 .../regress/expected}/test_extension.out | 0 .../expected}/test_fast_disk_check.out | 0 .../expected}/test_fetch_table_stat.out | 0 .../regress/expected}/test_index.out | 0 .../expected}/test_insert_after_drop.out | 0 .../expected}/test_many_active_tables.out | 0 .../regress/expected}/test_manytable.out | 0 .../regress/expected}/test_mistake.out | 0 .../regress/expected}/test_partition.out | 0 .../expected}/test_pause_and_resume.out | 0 .../expected}/test_primary_failure.out | 0 .../regress/expected}/test_relation_size.out | 0 .../regress/expected}/test_rename.out | 0 .../regress/expected}/test_reschema.out | 0 .../regress/expected}/test_role.out | 0 .../regress/expected}/test_schema.out | 0 .../regress/expected}/test_table_size.out | 0 .../expected}/test_tablespace_role.out | 0 .../expected}/test_tablespace_role_perseg.out | 0 .../expected}/test_tablespace_schema.out | 0 .../test_tablespace_schema_perseg.out | 0 .../regress/expected}/test_temp_role.out | 0 .../regress/expected}/test_toast.out | 0 .../regress/expected}/test_truncate.out | 0 .../regress/expected}/test_update.out | 0 .../regress/expected}/test_vacuum.out | 0 {sql => tests/regress/sql}/clean.sql | 0 tests/regress/sql/dummy.sql | 0 {sql => tests/regress/sql}/init.sql | 0 {sql => tests/regress/sql}/prepare.sql | 0 .../regress/sql}/test_appendonly.sql | 0 {sql => tests/regress/sql}/test_column.sql | 0 {sql => tests/regress/sql}/test_copy.sql | 0 .../regress/sql}/test_delete_quota.sql | 0 .../regress/sql}/test_drop_table.sql | 0 {sql => tests/regress/sql}/test_extension.sql | 0 .../regress/sql}/test_fast_disk_check.sql | 0 .../regress/sql}/test_fetch_table_stat.sql | 0 {sql => tests/regress/sql}/test_index.sql | 0 .../regress/sql}/test_insert_after_drop.sql | 0 
.../regress/sql}/test_many_active_tables.sql | 0 {sql => tests/regress/sql}/test_manytable.sql | 0 {sql => tests/regress/sql}/test_mistake.sql | 0 {sql => tests/regress/sql}/test_partition.sql | 0 .../regress/sql}/test_pause_and_resume.sql | 0 .../regress/sql}/test_primary_failure.sql | 0 .../regress/sql}/test_relation_size.sql | 0 {sql => tests/regress/sql}/test_rename.sql | 0 {sql => tests/regress/sql}/test_reschema.sql | 0 {sql => tests/regress/sql}/test_role.sql | 0 {sql => tests/regress/sql}/test_schema.sql | 0 .../regress/sql}/test_table_size.sql | 0 .../regress/sql}/test_tablespace_role.sql | 0 .../sql}/test_tablespace_role_perseg.sql | 0 .../regress/sql}/test_tablespace_schema.sql | 0 .../sql}/test_tablespace_schema_perseg.sql | 0 {sql => tests/regress/sql}/test_temp_role.sql | 0 {sql => tests/regress/sql}/test_toast.sql | 0 {sql => tests/regress/sql}/test_truncate.sql | 0 {sql => tests/regress/sql}/test_update.sql | 0 {sql => tests/regress/sql}/test_vacuum.sql | 0 90 files changed, 180 insertions(+), 19 deletions(-) create mode 100644 tests/Makefile rename init_file => tests/init_file (100%) create mode 100644 tests/isolation2/.gitignore create mode 100644 tests/isolation2/Makefile rename expected/dummy.out => tests/isolation2/expected/cleanup.out (100%) create mode 100644 tests/isolation2/expected/init.out rename sql/dummy.sql => tests/isolation2/expected/prepare.out (100%) create mode 100644 tests/isolation2/expected/test_relation_size.out create mode 100644 tests/isolation2/isolation2_schedule create mode 100644 tests/isolation2/sql/cleanup.sql create mode 100644 tests/isolation2/sql/init.sql create mode 100644 tests/isolation2/sql/prepare.sql create mode 100644 tests/isolation2/sql/test_relation_size.sql create mode 100644 tests/regress/.gitignore create mode 100644 tests/regress/Makefile rename {data => tests/regress/data}/csmall.txt (100%) rename diskquota_schedule => tests/regress/diskquota_schedule (100%) rename diskquota_schedule_int => tests/regress/diskquota_schedule_int (100%) rename {expected => tests/regress/expected}/clean.out (100%) create mode 100644 tests/regress/expected/dummy.out rename {expected => tests/regress/expected}/init.out (100%) rename {expected => tests/regress/expected}/prepare.out (100%) rename {expected => tests/regress/expected}/test_appendonly.out (100%) rename {expected => tests/regress/expected}/test_column.out (100%) rename {expected => tests/regress/expected}/test_copy.out (100%) rename {expected => tests/regress/expected}/test_delete_quota.out (100%) rename {expected => tests/regress/expected}/test_drop_table.out (100%) rename {expected => tests/regress/expected}/test_extension.out (100%) rename {expected => tests/regress/expected}/test_fast_disk_check.out (100%) rename {expected => tests/regress/expected}/test_fetch_table_stat.out (100%) rename {expected => tests/regress/expected}/test_index.out (100%) rename {expected => tests/regress/expected}/test_insert_after_drop.out (100%) rename {expected => tests/regress/expected}/test_many_active_tables.out (100%) rename {expected => tests/regress/expected}/test_manytable.out (100%) rename {expected => tests/regress/expected}/test_mistake.out (100%) rename {expected => tests/regress/expected}/test_partition.out (100%) rename {expected => tests/regress/expected}/test_pause_and_resume.out (100%) rename {expected => tests/regress/expected}/test_primary_failure.out (100%) rename {expected => tests/regress/expected}/test_relation_size.out (100%) rename {expected => 
tests/regress/expected}/test_rename.out (100%) rename {expected => tests/regress/expected}/test_reschema.out (100%) rename {expected => tests/regress/expected}/test_role.out (100%) rename {expected => tests/regress/expected}/test_schema.out (100%) rename {expected => tests/regress/expected}/test_table_size.out (100%) rename {expected => tests/regress/expected}/test_tablespace_role.out (100%) rename {expected => tests/regress/expected}/test_tablespace_role_perseg.out (100%) rename {expected => tests/regress/expected}/test_tablespace_schema.out (100%) rename {expected => tests/regress/expected}/test_tablespace_schema_perseg.out (100%) rename {expected => tests/regress/expected}/test_temp_role.out (100%) rename {expected => tests/regress/expected}/test_toast.out (100%) rename {expected => tests/regress/expected}/test_truncate.out (100%) rename {expected => tests/regress/expected}/test_update.out (100%) rename {expected => tests/regress/expected}/test_vacuum.out (100%) rename {sql => tests/regress/sql}/clean.sql (100%) create mode 100644 tests/regress/sql/dummy.sql rename {sql => tests/regress/sql}/init.sql (100%) rename {sql => tests/regress/sql}/prepare.sql (100%) rename {sql => tests/regress/sql}/test_appendonly.sql (100%) rename {sql => tests/regress/sql}/test_column.sql (100%) rename {sql => tests/regress/sql}/test_copy.sql (100%) rename {sql => tests/regress/sql}/test_delete_quota.sql (100%) rename {sql => tests/regress/sql}/test_drop_table.sql (100%) rename {sql => tests/regress/sql}/test_extension.sql (100%) rename {sql => tests/regress/sql}/test_fast_disk_check.sql (100%) rename {sql => tests/regress/sql}/test_fetch_table_stat.sql (100%) rename {sql => tests/regress/sql}/test_index.sql (100%) rename {sql => tests/regress/sql}/test_insert_after_drop.sql (100%) rename {sql => tests/regress/sql}/test_many_active_tables.sql (100%) rename {sql => tests/regress/sql}/test_manytable.sql (100%) rename {sql => tests/regress/sql}/test_mistake.sql (100%) rename {sql => tests/regress/sql}/test_partition.sql (100%) rename {sql => tests/regress/sql}/test_pause_and_resume.sql (100%) rename {sql => tests/regress/sql}/test_primary_failure.sql (100%) rename {sql => tests/regress/sql}/test_relation_size.sql (100%) rename {sql => tests/regress/sql}/test_rename.sql (100%) rename {sql => tests/regress/sql}/test_reschema.sql (100%) rename {sql => tests/regress/sql}/test_role.sql (100%) rename {sql => tests/regress/sql}/test_schema.sql (100%) rename {sql => tests/regress/sql}/test_table_size.sql (100%) rename {sql => tests/regress/sql}/test_tablespace_role.sql (100%) rename {sql => tests/regress/sql}/test_tablespace_role_perseg.sql (100%) rename {sql => tests/regress/sql}/test_tablespace_schema.sql (100%) rename {sql => tests/regress/sql}/test_tablespace_schema_perseg.sql (100%) rename {sql => tests/regress/sql}/test_temp_role.sql (100%) rename {sql => tests/regress/sql}/test_toast.sql (100%) rename {sql => tests/regress/sql}/test_truncate.sql (100%) rename {sql => tests/regress/sql}/test_update.sql (100%) rename {sql => tests/regress/sql}/test_vacuum.sql (100%) diff --git a/Makefile b/Makefile index 920a1456dc9..e7dd111f901 100644 --- a/Makefile +++ b/Makefile @@ -10,24 +10,10 @@ OBJS = diskquota.o enforcement.o quotamodel.o gp_activetable.o diskquota_utility PG_CPPFLAGS = -I$(libpq_srcdir) SHLIB_LINK = $(libpq) -REGRESS = dummy -ifeq ("$(INTEGRATION_TEST)","y") -REGRESS_OPTS = --schedule=diskquota_schedule_int --init-file=init_file -else -REGRESS_OPTS = --schedule=diskquota_schedule --init-file=init_file 
-endif - -# FIXME: This check is hacky, since test_fetch_table_stat relies on the -# gp_inject_fault extension, we detect if the extension is built with -# greenplum by checking the output of the command 'pg_config --configure'. -# In the future, if the diskquota is built with GPDB7, or we backport the -# commit below to 6X_STABLE, we don't need this check. -# https://github.com/greenplum-db/gpdb/commit/8b897b12f6cb13753985faacab8e4053bf797a8b -ifneq (,$(findstring '--enable-debug-extensions',$(shell pg_config --configure))) -REGRESS_OPTS += --load-extension=gp_inject_fault -else -REGRESS_OPTS += --exclude-tests=test_fetch_table_stat -endif - PGXS := $(shell pg_config --pgxs) include $(PGXS) + +.PHONY: installcheck +installcheck: + $(MAKE) -C tests installcheck-regress + $(MAKE) -C tests installcheck-isolation2 diff --git a/diskquota_utility.c b/diskquota_utility.c index 9fa2642f34b..0253b1fe377 100644 --- a/diskquota_utility.c +++ b/diskquota_utility.c @@ -37,6 +37,7 @@ #include "tcop/utility.h" #include "utils/acl.h" #include "utils/builtins.h" +#include "utils/faultinjector.h" #include "utils/fmgroids.h" #include "utils/formatting.h" #include "utils/memutils.h" @@ -1229,6 +1230,7 @@ calculate_relation_size_all_forks(RelFileNodeBackend *rnode) snprintf(pathname, MAXPGPATH, "%s.%u", relationpath, segcount); + SIMPLE_FAULT_INJECTOR("diskquota_before_stat_relfilenode"); if (stat(pathname, &fst) < 0) { if (errno == ENOENT) diff --git a/tests/Makefile b/tests/Makefile new file mode 100644 index 00000000000..0c07a6fea6b --- /dev/null +++ b/tests/Makefile @@ -0,0 +1,14 @@ +.PHONY: installcheck-regress +installcheck-regress: + $(MAKE) -C regress installcheck + +## Check whether we are able to run isolation2 tests. +## If the gp_inject_fault extension is not enabled, we emit a warning message for that. +.PHONY: installcheck-isolation2 +installcheck-isolation2: +ifneq (,$(findstring '--enable-debug-extensions',$(shell pg_config --configure))) + $(MAKE) -C isolation2 installcheck +else + @echo -e "\033[0;33mThe gp_inject_fault extension is not enabled in the current build of Greenplum, isolation2 tests will not run." + @echo -e "To enable gp_inject_fault extension, append --enable-debug-extensions option to ./configure\033[0m" +endif diff --git a/init_file b/tests/init_file similarity index 100% rename from init_file rename to tests/init_file diff --git a/tests/isolation2/.gitignore b/tests/isolation2/.gitignore new file mode 100644 index 00000000000..dee11c2df0e --- /dev/null +++ b/tests/isolation2/.gitignore @@ -0,0 +1,2 @@ +sql_isolation_testcase.* +results/* diff --git a/tests/isolation2/Makefile b/tests/isolation2/Makefile new file mode 100644 index 00000000000..b96bb1c22d1 --- /dev/null +++ b/tests/isolation2/Makefile @@ -0,0 +1,12 @@ +PGXS := $(shell pg_config --pgxs) +include $(PGXS) + +## Build pg_isolation2_regress and install auxiliary scripts to the correct locations. 
+.PHONY: pg_isolation2_regress +pg_isolation2_regress: + $(MAKE) -C $(abs_top_srcdir)/src/test/isolation2 install + cp $(abs_top_srcdir)/src/test/isolation2/sql_isolation_testcase.py ./ + +.PHONY: installcheck +installcheck: pg_isolation2_regress + $(abs_top_srcdir)/src/test/isolation2/pg_isolation2_regress --init-file=../init_file --psqldir=$(PSQLDIR) --inputdir=./sql --schedule=./isolation2_schedule --load-extension=gp_inject_fault diff --git a/expected/dummy.out b/tests/isolation2/expected/cleanup.out similarity index 100% rename from expected/dummy.out rename to tests/isolation2/expected/cleanup.out diff --git a/tests/isolation2/expected/init.out b/tests/isolation2/expected/init.out new file mode 100644 index 00000000000..84b31e5e8a3 --- /dev/null +++ b/tests/isolation2/expected/init.out @@ -0,0 +1,16 @@ +-- start_ignore +CREATE DATABASE diskquota; +CREATE +-- end_ignore + +-- start_ignore +\! gpconfig -c shared_preload_libraries -v diskquota > /dev/null -- end_ignore +\! echo $? -- start_ignore +\! gpconfig -c diskquota.naptime -v 2 > /dev/null -- end_ignore +\! echo $? -- start_ignore +\! gpconfig -c max_worker_processes -v 20 > /dev/null -- end_ignore +\! echo $? +-- start_ignore +\! gpstop -raf > /dev/null -- end_ignore +\! echo $? +\! sleep 10 diff --git a/sql/dummy.sql b/tests/isolation2/expected/prepare.out similarity index 100% rename from sql/dummy.sql rename to tests/isolation2/expected/prepare.out diff --git a/tests/isolation2/expected/test_relation_size.out b/tests/isolation2/expected/test_relation_size.out new file mode 100644 index 00000000000..e9d620d6f17 --- /dev/null +++ b/tests/isolation2/expected/test_relation_size.out @@ -0,0 +1,48 @@ +-- +-- 1. Test that when a relation is dropped before diskquota.relation_size() +-- applying stat(2) on the physical file, diskquota.relation_size() consumes +-- the error and returns 0. +-- + +CREATE TABLE t_dummy_rel(i int); +CREATE +-- Insert a small amount of data to 't_dummy_rel'. +INSERT INTO t_dummy_rel SELECT generate_series(1, 100); +INSERT 100 +-- Shows that the size of relfilenode is not zero. +SELECT diskquota.relation_size('t_dummy_rel', false); + relation_size +--------------- + 98304 +(1 row) + +-- Inject 'suspension' to servers. +SELECT gp_inject_fault_infinite('diskquota_before_stat_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p'; + gp_inject_fault_infinite +-------------------------- + Success: + Success: + Success: + Success: +(4 rows) + +-- Session 1 will hang before applying stat(2) to the physical file. +1&: SELECT diskquota.relation_size('t_dummy_rel', false); +-- Drop the table. +DROP TABLE t_dummy_rel; +DROP +-- Remove the injected 'suspension'. +SELECT gp_inject_fault_infinite('diskquota_before_stat_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p'; + gp_inject_fault_infinite +-------------------------- + Success: + Success: + Success: + Success: +(4 rows) +-- Session 1 will continue and returns 0. +1<: <... 
completed> + relation_size +--------------- + 0 +(1 row) diff --git a/tests/isolation2/isolation2_schedule b/tests/isolation2/isolation2_schedule new file mode 100644 index 00000000000..b36d77dda18 --- /dev/null +++ b/tests/isolation2/isolation2_schedule @@ -0,0 +1,4 @@ +test: init +test: prepare +test: test_relation_size +test: cleanup diff --git a/tests/isolation2/sql/cleanup.sql b/tests/isolation2/sql/cleanup.sql new file mode 100644 index 00000000000..b569b6cce8b --- /dev/null +++ b/tests/isolation2/sql/cleanup.sql @@ -0,0 +1,3 @@ +-- start_ignore +DROP EXTENSION diskquota; +-- end_ignore diff --git a/tests/isolation2/sql/init.sql b/tests/isolation2/sql/init.sql new file mode 100644 index 00000000000..5c57c4bbdcd --- /dev/null +++ b/tests/isolation2/sql/init.sql @@ -0,0 +1,23 @@ +-- start_ignore +CREATE DATABASE diskquota; +-- end_ignore + +-- start_ignore +\! gpconfig -c shared_preload_libraries -v 'diskquota' > /dev/null +-- end_ignore +\! echo $? +-- start_ignore +\! gpconfig -c diskquota.naptime -v 2 > /dev/null +-- end_ignore +\! echo $? +-- start_ignore +\! gpconfig -c max_worker_processes -v 20 > /dev/null +-- end_ignore +\! echo $? + +-- start_ignore +\! gpstop -raf > /dev/null +-- end_ignore +\! echo $? + +\! sleep 10 diff --git a/tests/isolation2/sql/prepare.sql b/tests/isolation2/sql/prepare.sql new file mode 100644 index 00000000000..220ed13e887 --- /dev/null +++ b/tests/isolation2/sql/prepare.sql @@ -0,0 +1,4 @@ +-- start_ignore +CREATE EXTENSION diskquota; +SELECT diskquota.init_table_size_table(); +-- end_ignore diff --git a/tests/isolation2/sql/test_relation_size.sql b/tests/isolation2/sql/test_relation_size.sql new file mode 100644 index 00000000000..735fa3cc3c1 --- /dev/null +++ b/tests/isolation2/sql/test_relation_size.sql @@ -0,0 +1,25 @@ +-- +-- 1. Test that when a relation is dropped before diskquota.relation_size() +-- applying stat(2) on the physical file, diskquota.relation_size() consumes +-- the error and returns 0. +-- + +CREATE TABLE t_dummy_rel(i int); +-- Insert a small amount of data to 't_dummy_rel'. +INSERT INTO t_dummy_rel SELECT generate_series(1, 100); +-- Shows that the size of relfilenode is not zero. +SELECT diskquota.relation_size('t_dummy_rel', false); + +-- Inject 'suspension' to servers. +SELECT gp_inject_fault_infinite('diskquota_before_stat_relfilenode', 'suspend', dbid) + FROM gp_segment_configuration WHERE role='p'; + +-- Session 1 will hang before applying stat(2) to the physical file. +1&: SELECT diskquota.relation_size('t_dummy_rel', false); +-- Drop the table. +DROP TABLE t_dummy_rel; +-- Remove the injected 'suspension'. +SELECT gp_inject_fault_infinite('diskquota_before_stat_relfilenode', 'reset', dbid) + FROM gp_segment_configuration WHERE role='p'; +-- Session 1 will continue and returns 0. 
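-- A note on the session syntax used here (from the isolation2 framework):
-- 'N:' runs a statement synchronously in session N, 'N&:' launches it in
-- the background, and 'N<:' blocks until the backgrounded statement
-- completes and prints its result, which is how the suspended
-- relation_size() call above is joined.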
+1<: diff --git a/tests/regress/.gitignore b/tests/regress/.gitignore new file mode 100644 index 00000000000..484ab7e5c61 --- /dev/null +++ b/tests/regress/.gitignore @@ -0,0 +1 @@ +results/* diff --git a/tests/regress/Makefile b/tests/regress/Makefile new file mode 100644 index 00000000000..ca9f369c674 --- /dev/null +++ b/tests/regress/Makefile @@ -0,0 +1,21 @@ +REGRESS = dummy +ifeq ("$(INTEGRATION_TEST)","y") +REGRESS_OPTS = --schedule=diskquota_schedule_int --init-file=../init_file +else +REGRESS_OPTS = --schedule=diskquota_schedule --init-file=../init_file +endif + +# FIXME: This check is hacky, since test_fetch_table_stat relies on the +# gp_inject_fault extension, we detect if the extension is built with +# greenplum by checking the output of the command 'pg_config --configure'. +# In the future, if the diskquota is built with GPDB7, or we backport the +# commit below to 6X_STABLE, we don't need this check. +# https://github.com/greenplum-db/gpdb/commit/8b897b12f6cb13753985faacab8e4053bf797a8b +ifneq (,$(findstring '--enable-debug-extensions',$(shell pg_config --configure))) +REGRESS_OPTS += --load-extension=gp_inject_fault +else +REGRESS_OPTS += --exclude-tests=test_fetch_table_stat +endif + +PGXS := $(shell pg_config --pgxs) +include $(PGXS) diff --git a/data/csmall.txt b/tests/regress/data/csmall.txt similarity index 100% rename from data/csmall.txt rename to tests/regress/data/csmall.txt diff --git a/diskquota_schedule b/tests/regress/diskquota_schedule similarity index 100% rename from diskquota_schedule rename to tests/regress/diskquota_schedule diff --git a/diskquota_schedule_int b/tests/regress/diskquota_schedule_int similarity index 100% rename from diskquota_schedule_int rename to tests/regress/diskquota_schedule_int diff --git a/expected/clean.out b/tests/regress/expected/clean.out similarity index 100% rename from expected/clean.out rename to tests/regress/expected/clean.out diff --git a/tests/regress/expected/dummy.out b/tests/regress/expected/dummy.out new file mode 100644 index 00000000000..e69de29bb2d diff --git a/expected/init.out b/tests/regress/expected/init.out similarity index 100% rename from expected/init.out rename to tests/regress/expected/init.out diff --git a/expected/prepare.out b/tests/regress/expected/prepare.out similarity index 100% rename from expected/prepare.out rename to tests/regress/expected/prepare.out diff --git a/expected/test_appendonly.out b/tests/regress/expected/test_appendonly.out similarity index 100% rename from expected/test_appendonly.out rename to tests/regress/expected/test_appendonly.out diff --git a/expected/test_column.out b/tests/regress/expected/test_column.out similarity index 100% rename from expected/test_column.out rename to tests/regress/expected/test_column.out diff --git a/expected/test_copy.out b/tests/regress/expected/test_copy.out similarity index 100% rename from expected/test_copy.out rename to tests/regress/expected/test_copy.out diff --git a/expected/test_delete_quota.out b/tests/regress/expected/test_delete_quota.out similarity index 100% rename from expected/test_delete_quota.out rename to tests/regress/expected/test_delete_quota.out diff --git a/expected/test_drop_table.out b/tests/regress/expected/test_drop_table.out similarity index 100% rename from expected/test_drop_table.out rename to tests/regress/expected/test_drop_table.out diff --git a/expected/test_extension.out b/tests/regress/expected/test_extension.out similarity index 100% rename from expected/test_extension.out rename to 
tests/regress/expected/test_extension.out diff --git a/expected/test_fast_disk_check.out b/tests/regress/expected/test_fast_disk_check.out similarity index 100% rename from expected/test_fast_disk_check.out rename to tests/regress/expected/test_fast_disk_check.out diff --git a/expected/test_fetch_table_stat.out b/tests/regress/expected/test_fetch_table_stat.out similarity index 100% rename from expected/test_fetch_table_stat.out rename to tests/regress/expected/test_fetch_table_stat.out diff --git a/expected/test_index.out b/tests/regress/expected/test_index.out similarity index 100% rename from expected/test_index.out rename to tests/regress/expected/test_index.out diff --git a/expected/test_insert_after_drop.out b/tests/regress/expected/test_insert_after_drop.out similarity index 100% rename from expected/test_insert_after_drop.out rename to tests/regress/expected/test_insert_after_drop.out diff --git a/expected/test_many_active_tables.out b/tests/regress/expected/test_many_active_tables.out similarity index 100% rename from expected/test_many_active_tables.out rename to tests/regress/expected/test_many_active_tables.out diff --git a/expected/test_manytable.out b/tests/regress/expected/test_manytable.out similarity index 100% rename from expected/test_manytable.out rename to tests/regress/expected/test_manytable.out diff --git a/expected/test_mistake.out b/tests/regress/expected/test_mistake.out similarity index 100% rename from expected/test_mistake.out rename to tests/regress/expected/test_mistake.out diff --git a/expected/test_partition.out b/tests/regress/expected/test_partition.out similarity index 100% rename from expected/test_partition.out rename to tests/regress/expected/test_partition.out diff --git a/expected/test_pause_and_resume.out b/tests/regress/expected/test_pause_and_resume.out similarity index 100% rename from expected/test_pause_and_resume.out rename to tests/regress/expected/test_pause_and_resume.out diff --git a/expected/test_primary_failure.out b/tests/regress/expected/test_primary_failure.out similarity index 100% rename from expected/test_primary_failure.out rename to tests/regress/expected/test_primary_failure.out diff --git a/expected/test_relation_size.out b/tests/regress/expected/test_relation_size.out similarity index 100% rename from expected/test_relation_size.out rename to tests/regress/expected/test_relation_size.out diff --git a/expected/test_rename.out b/tests/regress/expected/test_rename.out similarity index 100% rename from expected/test_rename.out rename to tests/regress/expected/test_rename.out diff --git a/expected/test_reschema.out b/tests/regress/expected/test_reschema.out similarity index 100% rename from expected/test_reschema.out rename to tests/regress/expected/test_reschema.out diff --git a/expected/test_role.out b/tests/regress/expected/test_role.out similarity index 100% rename from expected/test_role.out rename to tests/regress/expected/test_role.out diff --git a/expected/test_schema.out b/tests/regress/expected/test_schema.out similarity index 100% rename from expected/test_schema.out rename to tests/regress/expected/test_schema.out diff --git a/expected/test_table_size.out b/tests/regress/expected/test_table_size.out similarity index 100% rename from expected/test_table_size.out rename to tests/regress/expected/test_table_size.out diff --git a/expected/test_tablespace_role.out b/tests/regress/expected/test_tablespace_role.out similarity index 100% rename from expected/test_tablespace_role.out rename to 
tests/regress/expected/test_tablespace_role.out diff --git a/expected/test_tablespace_role_perseg.out b/tests/regress/expected/test_tablespace_role_perseg.out similarity index 100% rename from expected/test_tablespace_role_perseg.out rename to tests/regress/expected/test_tablespace_role_perseg.out diff --git a/expected/test_tablespace_schema.out b/tests/regress/expected/test_tablespace_schema.out similarity index 100% rename from expected/test_tablespace_schema.out rename to tests/regress/expected/test_tablespace_schema.out diff --git a/expected/test_tablespace_schema_perseg.out b/tests/regress/expected/test_tablespace_schema_perseg.out similarity index 100% rename from expected/test_tablespace_schema_perseg.out rename to tests/regress/expected/test_tablespace_schema_perseg.out diff --git a/expected/test_temp_role.out b/tests/regress/expected/test_temp_role.out similarity index 100% rename from expected/test_temp_role.out rename to tests/regress/expected/test_temp_role.out diff --git a/expected/test_toast.out b/tests/regress/expected/test_toast.out similarity index 100% rename from expected/test_toast.out rename to tests/regress/expected/test_toast.out diff --git a/expected/test_truncate.out b/tests/regress/expected/test_truncate.out similarity index 100% rename from expected/test_truncate.out rename to tests/regress/expected/test_truncate.out diff --git a/expected/test_update.out b/tests/regress/expected/test_update.out similarity index 100% rename from expected/test_update.out rename to tests/regress/expected/test_update.out diff --git a/expected/test_vacuum.out b/tests/regress/expected/test_vacuum.out similarity index 100% rename from expected/test_vacuum.out rename to tests/regress/expected/test_vacuum.out diff --git a/sql/clean.sql b/tests/regress/sql/clean.sql similarity index 100% rename from sql/clean.sql rename to tests/regress/sql/clean.sql diff --git a/tests/regress/sql/dummy.sql b/tests/regress/sql/dummy.sql new file mode 100644 index 00000000000..e69de29bb2d diff --git a/sql/init.sql b/tests/regress/sql/init.sql similarity index 100% rename from sql/init.sql rename to tests/regress/sql/init.sql diff --git a/sql/prepare.sql b/tests/regress/sql/prepare.sql similarity index 100% rename from sql/prepare.sql rename to tests/regress/sql/prepare.sql diff --git a/sql/test_appendonly.sql b/tests/regress/sql/test_appendonly.sql similarity index 100% rename from sql/test_appendonly.sql rename to tests/regress/sql/test_appendonly.sql diff --git a/sql/test_column.sql b/tests/regress/sql/test_column.sql similarity index 100% rename from sql/test_column.sql rename to tests/regress/sql/test_column.sql diff --git a/sql/test_copy.sql b/tests/regress/sql/test_copy.sql similarity index 100% rename from sql/test_copy.sql rename to tests/regress/sql/test_copy.sql diff --git a/sql/test_delete_quota.sql b/tests/regress/sql/test_delete_quota.sql similarity index 100% rename from sql/test_delete_quota.sql rename to tests/regress/sql/test_delete_quota.sql diff --git a/sql/test_drop_table.sql b/tests/regress/sql/test_drop_table.sql similarity index 100% rename from sql/test_drop_table.sql rename to tests/regress/sql/test_drop_table.sql diff --git a/sql/test_extension.sql b/tests/regress/sql/test_extension.sql similarity index 100% rename from sql/test_extension.sql rename to tests/regress/sql/test_extension.sql diff --git a/sql/test_fast_disk_check.sql b/tests/regress/sql/test_fast_disk_check.sql similarity index 100% rename from sql/test_fast_disk_check.sql rename to 
tests/regress/sql/test_fast_disk_check.sql diff --git a/sql/test_fetch_table_stat.sql b/tests/regress/sql/test_fetch_table_stat.sql similarity index 100% rename from sql/test_fetch_table_stat.sql rename to tests/regress/sql/test_fetch_table_stat.sql diff --git a/sql/test_index.sql b/tests/regress/sql/test_index.sql similarity index 100% rename from sql/test_index.sql rename to tests/regress/sql/test_index.sql diff --git a/sql/test_insert_after_drop.sql b/tests/regress/sql/test_insert_after_drop.sql similarity index 100% rename from sql/test_insert_after_drop.sql rename to tests/regress/sql/test_insert_after_drop.sql diff --git a/sql/test_many_active_tables.sql b/tests/regress/sql/test_many_active_tables.sql similarity index 100% rename from sql/test_many_active_tables.sql rename to tests/regress/sql/test_many_active_tables.sql diff --git a/sql/test_manytable.sql b/tests/regress/sql/test_manytable.sql similarity index 100% rename from sql/test_manytable.sql rename to tests/regress/sql/test_manytable.sql diff --git a/sql/test_mistake.sql b/tests/regress/sql/test_mistake.sql similarity index 100% rename from sql/test_mistake.sql rename to tests/regress/sql/test_mistake.sql diff --git a/sql/test_partition.sql b/tests/regress/sql/test_partition.sql similarity index 100% rename from sql/test_partition.sql rename to tests/regress/sql/test_partition.sql diff --git a/sql/test_pause_and_resume.sql b/tests/regress/sql/test_pause_and_resume.sql similarity index 100% rename from sql/test_pause_and_resume.sql rename to tests/regress/sql/test_pause_and_resume.sql diff --git a/sql/test_primary_failure.sql b/tests/regress/sql/test_primary_failure.sql similarity index 100% rename from sql/test_primary_failure.sql rename to tests/regress/sql/test_primary_failure.sql diff --git a/sql/test_relation_size.sql b/tests/regress/sql/test_relation_size.sql similarity index 100% rename from sql/test_relation_size.sql rename to tests/regress/sql/test_relation_size.sql diff --git a/sql/test_rename.sql b/tests/regress/sql/test_rename.sql similarity index 100% rename from sql/test_rename.sql rename to tests/regress/sql/test_rename.sql diff --git a/sql/test_reschema.sql b/tests/regress/sql/test_reschema.sql similarity index 100% rename from sql/test_reschema.sql rename to tests/regress/sql/test_reschema.sql diff --git a/sql/test_role.sql b/tests/regress/sql/test_role.sql similarity index 100% rename from sql/test_role.sql rename to tests/regress/sql/test_role.sql diff --git a/sql/test_schema.sql b/tests/regress/sql/test_schema.sql similarity index 100% rename from sql/test_schema.sql rename to tests/regress/sql/test_schema.sql diff --git a/sql/test_table_size.sql b/tests/regress/sql/test_table_size.sql similarity index 100% rename from sql/test_table_size.sql rename to tests/regress/sql/test_table_size.sql diff --git a/sql/test_tablespace_role.sql b/tests/regress/sql/test_tablespace_role.sql similarity index 100% rename from sql/test_tablespace_role.sql rename to tests/regress/sql/test_tablespace_role.sql diff --git a/sql/test_tablespace_role_perseg.sql b/tests/regress/sql/test_tablespace_role_perseg.sql similarity index 100% rename from sql/test_tablespace_role_perseg.sql rename to tests/regress/sql/test_tablespace_role_perseg.sql diff --git a/sql/test_tablespace_schema.sql b/tests/regress/sql/test_tablespace_schema.sql similarity index 100% rename from sql/test_tablespace_schema.sql rename to tests/regress/sql/test_tablespace_schema.sql diff --git a/sql/test_tablespace_schema_perseg.sql 
b/tests/regress/sql/test_tablespace_schema_perseg.sql similarity index 100% rename from sql/test_tablespace_schema_perseg.sql rename to tests/regress/sql/test_tablespace_schema_perseg.sql diff --git a/sql/test_temp_role.sql b/tests/regress/sql/test_temp_role.sql similarity index 100% rename from sql/test_temp_role.sql rename to tests/regress/sql/test_temp_role.sql diff --git a/sql/test_toast.sql b/tests/regress/sql/test_toast.sql similarity index 100% rename from sql/test_toast.sql rename to tests/regress/sql/test_toast.sql diff --git a/sql/test_truncate.sql b/tests/regress/sql/test_truncate.sql similarity index 100% rename from sql/test_truncate.sql rename to tests/regress/sql/test_truncate.sql diff --git a/sql/test_update.sql b/tests/regress/sql/test_update.sql similarity index 100% rename from sql/test_update.sql rename to tests/regress/sql/test_update.sql diff --git a/sql/test_vacuum.sql b/tests/regress/sql/test_vacuum.sql similarity index 100% rename from sql/test_vacuum.sql rename to tests/regress/sql/test_vacuum.sql From 8b260f1925e26efbf7dba408d07433f4b71ffa6e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Xuebin=20Su=20=28=E8=8B=8F=E5=AD=A6=E6=96=8C=29?= <12034000+xuebinsu@users.noreply.github.com> Date: Tue, 30 Nov 2021 10:47:43 +0800 Subject: [PATCH 086/330] Fix incorrect relation size for AO tables (#93) * Fix incorrect relation size for AO tables * Rename function to make it more consistent * Remove try-catch when stat a file * Add concurrent write test case for AO table --- diskquota--2.0.sql | 10 +- diskquota_utility.c | 106 ++++++++++-------- .../expected/test_relation_size.out | 43 ++++++- tests/isolation2/sql/test_relation_size.sql | 27 ++++- tests/regress/expected/test_relation_size.out | 41 ++++++- tests/regress/sql/test_relation_size.sql | 20 +++- 6 files changed, 172 insertions(+), 75 deletions(-) diff --git a/diskquota--2.0.sql b/diskquota--2.0.sql index cdd304eb430..fe99074d702 100644 --- a/diskquota--2.0.sql +++ b/diskquota--2.0.sql @@ -132,21 +132,21 @@ DROP FUNCTION diskquota.diskquota_start_worker(); CREATE OR REPLACE FUNCTION diskquota.relation_size_local( reltablespace oid, relfilenode oid, - is_temp boolean) + relpersistence "char", + relstorage "char") RETURNS bigint STRICT AS 'MODULE_PATHNAME', 'relation_size_local' LANGUAGE C; CREATE OR REPLACE FUNCTION diskquota.relation_size( - relation regclass, - is_temp boolean) + relation regclass) RETURNS bigint STRICT AS $$ SELECT sum(size)::bigint FROM ( - SELECT diskquota.relation_size_local(reltablespace, relfilenode, is_temp) AS size + SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, relstorage) AS size FROM gp_dist_random('pg_class') WHERE oid = relation UNION ALL - SELECT diskquota.relation_size_local(reltablespace, relfilenode, is_temp) AS size + SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, relstorage) AS size FROM pg_class WHERE oid = relation ) AS t $$ LANGUAGE SQL; diff --git a/diskquota_utility.c b/diskquota_utility.c index 0253b1fe377..668c3716cfc 100644 --- a/diskquota_utility.c +++ b/diskquota_utility.c @@ -20,6 +20,7 @@ #include #include +#include "access/aomd.h" #include "access/xact.h" #include "catalog/namespace.h" #include "catalog/objectaccess.h" @@ -80,7 +81,7 @@ static void set_quota_config_internal(Oid targetoid, int64 quota_limit_mb, Quota static void set_target_internal(Oid primaryoid, Oid spcoid, int64 quota_limit_mb, QuotaType type); static bool generate_insert_table_size_sql(StringInfoData *buf, int extMajorVersion); static 
char *convert_oidlist_to_string(List *oidlist); -static int64 calculate_relation_size_all_forks(RelFileNodeBackend *rnode); +static int64 calculate_relation_size_all_forks(RelFileNodeBackend *rnode, char relstorage); int get_ext_major_version(void); List *get_rel_oid_list(void); @@ -1196,67 +1197,74 @@ get_rel_oid_list(void) return oidlist; } +typedef struct +{ + char *relation_path; + int64 size; +} RelationFileStatCtx; + +static bool +relation_file_stat(int segno, void *ctx) +{ + RelationFileStatCtx *stat_ctx = (RelationFileStatCtx *)ctx; + char file_path[MAXPGPATH] = {0}; + if (segno == 0) + snprintf(file_path, MAXPGPATH, "%s", stat_ctx->relation_path); + else + snprintf(file_path, MAXPGPATH, "%s.%u", stat_ctx->relation_path, segno); + struct stat fst; + SIMPLE_FAULT_INJECTOR("diskquota_before_stat_relfilenode"); + if (stat(file_path, &fst) < 0) + { + if (errno != ENOENT) + ereport(WARNING, + (errcode_for_file_access(), + errmsg("[diskquota] could not stat file %s: %m", file_path))); + return false; + } + stat_ctx->size += fst.st_size; + return true; +} + /* * calculate size of (all forks of) a relation in transaction * This function is following calculate_relation_size() */ static int64 -calculate_relation_size_all_forks(RelFileNodeBackend *rnode) +calculate_relation_size_all_forks(RelFileNodeBackend *rnode, char relstorage) { int64 totalsize = 0; ForkNumber forkNum; - int64 size = 0; - char *relationpath; - char pathname[MAXPGPATH]; - unsigned int segcount = 0; + unsigned int segno = 0; - PG_TRY(); + if (relstorage == RELSTORAGE_HEAP) { for (forkNum = 0; forkNum <= MAX_FORKNUM; forkNum++) { - relationpath = relpathbackend(rnode->node, rnode->backend, forkNum); - size = 0; - - for (segcount = 0;; segcount++) + RelationFileStatCtx ctx = {0}; + ctx.relation_path = relpathbackend(rnode->node, rnode->backend, forkNum); + ctx.size = 0; + for (segno = 0; ; segno++) { - struct stat fst; - - CHECK_FOR_INTERRUPTS(); - - if (segcount == 0) - snprintf(pathname, MAXPGPATH, "%s", - relationpath); - else - snprintf(pathname, MAXPGPATH, "%s.%u", - relationpath, segcount); - - SIMPLE_FAULT_INJECTOR("diskquota_before_stat_relfilenode"); - if (stat(pathname, &fst) < 0) - { - if (errno == ENOENT) - break; - else - /* TODO: Do we need this? */ - ereport(ERROR, - (errcode_for_file_access(), - errmsg("[diskquota] could not stat file %s: %m", pathname))); - } - size += fst.st_size; + if (!relation_file_stat(segno, &ctx)) + break; } - - totalsize += size; + totalsize += ctx.size; } + return totalsize; + } + else if (relstorage == RELSTORAGE_AOROWS || relstorage == RELSTORAGE_AOCOLS) + { + RelationFileStatCtx ctx = {0}; + ctx.relation_path = relpathbackend(rnode->node, rnode->backend, MAIN_FORKNUM); + ctx.size = 0; + ao_foreach_extent_file(relation_file_stat, &ctx); + return ctx.size; } - PG_CATCH(); + else { - /* TODO: Record the error message to pg_log */ - HOLD_INTERRUPTS(); - FlushErrorState(); - RESUME_INTERRUPTS(); + return 0; } - PG_END_TRY(); - - return totalsize; } Datum @@ -1264,17 +1272,17 @@ relation_size_local(PG_FUNCTION_ARGS) { Oid reltablespace = PG_GETARG_OID(0); Oid relfilenode = PG_GETARG_OID(1); - int backend = PG_GETARG_BOOL(2) ? -2 : -1; + char relpersistence = PG_GETARG_CHAR(2); + char relstorage = PG_GETARG_CHAR(3); RelFileNodeBackend rnode = {0}; int64 size = 0; rnode.node.dbNode = MyDatabaseId; rnode.node.relNode = relfilenode; - rnode.node.spcNode = OidIsValid(reltablespace) ? 
- reltablespace : MyDatabaseTableSpace; - rnode.backend = backend; + rnode.node.spcNode = OidIsValid(reltablespace) ? reltablespace : MyDatabaseTableSpace; + rnode.backend = relpersistence == RELPERSISTENCE_TEMP ? TempRelBackendId : InvalidBackendId; - size = calculate_relation_size_all_forks(&rnode); + size = calculate_relation_size_all_forks(&rnode, relstorage); PG_RETURN_INT64(size); } diff --git a/tests/isolation2/expected/test_relation_size.out b/tests/isolation2/expected/test_relation_size.out index e9d620d6f17..387f86555ae 100644 --- a/tests/isolation2/expected/test_relation_size.out +++ b/tests/isolation2/expected/test_relation_size.out @@ -4,13 +4,13 @@ -- the error and returns 0. -- -CREATE TABLE t_dummy_rel(i int); +CREATE TABLE t_dropped(i int); CREATE --- Insert a small amount of data to 't_dummy_rel'. -INSERT INTO t_dummy_rel SELECT generate_series(1, 100); +-- Insert a small amount of data to 't_dropped'. +INSERT INTO t_dropped SELECT generate_series(1, 100); INSERT 100 -- Shows that the size of relfilenode is not zero. -SELECT diskquota.relation_size('t_dummy_rel', false); +SELECT diskquota.relation_size('t_dropped'); relation_size --------------- 98304 @@ -27,9 +27,9 @@ SELECT gp_inject_fault_infinite('diskquota_before_stat_relfilenode', 'suspend', (4 rows) -- Session 1 will hang before applying stat(2) to the physical file. -1&: SELECT diskquota.relation_size('t_dummy_rel', false); +1&: SELECT diskquota.relation_size('t_dropped'); -- Drop the table. -DROP TABLE t_dummy_rel; +DROP TABLE t_dropped; DROP -- Remove the injected 'suspension'. SELECT gp_inject_fault_infinite('diskquota_before_stat_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p'; @@ -46,3 +46,34 @@ SELECT gp_inject_fault_infinite('diskquota_before_stat_relfilenode', 'reset', db --------------- 0 (1 row) + +-- 2. Test whether relation size is correct under concurrent writes for AO tables. +-- Since no row is deleted, diskquota.relation_size() should be equal to +-- pg_relation_size(). + +CREATE TABLE t_ao(i int) WITH (appendonly=true); +CREATE +1: BEGIN; +BEGIN +1: INSERT INTO t_ao SELECT generate_series(1, 10000); +INSERT 10000 +2: BEGIN; +BEGIN +2: INSERT INTO t_ao SELECT generate_series(1, 10000); +INSERT 10000 +1: COMMIT; +COMMIT +2: COMMIT; +COMMIT +SELECT diskquota.relation_size('t_ao'); + relation_size +--------------- + 200400 +(1 row) +SELECT pg_relation_size('t_ao'); + pg_relation_size +------------------ + 200400 +(1 row) +DROP TABLE t_ao; +DROP diff --git a/tests/isolation2/sql/test_relation_size.sql b/tests/isolation2/sql/test_relation_size.sql index 735fa3cc3c1..d06cdfb4dfc 100644 --- a/tests/isolation2/sql/test_relation_size.sql +++ b/tests/isolation2/sql/test_relation_size.sql @@ -4,22 +4,37 @@ -- the error and returns 0. -- -CREATE TABLE t_dummy_rel(i int); --- Insert a small amount of data to 't_dummy_rel'. -INSERT INTO t_dummy_rel SELECT generate_series(1, 100); +CREATE TABLE t_dropped(i int); +-- Insert a small amount of data to 't_dropped'. +INSERT INTO t_dropped SELECT generate_series(1, 100); -- Shows that the size of relfilenode is not zero. -SELECT diskquota.relation_size('t_dummy_rel', false); +SELECT diskquota.relation_size('t_dropped'); -- Inject 'suspension' to servers. SELECT gp_inject_fault_infinite('diskquota_before_stat_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p'; -- Session 1 will hang before applying stat(2) to the physical file. 
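-- Note on the signature change below: diskquota.relation_size() no longer
-- takes an is_temp flag. relation_size_local() now derives the backend id
-- from pg_class.relpersistence and the sizing strategy from
-- pg_class.relstorage, so callers pass only the regclass, e.g.
-- SELECT diskquota.relation_size('t_dropped');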
-1&: SELECT diskquota.relation_size('t_dummy_rel', false); +1&: SELECT diskquota.relation_size('t_dropped'); -- Drop the table. -DROP TABLE t_dummy_rel; +DROP TABLE t_dropped; -- Remove the injected 'suspension'. SELECT gp_inject_fault_infinite('diskquota_before_stat_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p'; -- Session 1 will continue and returns 0. 1<: + +-- 2. Test whether relation size is correct under concurrent writes for AO tables. +-- Since no row is deleted, diskquota.relation_size() should be equal to +-- pg_relation_size(). + +CREATE TABLE t_ao(i int) WITH (appendonly=true); +1: BEGIN; +1: INSERT INTO t_ao SELECT generate_series(1, 10000); +2: BEGIN; +2: INSERT INTO t_ao SELECT generate_series(1, 10000); +1: COMMIT; +2: COMMIT; +SELECT diskquota.relation_size('t_ao'); +SELECT pg_relation_size('t_ao'); +DROP TABLE t_ao; diff --git a/tests/regress/expected/test_relation_size.out b/tests/regress/expected/test_relation_size.out index 4828b10d74d..2592aff7612 100644 --- a/tests/regress/expected/test_relation_size.out +++ b/tests/regress/expected/test_relation_size.out @@ -2,7 +2,7 @@ CREATE TEMP TABLE t1(i int); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT INTO t1 SELECT generate_series(1, 10000); -SELECT diskquota.relation_size('t1', true); +SELECT diskquota.relation_size('t1'); relation_size --------------- 688128 @@ -18,7 +18,7 @@ CREATE TABLE t2(i int); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT INTO t2 SELECT generate_series(1, 10000); -SELECT diskquota.relation_size('t2', false); +SELECT diskquota.relation_size('t2'); relation_size --------------- 688128 @@ -38,7 +38,7 @@ NOTICE: tablespace "test_spc" does not exist, skipping CREATE TABLESPACE test_spc LOCATION '/tmp/test_spc'; ALTER TABLE t1 SET TABLESPACE test_spc; INSERT INTO t1 SELECT generate_series(1, 10000); -SELECT diskquota.relation_size('t1', true); +SELECT diskquota.relation_size('t1'); relation_size --------------- 1081344 @@ -52,7 +52,7 @@ SELECT pg_table_size('t1'); ALTER TABLE t2 SET TABLESPACE test_spc; INSERT INTO t2 SELECT generate_series(1, 10000); -SELECT diskquota.relation_size('t2', false); +SELECT diskquota.relation_size('t2'); relation_size --------------- 1081344 @@ -67,3 +67,36 @@ SELECT pg_table_size('t2'); DROP TABLE t1, t2; DROP TABLESPACE test_spc; \! rm -rf /tmp/test_spc +CREATE TABLE ao (i int) WITH (appendonly=true); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
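-- Why AO tables need dedicated sizing: append-optimized relations keep data
-- in numbered extent files, and for column-oriented tables the file number
-- also encodes the column, so the numbering can be sparse. A heap-style
-- loop that stats <relfilenode>.<n> until the first ENOENT would undercount
-- them; calculate_relation_size_all_forks() instead visits every candidate
-- extent via ao_foreach_extent_file(relation_file_stat, &ctx), which the
-- comparisons against pg_relation_size() below are meant to confirm.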
+INSERT INTO ao SELECT generate_series(1, 10000); +SELECT diskquota.relation_size('ao'); + relation_size +--------------- + 100200 +(1 row) + +SELECT pg_relation_size('ao'); + pg_relation_size +------------------ + 100200 +(1 row) + +CREATE TABLE aocs (i int, t text) WITH (appendonly=true, orientation=column); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +INSERT INTO aocs SELECT i, repeat('a', 1000) FROM generate_series(1, 10000) AS i; +SELECT diskquota.relation_size('aocs'); + relation_size +--------------- + 10092696 +(1 row) + +SELECT pg_relation_size('aocs'); + pg_relation_size +------------------ + 10092696 +(1 row) + +DROP TABLE aocs; diff --git a/tests/regress/sql/test_relation_size.sql b/tests/regress/sql/test_relation_size.sql index d52f0b95e49..98fddfd305c 100644 --- a/tests/regress/sql/test_relation_size.sql +++ b/tests/regress/sql/test_relation_size.sql @@ -1,11 +1,11 @@ CREATE TEMP TABLE t1(i int); INSERT INTO t1 SELECT generate_series(1, 10000); -SELECT diskquota.relation_size('t1', true); +SELECT diskquota.relation_size('t1'); SELECT pg_table_size('t1'); CREATE TABLE t2(i int); INSERT INTO t2 SELECT generate_series(1, 10000); -SELECT diskquota.relation_size('t2', false); +SELECT diskquota.relation_size('t2'); SELECT pg_table_size('t2'); -- start_ignore @@ -16,15 +16,25 @@ CREATE TABLESPACE test_spc LOCATION '/tmp/test_spc'; ALTER TABLE t1 SET TABLESPACE test_spc; INSERT INTO t1 SELECT generate_series(1, 10000); -SELECT diskquota.relation_size('t1', true); +SELECT diskquota.relation_size('t1'); SELECT pg_table_size('t1'); ALTER TABLE t2 SET TABLESPACE test_spc; INSERT INTO t2 SELECT generate_series(1, 10000); -SELECT diskquota.relation_size('t2', false); +SELECT diskquota.relation_size('t2'); SELECT pg_table_size('t2'); - DROP TABLE t1, t2; DROP TABLESPACE test_spc; \! rm -rf /tmp/test_spc + +CREATE TABLE ao (i int) WITH (appendonly=true); +INSERT INTO ao SELECT generate_series(1, 10000); +SELECT diskquota.relation_size('ao'); +SELECT pg_relation_size('ao'); + +CREATE TABLE aocs (i int, t text) WITH (appendonly=true, orientation=column); +INSERT INTO aocs SELECT i, repeat('a', 1000) FROM generate_series(1, 10000) AS i; +SELECT diskquota.relation_size('aocs'); +SELECT pg_relation_size('aocs'); +DROP TABLE aocs; From 0f8545d94e9a90e0b2c056765267a5971197ff36 Mon Sep 17 00:00:00 2001 From: Zhang Hao <1446384557@qq.com> Date: Wed, 1 Dec 2021 15:02:30 +0800 Subject: [PATCH 087/330] Add cache to hold uncommitted relations (#91) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 1. add relation_cache_lock 2. add relation_cache and relid_cache in SHM 3. 
add relation_cache test case Co-authored-by: hzhang2 Co-authored-by: Xuebin Su (苏学斌) Co-authored-by: Xing Guo --- Makefile | 4 +- diskquota--2.0.sql | 19 + diskquota.h | 3 + diskquota_utility.c | 24 ++ gp_activetable.c | 93 ++++- quotamodel.c | 6 + relation_cache.c | 383 ++++++++++++++++++ relation_cache.h | 38 ++ tests/regress/diskquota_schedule | 1 + .../regress/expected/test_relation_cache.out | 135 ++++++ tests/regress/sql/test_relation_cache.sql | 68 ++++ 11 files changed, 767 insertions(+), 7 deletions(-) create mode 100644 relation_cache.c create mode 100644 relation_cache.h create mode 100644 tests/regress/expected/test_relation_cache.out create mode 100644 tests/regress/sql/test_relation_cache.sql diff --git a/Makefile b/Makefile index e7dd111f901..662227225a1 100644 --- a/Makefile +++ b/Makefile @@ -5,8 +5,8 @@ MODULE_big = diskquota EXTENSION = diskquota DATA = diskquota--1.0.sql diskquota--2.0.sql diskquota--1.0--2.0.sql diskquota--2.0--1.0.sql SRCDIR = ./ -FILES = diskquota.c enforcement.c quotamodel.c gp_activetable.c diskquota_utility.c -OBJS = diskquota.o enforcement.o quotamodel.o gp_activetable.o diskquota_utility.o +FILES = diskquota.c enforcement.c quotamodel.c gp_activetable.c diskquota_utility.c relation_cache.c +OBJS = diskquota.o enforcement.o quotamodel.o gp_activetable.o diskquota_utility.o relation_cache.o PG_CPPFLAGS = -I$(libpq_srcdir) SHLIB_LINK = $(libpq) diff --git a/diskquota--2.0.sql b/diskquota--2.0.sql index fe99074d702..0147c79a87a 100644 --- a/diskquota--2.0.sql +++ b/diskquota--2.0.sql @@ -150,3 +150,22 @@ SELECT sum(size)::bigint FROM ( FROM pg_class WHERE oid = relation ) AS t $$ LANGUAGE SQL; + +CREATE TYPE diskquota.relation_cache_detail AS + (RELID oid, PRIMARY_TABLE_OID oid, AUXREL_NUM int, + OWNEROID oid, NAMESPACEOID oid, BACKENDID int, SPCNODE oid, DBNODE oid, RELNODE oid, RELSTORAGE "char", AUXREL_OID oid[]); + +CREATE OR REPLACE FUNCTION diskquota.show_relation_cache() +RETURNS setof diskquota.relation_cache_detail +AS 'MODULE_PATHNAME', 'show_relation_cache' +LANGUAGE C; + +CREATE OR REPLACE FUNCTION diskquota.show_relation_cache_all_seg() +RETURNS setof diskquota.relation_cache_detail +as $$ +WITH relation_cache AS ( + SELECT diskquota.show_relation_cache() AS a + FROM gp_dist_random('gp_id') +) +SELECT (a).* FROM relation_cache; +$$ LANGUAGE SQL; diff --git a/diskquota.h b/diskquota.h index ca644be0ba8..4e1c6a4b552 100644 --- a/diskquota.h +++ b/diskquota.h @@ -36,6 +36,7 @@ struct DiskQuotaLocks LWLock *extension_ddl_lock; /* ensure create diskquota extension serially */ LWLock *monitoring_dbid_cache_lock; LWLock *paused_lock; + LWLock *relation_cache_lock; }; typedef struct DiskQuotaLocks DiskQuotaLocks; #define DiskQuotaLocksItemNumber (sizeof(DiskQuotaLocks) / sizeof(void*)) @@ -118,4 +119,6 @@ extern int SEGCOUNT; extern int get_ext_major_version(void); extern void truncateStringInfo(StringInfo str, int nchars); extern List *get_rel_oid_list(void); +extern int64 calculate_relation_size_all_forks(RelFileNodeBackend *rnode); +extern Relation diskquota_relation_open(Oid relid, LOCKMODE mode); #endif diff --git a/diskquota_utility.c b/diskquota_utility.c index 668c3716cfc..3d91ec93f7a 100644 --- a/diskquota_utility.c +++ b/diskquota_utility.c @@ -29,6 +29,7 @@ #include "catalog/pg_extension.h" #include "catalog/pg_tablespace.h" #include "catalog/pg_type.h" +#include "catalog/indexing.h" #include "commands/dbcommands.h" #include "commands/extension.h" #include "commands/tablespace.h" @@ -1286,3 +1287,26 @@ 
relation_size_local(PG_FUNCTION_ARGS) PG_RETURN_INT64(size); } + +Relation +diskquota_relation_open(Oid relid, LOCKMODE mode) +{ + Relation rel; + bool success_open = false; + int32 SavedInterruptHoldoffCount = InterruptHoldoffCount; + + PG_TRY(); + { + rel = relation_open(relid, mode); + success_open = true; + } + PG_CATCH(); + { + InterruptHoldoffCount = SavedInterruptHoldoffCount; + HOLD_INTERRUPTS(); + FlushErrorState(); + RESUME_INTERRUPTS(); + } + PG_END_TRY(); + return success_open ? rel : NULL; +} diff --git a/gp_activetable.c b/gp_activetable.c index 61a7ac4f038..b7f5eacd213 100644 --- a/gp_activetable.c +++ b/gp_activetable.c @@ -19,7 +19,9 @@ #include "access/xact.h" #include "catalog/indexing.h" #include "catalog/pg_class.h" +#include "catalog/pg_namespace.h" #include "catalog/pg_type.h" +#include "catalog/objectaccess.h" #include "cdb/cdbbufferedappend.h" #include "cdb/cdbdisp_query.h" #include "cdb/cdbdispatchresult.h" @@ -39,9 +41,11 @@ #include "utils/fmgroids.h" #include "utils/lsyscache.h" #include "utils/relfilenodemap.h" +#include "utils/syscache.h" #include "gp_activetable.h" #include "diskquota.h" +#include "relation_cache.h" PG_FUNCTION_INFO_V1(diskquota_fetch_table_stat); @@ -59,10 +63,14 @@ HTAB *monitoring_dbid_cache = NULL; static file_create_hook_type prev_file_create_hook = NULL; static file_extend_hook_type prev_file_extend_hook = NULL; static file_truncate_hook_type prev_file_truncate_hook = NULL; +static file_unlink_hook_type prev_file_unlink_hook = NULL; +static object_access_hook_type prev_object_access_hook = NULL; static void active_table_hook_smgrcreate(RelFileNodeBackend rnode); static void active_table_hook_smgrextend(RelFileNodeBackend rnode); static void active_table_hook_smgrtruncate(RelFileNodeBackend rnode); +static void active_table_hook_smgrunlink(RelFileNodeBackend rnode); +static void object_access_hook_QuotaStmt(ObjectAccessType access, Oid classId, Oid objectId, int subId, void *arg); static HTAB *get_active_tables_stats(ArrayType *array); static HTAB *get_active_tables_oid(void); @@ -71,6 +79,7 @@ static void pull_active_table_size_from_seg(HTAB *local_table_stats_map, char *a static StringInfoData convert_map_to_string(HTAB *active_list); static void load_table_size(HTAB *local_table_stats_map); static void report_active_table_helper(const RelFileNodeBackend *relFileNode); +static void report_relation_cache_helper(Oid relid); void init_active_table_hook(void); void init_shm_worker_active_tables(void); @@ -112,6 +121,12 @@ init_active_table_hook(void) prev_file_truncate_hook = file_truncate_hook; file_truncate_hook = active_table_hook_smgrtruncate; + + prev_file_unlink_hook = file_unlink_hook; + file_unlink_hook = active_table_hook_smgrunlink; + + prev_object_access_hook = object_access_hook; + object_access_hook = object_access_hook_QuotaStmt; } /* @@ -152,6 +167,65 @@ active_table_hook_smgrtruncate(RelFileNodeBackend rnode) report_active_table_helper(&rnode); } +static void +active_table_hook_smgrunlink(RelFileNodeBackend rnode) +{ + if (prev_file_unlink_hook) + (*prev_file_unlink_hook) (rnode); + + remove_cache_entry(InvalidOid, rnode.node.relNode); +} + +static void +object_access_hook_QuotaStmt(ObjectAccessType access, Oid classId, Oid objectId, int subId, void *arg) +{ + if (prev_object_access_hook) + (*prev_object_access_hook)(access, classId, objectId, subId, arg); + + /* TODO: do we need to use "&&" instead of "||"? 
*/ + if (classId != RelationRelationId || subId != 0) + { + return; + } + + if (objectId < FirstNormalObjectId) + { + return; + } + + if (access != OAT_POST_CREATE) + { + return; + } + + report_relation_cache_helper(objectId); +} + +static void +report_relation_cache_helper(Oid relid) +{ + bool found; + + /* We do not collect the active table in either master or mirror segments */ + if (IS_QUERY_DISPATCHER() || IsRoleMirror()) + { + return; + } + + /* + * Do not collect active table info when the database is not under monitoring. + * this operation is read-only and does not require absolutely exact. + * read the cache with out shared lock. + */ + hash_search(monitoring_dbid_cache, &MyDatabaseId, HASH_FIND, &found); + if (!found) + { + return; + } + + update_relation_cache(relid); +} + /* * Common function for reporting active tables * Currently, any file events(create, extend. truncate) are @@ -604,6 +678,8 @@ get_active_tables_oid(void) &ctl, HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); + remove_committed_relation_from_cache(); + /* * scan whole local map, get the oid of each table and calculate the size * of them @@ -613,14 +689,21 @@ get_active_tables_oid(void) while ((active_table_file_entry = (DiskQuotaActiveTableFileEntry *) hash_seq_search(&iter)) != NULL) { bool found; - - relOid = RelidByRelfilenode(active_table_file_entry->tablespaceoid, active_table_file_entry->relfilenode); + RelFileNode rnode; + Oid prelid; + + rnode.dbNode = active_table_file_entry->dbid; + rnode.relNode = active_table_file_entry->relfilenode; + rnode.spcNode = active_table_file_entry->tablespaceoid; + relOid = get_relid_by_relfilenode(rnode); + if (relOid != InvalidOid) { - active_table_entry = hash_search(local_active_table_stats_map, &relOid, HASH_ENTER, &found); - if (active_table_entry) + prelid = get_primary_table_oid(relOid); + active_table_entry = hash_search(local_active_table_stats_map, &prelid, HASH_ENTER, &found); + if (active_table_entry && !found) { - active_table_entry->reloid = relOid; + active_table_entry->reloid = prelid; /* we don't care segid and tablesize here */ active_table_entry->tablesize = 0; active_table_entry->segid = -1; diff --git a/quotamodel.c b/quotamodel.c index 919a0dfd9c7..678c85a880d 100644 --- a/quotamodel.c +++ b/quotamodel.c @@ -52,6 +52,7 @@ #include "gp_activetable.h" #include "diskquota.h" +#include "relation_cache.h" /* cluster level max size of black list */ #define MAX_DISK_QUOTA_BLACK_ENTRIES (1024 * 1024) @@ -430,6 +431,8 @@ disk_quota_shmem_startup(void) init_shm_worker_active_tables(); + init_shm_worker_relation_cache(); + memset(&hash_ctl, 0, sizeof(hash_ctl)); hash_ctl.keysize = sizeof(Oid); hash_ctl.entrysize = sizeof(Oid); @@ -469,6 +472,7 @@ init_lwlocks(void) diskquota_locks.extension_ddl_lock = LWLockAssign(); diskquota_locks.monitoring_dbid_cache_lock = LWLockAssign(); diskquota_locks.paused_lock = LWLockAssign(); + diskquota_locks.relation_cache_lock = LWLockAssign(); } /* @@ -483,6 +487,8 @@ DiskQuotaShmemSize(void) size = sizeof(ExtensionDDLMessage); size = add_size(size, hash_estimate_size(MAX_DISK_QUOTA_BLACK_ENTRIES, sizeof(GlobalBlackMapEntry))); size = add_size(size, hash_estimate_size(diskquota_max_active_tables, sizeof(DiskQuotaActiveTableEntry))); + size = add_size(size, hash_estimate_size(diskquota_max_active_tables, sizeof(DiskQuotaRelationCacheEntry))); + size = add_size(size, hash_estimate_size(diskquota_max_active_tables, sizeof(DiskQuotaRelidCacheEntry))); size = add_size(size, hash_estimate_size(MAX_NUM_MONITORED_DB, sizeof(Oid))); size 
+= sizeof(bool); /* sizeof(*diskquota_paused) */ return size; diff --git a/relation_cache.c b/relation_cache.c new file mode 100644 index 00000000000..b00af0c1e8c --- /dev/null +++ b/relation_cache.c @@ -0,0 +1,383 @@ +#include "postgres.h" + +#include "catalog/indexing.h" +#include "catalog/pg_class.h" +#include "catalog/pg_namespace.h" +#include "catalog/pg_type.h" +#include "catalog/objectaccess.h" +#include "executor/spi.h" +#include "utils/builtins.h" +#include "utils/lsyscache.h" +#include "utils/relfilenodemap.h" +#include "utils/syscache.h" +#include "utils/array.h" +#include "funcapi.h" + +#include "relation_cache.h" +#include "diskquota.h" + +HTAB *relation_cache = NULL; +HTAB *relid_cache = NULL; + +static void update_relation_entry(Oid relid, DiskQuotaRelationCacheEntry *relation_entry, DiskQuotaRelidCacheEntry *relid_entry); + +PG_FUNCTION_INFO_V1(show_relation_cache); + +void +init_shm_worker_relation_cache(void) +{ + HASHCTL ctl; + + memset(&ctl, 0, sizeof(ctl)); + + ctl.keysize = sizeof(Oid); + ctl.entrysize = sizeof(DiskQuotaRelationCacheEntry); + ctl.hash = tag_hash; + + relation_cache = ShmemInitHash("relation_cache", + diskquota_max_active_tables, + diskquota_max_active_tables, + &ctl, + HASH_ELEM | HASH_FUNCTION); + + memset(&ctl, 0, sizeof(ctl)); + + ctl.keysize = sizeof(Oid); + ctl.entrysize = sizeof(DiskQuotaRelidCacheEntry); + ctl.hash = tag_hash; + + relid_cache = ShmemInitHash("relid_cache", + diskquota_max_active_tables, + diskquota_max_active_tables, + &ctl, + HASH_ELEM | HASH_FUNCTION); +} + +Oid +get_relid_by_relfilenode(RelFileNode relfilenode) +{ + Oid relid; + + relid = RelidByRelfilenode(relfilenode.spcNode, relfilenode.relNode); + if(OidIsValid(relid)) + { + remove_cache_entry(InvalidOid, relfilenode.relNode); + return relid; + } + + relid = get_uncommitted_table_relid(relfilenode.relNode); + return relid; +} + +void +remove_cache_entry(Oid relid, Oid relfilenode) +{ + DiskQuotaRelationCacheEntry *relation_entry; + DiskQuotaRelidCacheEntry *relid_entry; + + LWLockAcquire(diskquota_locks.relation_cache_lock, LW_EXCLUSIVE); + if (OidIsValid(relid)) + { + relation_entry = hash_search(relation_cache, &relid, HASH_FIND, NULL); + if (relation_entry) + { + hash_search(relid_cache, &relation_entry->rnode.node.relNode, HASH_REMOVE, NULL); + hash_search(relation_cache, &relid, HASH_REMOVE, NULL); + } + } + + if (OidIsValid(relfilenode)) + { + relid_entry = hash_search(relid_cache, &relfilenode, HASH_FIND, NULL); + if (relid_entry) + { + hash_search(relation_cache, &relid_entry->relid, HASH_REMOVE, NULL); + hash_search(relid_cache, &relfilenode, HASH_REMOVE, NULL); + } + } + LWLockRelease(diskquota_locks.relation_cache_lock); +} + +Oid +get_uncommitted_table_relid(Oid relfilenode) +{ + Oid relid = InvalidOid; + DiskQuotaRelidCacheEntry *entry; + + LWLockAcquire(diskquota_locks.relation_cache_lock, LW_SHARED); + entry = hash_search(relid_cache, &relfilenode, HASH_FIND, NULL); + if (entry) + { + relid = entry->relid; + } + LWLockRelease(diskquota_locks.relation_cache_lock); + + return relid; +} + +static void +add_auxrelid_to_relation_entry(DiskQuotaRelationCacheEntry *entry, Oid relid) +{ + int i; + + for (i = 0; i < entry->auxrel_num; i++) + { + if (entry->auxrel_oid[i] == relid) + { + return; + } + } + entry->auxrel_oid[entry->auxrel_num++] = relid; +} + +static void +update_relation_entry(Oid relid, DiskQuotaRelationCacheEntry *relation_entry, DiskQuotaRelidCacheEntry *relid_entry) +{ + Relation rel; + + rel = diskquota_relation_open(relid, NoLock); + if (rel == 
NULL) + { + return; + } + + if (relation_entry) + { + relation_entry->relid = relid; + relation_entry->rnode.node = rel->rd_node; + relation_entry->rnode.backend = rel->rd_backend; + relation_entry->owneroid = rel->rd_rel->relowner; + relation_entry->namespaceoid = rel->rd_rel->relnamespace; + relation_entry->relstorage = rel->rd_rel->relstorage; + } + + if (relid_entry) + { + relid_entry->relfilenode = rel->rd_node.relNode; + relid_entry->relid = relid; + } + + relation_entry->primary_table_relid = relid; + + relation_close(rel, NoLock); +} + +void +update_relation_cache(Oid relid) +{ + DiskQuotaRelationCacheEntry relation_entry_data = {0}; + DiskQuotaRelationCacheEntry *relation_entry; + DiskQuotaRelidCacheEntry relid_entry_data = {0}; + DiskQuotaRelidCacheEntry *relid_entry; + Oid prelid; + + update_relation_entry(relid, &relation_entry_data, &relid_entry_data); + + LWLockAcquire(diskquota_locks.relation_cache_lock, LW_EXCLUSIVE); + relation_entry = hash_search(relation_cache, &relation_entry_data.relid, HASH_ENTER, NULL); + memcpy(relation_entry, &relation_entry_data, sizeof(DiskQuotaRelationCacheEntry)); + + relid_entry = hash_search(relid_cache, &relid_entry_data.relfilenode, HASH_ENTER, NULL); + memcpy(relid_entry, &relid_entry_data, sizeof(DiskQuotaRelidCacheEntry)); + LWLockRelease(diskquota_locks.relation_cache_lock); + + prelid = get_primary_table_oid(relid); + if (OidIsValid(prelid) && prelid != relid) + { + LWLockAcquire(diskquota_locks.relation_cache_lock, LW_EXCLUSIVE); + relation_entry->primary_table_relid = prelid; + relation_entry = hash_search(relation_cache, &prelid, HASH_FIND, NULL); + if (relation_entry) + { + add_auxrelid_to_relation_entry(relation_entry, relid); + } + LWLockRelease(diskquota_locks.relation_cache_lock); + } +} + +static Oid +parse_primary_table_oid(Oid relid) +{ + Relation rel; + Oid namespace; + char relname[NAMEDATALEN]; + + rel = diskquota_relation_open(relid, NoLock); + if (rel == NULL) + { + return InvalidOid; + } + + namespace = rel->rd_rel->relnamespace; + memcpy(relname, rel->rd_rel->relname.data, NAMEDATALEN); + relation_close(rel, NoLock); + + switch (namespace) + { + case PG_TOAST_NAMESPACE: + if (strncmp(relname, "pg_toast", 8) == 0) + return atoi(&relname[9]); + break; + case PG_AOSEGMENT_NAMESPACE: + { + if (strncmp(relname, "pg_aoseg", 8) == 0) + return atoi(&relname[9]); + else if (strncmp(relname, "pg_aovisimap", 12) == 0) + return atoi(&relname[13]); + else if (strncmp(relname, "pg_aocsseg", 10) == 0) + return atoi(&relname[11]); + } + break; + } + return relid; +} + +Oid +get_primary_table_oid(Oid relid) +{ + DiskQuotaRelationCacheEntry *relation_entry; + Oid cached_prelid = relid; + Oid parsed_prelid; + + parsed_prelid = parse_primary_table_oid(relid); + if (OidIsValid(parsed_prelid)) + { + return parsed_prelid; + } + + LWLockAcquire(diskquota_locks.relation_cache_lock, LW_SHARED); + relation_entry = hash_search(relation_cache, &relid, HASH_FIND, NULL); + if (relation_entry) + { + cached_prelid = relation_entry->primary_table_relid; + } + LWLockRelease(diskquota_locks.relation_cache_lock); + + return cached_prelid; +} + +void +remove_committed_relation_from_cache(void) +{ + HASH_SEQ_STATUS iter = {0}; + hash_seq_init(&iter, relation_cache); + + DiskQuotaRelationCacheEntry *entry = NULL; + while ((entry = hash_seq_search(&iter)) != NULL) + + { + if (SearchSysCacheExists1(RELOID, entry->relid)) + remove_cache_entry(entry->relid, InvalidOid); + } + +} + +Datum +show_relation_cache(PG_FUNCTION_ARGS) +{ + DiskQuotaRelationCacheEntry 
*entry; + FuncCallContext *funcctx; + struct RelationCacheCtx { + HASH_SEQ_STATUS iter; + HTAB *relation_cache; + } *relation_cache_ctx; + + if (SRF_IS_FIRSTCALL()) + { + TupleDesc tupdesc; + MemoryContext oldcontext; + HASHCTL hashctl; + HASH_SEQ_STATUS hash_seq; + + /* Create a function context for cross-call persistence. */ + funcctx = SRF_FIRSTCALL_INIT(); + + /* Switch to memory context appropriate for multiple function calls */ + oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx); + + tupdesc = CreateTemplateTupleDesc(11, false /*hasoid*/); + TupleDescInitEntry(tupdesc, (AttrNumber) 1, "RELID", OIDOID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber) 2, "PRIMARY_TABLE_OID", OIDOID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber) 3, "AUXREL_NUM", INT4OID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber) 4, "OWNEROID", OIDOID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber) 5, "NAMESPACEOID", OIDOID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber) 6, "BACKENDID", INT4OID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber) 7, "SPCNODE", OIDOID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber) 8, "DBNODE", OIDOID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber) 9, "RELNODE", OIDOID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber) 10, "RELSTORAGE", CHAROID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber) 11, "AUXREL_OID", OIDARRAYOID, -1 /*typmod*/, 0 /*attdim*/); + + funcctx->tuple_desc = BlessTupleDesc(tupdesc); + + /* Create a local hash table and fill it with entries from shared memory. */ + memset(&hashctl, 0, sizeof(hashctl)); + hashctl.keysize = sizeof(Oid); + hashctl.entrysize = sizeof(DiskQuotaRelationCacheEntry); + hashctl.hcxt = CurrentMemoryContext; + hashctl.hash = tag_hash; + + relation_cache_ctx = (struct RelationCacheCtx *) palloc(sizeof(struct RelationCacheCtx)); + relation_cache_ctx->relation_cache = hash_create("relation_cache_ctx->relation_cache", + 1024, &hashctl, + HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); + + LWLockAcquire(diskquota_locks.relation_cache_lock, LW_SHARED); + hash_seq_init(&hash_seq, relation_cache); + while ((entry = (DiskQuotaRelationCacheEntry *) hash_seq_search(&hash_seq)) != NULL) + { + DiskQuotaRelationCacheEntry *local_entry = hash_search(relation_cache_ctx->relation_cache, + &entry->relid, HASH_ENTER_NULL, NULL); + if (local_entry) + { + memcpy(local_entry, entry, sizeof(DiskQuotaRelationCacheEntry)); + } + } + LWLockRelease(diskquota_locks.relation_cache_lock); + + /* Setup first calling context. 
*/ + hash_seq_init(&(relation_cache_ctx->iter), relation_cache_ctx->relation_cache); + funcctx->user_fctx = (void *) relation_cache_ctx; + MemoryContextSwitchTo(oldcontext); + } + + funcctx = SRF_PERCALL_SETUP(); + relation_cache_ctx = (struct RelationCacheCtx *) funcctx->user_fctx; + + while ((entry = (DiskQuotaRelationCacheEntry *)hash_seq_search(&(relation_cache_ctx->iter))) != NULL) + { + Datum result; + Datum values[11]; + Datum auxrel_oid[10]; + bool nulls[11]; + HeapTuple tuple; + ArrayType *array; + int i; + + for (i = 0; i < entry->auxrel_num; i++) + { + auxrel_oid[i] = ObjectIdGetDatum(entry->auxrel_oid[i]); + } + array = construct_array(auxrel_oid, entry->auxrel_num, OIDOID, sizeof(Oid), true, 'i'); + + values[0] = ObjectIdGetDatum(entry->relid); + values[1] = ObjectIdGetDatum(entry->primary_table_relid); + values[2] = Int32GetDatum(entry->auxrel_num); + values[3] = ObjectIdGetDatum(entry->owneroid); + values[4] = ObjectIdGetDatum(entry->namespaceoid); + values[5] = Int32GetDatum(entry->rnode.backend); + values[6] = ObjectIdGetDatum(entry->rnode.node.spcNode); + values[7] = ObjectIdGetDatum(entry->rnode.node.dbNode); + values[8] = ObjectIdGetDatum(entry->rnode.node.relNode); + values[9] = CharGetDatum(entry->relstorage); + values[10] = PointerGetDatum(array); + + memset(nulls, false, sizeof(nulls)); + tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls); + result = HeapTupleGetDatum(tuple); + + SRF_RETURN_NEXT(funcctx, result); + } + + SRF_RETURN_DONE(funcctx); +} diff --git a/relation_cache.h b/relation_cache.h new file mode 100644 index 00000000000..6817c9612d2 --- /dev/null +++ b/relation_cache.h @@ -0,0 +1,38 @@ +#ifndef RELATION_CACHE_H +#define RELATION_CACHE_H + +#include "storage/relfilenode.h" +#include "utils/relcache.h" +#include "storage/lock.h" +#include "postgres.h" + +typedef struct DiskQuotaRelationCacheEntry +{ + Oid relid; + Oid primary_table_relid; + Oid auxrel_oid[10]; + Oid auxrel_num; + Oid owneroid; + Oid namespaceoid; + char relstorage; + RelFileNodeBackend rnode; +} DiskQuotaRelationCacheEntry; + +typedef struct DiskQuotaRelidCacheEntry +{ + Oid relfilenode; + Oid relid; +} DiskQuotaRelidCacheEntry; + +extern HTAB *relation_cache; + +extern void init_shm_worker_relation_cache(void); +extern Oid get_relid_by_relfilenode(RelFileNode relfilenode); +extern void remove_cache_entry(Oid relid, Oid relfilenode); +extern Oid get_uncommitted_table_relid(Oid relfilenode); +extern void update_relation_cache(Oid relid); +extern Oid get_primary_table_oid(Oid relid); +extern void remove_committed_relation_from_cache(void); + + +#endif diff --git a/tests/regress/diskquota_schedule b/tests/regress/diskquota_schedule index b8ce67003fb..9bda7c1f8e3 100644 --- a/tests/regress/diskquota_schedule +++ b/tests/regress/diskquota_schedule @@ -1,6 +1,7 @@ test: init test: prepare test: test_relation_size +test: test_relation_cache # disable this tese due to GPDB behavior change # test: test_table_size test: test_fast_disk_check diff --git a/tests/regress/expected/test_relation_cache.out b/tests/regress/expected/test_relation_cache.out new file mode 100644 index 00000000000..dbdba6482c6 --- /dev/null +++ b/tests/regress/expected/test_relation_cache.out @@ -0,0 +1,135 @@ +-- init +CREATE OR REPLACE FUNCTION diskquota.check_relation_cache() +RETURNS boolean +as $$ +declare t1 oid[]; +declare t2 oid[]; +begin +t1 := (select array_agg(distinct((a).relid)) from diskquota.show_relation_cache_all_seg() as a where (a).relid != (a).primary_table_oid); +t2 := (select 
distinct((a).auxrel_oid) from diskquota.show_relation_cache_all_seg() as a where (a).relid = (a).primary_table_oid); +return t1 = t2; +end; +$$ LANGUAGE plpgsql; +-- heap table +begin; +create table t(i int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +insert into t select generate_series(1, 100000); +select count(*) from diskquota.show_relation_cache_all_seg(); + count +------- + 3 +(1 row) + +commit; +select pg_sleep(5); + pg_sleep +---------- + +(1 row) + +select count(*) from diskquota.show_relation_cache_all_seg(); + count +------- + 0 +(1 row) + +drop table t; +-- toast table +begin; +create table t(t text); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 't' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +insert into t select array(select * from generate_series(1,1000)) from generate_series(1, 1000); +select count(*) from diskquota.show_relation_cache_all_seg(); + count +------- + 9 +(1 row) + +select diskquota.check_relation_cache(); + check_relation_cache +---------------------- + t +(1 row) + +commit; +select pg_sleep(5); + pg_sleep +---------- + +(1 row) + +select count(*) from diskquota.show_relation_cache_all_seg(); + count +------- + 0 +(1 row) + +drop table t; +-- AO table +begin; +create table t(a int, b text) with(appendonly=true); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +insert into t select generate_series(1,1000) as a, repeat('a', 1000) as b; +select count(*) from diskquota.show_relation_cache_all_seg(); + count +------- + 18 +(1 row) + +select diskquota.check_relation_cache(); + check_relation_cache +---------------------- + t +(1 row) + +commit; +select pg_sleep(5); + pg_sleep +---------- + +(1 row) + +select count(*) from diskquota.show_relation_cache_all_seg(); + count +------- + 0 +(1 row) + +drop table t; +-- AOCS table +begin; +create table t(a int, b text) with(appendonly=true, orientation=column); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
+insert into t select generate_series(1,1000) as a, repeat('a', 1000) as b; +select count(*) from diskquota.show_relation_cache_all_seg(); + count +------- + 18 +(1 row) + +select diskquota.check_relation_cache(); + check_relation_cache +---------------------- + t +(1 row) + +commit; +select pg_sleep(5); + pg_sleep +---------- + +(1 row) + +select count(*) from diskquota.show_relation_cache_all_seg(); + count +------- + 0 +(1 row) + +drop table t; +DROP FUNCTION diskquota.check_relation_cache(); diff --git a/tests/regress/sql/test_relation_cache.sql b/tests/regress/sql/test_relation_cache.sql new file mode 100644 index 00000000000..c5371e0df50 --- /dev/null +++ b/tests/regress/sql/test_relation_cache.sql @@ -0,0 +1,68 @@ +-- init +CREATE OR REPLACE FUNCTION diskquota.check_relation_cache() +RETURNS boolean +as $$ +declare t1 oid[]; +declare t2 oid[]; +begin +t1 := (select array_agg(distinct((a).relid)) from diskquota.show_relation_cache_all_seg() as a where (a).relid != (a).primary_table_oid); +t2 := (select distinct((a).auxrel_oid) from diskquota.show_relation_cache_all_seg() as a where (a).relid = (a).primary_table_oid); +return t1 = t2; +end; +$$ LANGUAGE plpgsql; + +-- heap table +begin; +create table t(i int); +insert into t select generate_series(1, 100000); + +select count(*) from diskquota.show_relation_cache_all_seg(); +commit; + +select pg_sleep(5); +select count(*) from diskquota.show_relation_cache_all_seg(); +drop table t; + +-- toast table +begin; +create table t(t text); +insert into t select array(select * from generate_series(1,1000)) from generate_series(1, 1000); + +select count(*) from diskquota.show_relation_cache_all_seg(); + +select diskquota.check_relation_cache(); +commit; + +select pg_sleep(5); +select count(*) from diskquota.show_relation_cache_all_seg(); +drop table t; + +-- AO table +begin; +create table t(a int, b text) with(appendonly=true); +insert into t select generate_series(1,1000) as a, repeat('a', 1000) as b; + +select count(*) from diskquota.show_relation_cache_all_seg(); + +select diskquota.check_relation_cache(); +commit; + +select pg_sleep(5); +select count(*) from diskquota.show_relation_cache_all_seg(); +drop table t; + +-- AOCS table +begin; +create table t(a int, b text) with(appendonly=true, orientation=column); +insert into t select generate_series(1,1000) as a, repeat('a', 1000) as b; + +select count(*) from diskquota.show_relation_cache_all_seg(); + +select diskquota.check_relation_cache(); +commit; + +select pg_sleep(5); +select count(*) from diskquota.show_relation_cache_all_seg(); +drop table t; + +DROP FUNCTION diskquota.check_relation_cache(); From 72e94312adef41075f2520c981960dfe25b0f21b Mon Sep 17 00:00:00 2001 From: Zhang Hao <1446384557@qq.com> Date: Wed, 1 Dec 2021 16:51:19 +0800 Subject: [PATCH 088/330] fix bug: acquire shared lock when traversing relation_cache in remove_committed_relation_from_cache() (#96) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: hzhang2 Co-authored-by: Xuebin Su (苏学斌) --- diskquota.h | 2 +- diskquota_utility.c | 3 +-- relation_cache.c | 36 ++++++++++++++++++++++++++++++------ 3 files changed, 32 insertions(+), 9 deletions(-) diff --git a/diskquota.h b/diskquota.h index 4e1c6a4b552..5e2fc96de0f 100644 --- a/diskquota.h +++ b/diskquota.h @@ -119,6 +119,6 @@ extern int SEGCOUNT; extern int get_ext_major_version(void); extern void truncateStringInfo(StringInfo str, int nchars); extern List *get_rel_oid_list(void); -extern int64 
calculate_relation_size_all_forks(RelFileNodeBackend *rnode); +extern int64 calculate_relation_size_all_forks(RelFileNodeBackend *rnode, char relstorage); extern Relation diskquota_relation_open(Oid relid, LOCKMODE mode); #endif diff --git a/diskquota_utility.c b/diskquota_utility.c index 3d91ec93f7a..bbb0096e093 100644 --- a/diskquota_utility.c +++ b/diskquota_utility.c @@ -82,7 +82,6 @@ static void set_quota_config_internal(Oid targetoid, int64 quota_limit_mb, Quota static void set_target_internal(Oid primaryoid, Oid spcoid, int64 quota_limit_mb, QuotaType type); static bool generate_insert_table_size_sql(StringInfoData *buf, int extMajorVersion); static char *convert_oidlist_to_string(List *oidlist); -static int64 calculate_relation_size_all_forks(RelFileNodeBackend *rnode, char relstorage); int get_ext_major_version(void); List *get_rel_oid_list(void); @@ -1231,7 +1230,7 @@ relation_file_stat(int segno, void *ctx) * calculate size of (all forks of) a relation in transaction * This function is following calculate_relation_size() */ -static int64 +int64 calculate_relation_size_all_forks(RelFileNodeBackend *rnode, char relstorage) { int64 totalsize = 0; diff --git a/relation_cache.c b/relation_cache.c index b00af0c1e8c..3049e8c698f 100644 --- a/relation_cache.c +++ b/relation_cache.c @@ -260,16 +260,40 @@ void remove_committed_relation_from_cache(void) { HASH_SEQ_STATUS iter = {0}; - hash_seq_init(&iter, relation_cache); - DiskQuotaRelationCacheEntry *entry = NULL; - while ((entry = hash_seq_search(&iter)) != NULL) + DiskQuotaRelationCacheEntry *local_entry = NULL; + HTAB *local_relation_cache; + HASHCTL ctl; + + memset(&ctl, 0, sizeof(ctl)); + ctl.keysize = sizeof(Oid); + ctl.entrysize = sizeof(DiskQuotaRelationCacheEntry); + ctl.hcxt = CurrentMemoryContext; + ctl.hash = oid_hash; + local_relation_cache = hash_create("local relation cache", + 1024, + &ctl, + HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); + + LWLockAcquire(diskquota_locks.relation_cache_lock, LW_SHARED); + hash_seq_init(&iter, relation_cache); + while ((entry = hash_seq_search(&iter)) != NULL) { - if (SearchSysCacheExists1(RELOID, entry->relid)) - remove_cache_entry(entry->relid, InvalidOid); + local_entry = hash_search(local_relation_cache, &entry->relid, HASH_ENTER, NULL); + memcpy(local_entry, entry, sizeof(DiskQuotaRelationCacheEntry)); } - + LWLockRelease(diskquota_locks.relation_cache_lock); + + hash_seq_init(&iter, local_relation_cache); + while ((local_entry = hash_seq_search(&iter)) != NULL) + { + if (SearchSysCacheExists1(RELOID, local_entry->relid)) + { + remove_cache_entry(local_entry->relid, InvalidOid); + } + } + hash_destroy(local_relation_cache); } Datum From 2a780ebbd9205a7f827dac9af70aa5c372a0bb72 Mon Sep 17 00:00:00 2001 From: Zhang Hao <1446384557@qq.com> Date: Wed, 1 Dec 2021 17:26:09 +0800 Subject: [PATCH 089/330] drop table after testing relation_size() (#97) Co-authored-by: hzhang2 --- tests/regress/sql/test_relation_size.sql | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/regress/sql/test_relation_size.sql b/tests/regress/sql/test_relation_size.sql index 98fddfd305c..fe277cd89ff 100644 --- a/tests/regress/sql/test_relation_size.sql +++ b/tests/regress/sql/test_relation_size.sql @@ -32,6 +32,7 @@ CREATE TABLE ao (i int) WITH (appendonly=true); INSERT INTO ao SELECT generate_series(1, 10000); SELECT diskquota.relation_size('ao'); SELECT pg_relation_size('ao'); +DROP TABLE ao; CREATE TABLE aocs (i int, t text) WITH (appendonly=true, orientation=column); INSERT INTO aocs SELECT i, repeat('a', 
1000) FROM generate_series(1, 10000) AS i;

From bcd920ddd47a72be58487eb77901066c5fbd5ee9 Mon Sep 17 00:00:00 2001
From: Zhang Hao <1446384557@qq.com>
Date: Wed, 1 Dec 2021 17:57:27 +0800
Subject: [PATCH 090/330] Fix test case bug (#98)

* Fix test case bug: drop the test table after testing
  diskquota.relation_size(), because the table's relation_entry would
  otherwise affect the next test case.

Co-authored-by: hzhang2
---
 tests/regress/expected/test_relation_size.out | 1 +
 1 file changed, 1 insertion(+)

diff --git a/tests/regress/expected/test_relation_size.out b/tests/regress/expected/test_relation_size.out
index 2592aff7612..7841bec9f81 100644
--- a/tests/regress/expected/test_relation_size.out
+++ b/tests/regress/expected/test_relation_size.out
@@ -83,6 +83,7 @@ SELECT pg_relation_size('ao');
 100200
 (1 row)
 
+DROP TABLE ao;
 CREATE TABLE aocs (i int, t text) WITH (appendonly=true, orientation=column);
 NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table.
 HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.

From 099a4a059a2546b217746df6e144fe169c4786d1 Mon Sep 17 00:00:00 2001
From: Xing Guo
Date: Thu, 2 Dec 2021 15:43:01 +0800
Subject: [PATCH 091/330] Add support for dispatching blackmap to segment servers. (#89)

* Add support for dispatching blackmap to segment servers.

This patch adds support for dispatching the blackmap to segment
servers. The basic idea is that we iterate over the active relations'
oids and check whether each relation's owner/tablespace/namespace
matches one of the blackmap entries dispatched by the diskquota worker
on the QD. If a relation should be blocked, we add its relfilenode,
together with the relfilenodes of its toast relation, toast index
relation, appendonly auxiliary relations, and those auxiliary
relations' indexes, to the global blackmap on the segment servers.

This patch is a prerequisite of the hard-limit feature.

This patch also introduces the diskquota.blackmap view. Users can query
the current blocking status with 'SELECT * FROM diskquota.blackmap'.
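For example, the view can be joined against pg_class to see which
relations are currently blocked on the segments (this mirrors the
regression test added below):

    SELECT rel.relname, be.target_type
      FROM gp_dist_random('pg_class') AS rel,
           gp_dist_random('diskquota.blackmap') AS be
     WHERE rel.relfilenode = be.relnode AND be.relnode <> 0;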
Co-authored-by: Xuebin Su Co-authored-by: Hao Zhang --- diskquota--1.0--2.0.sql | 19 + diskquota--2.0--1.0.sql | 10 + diskquota--2.0.sql | 19 + diskquota.h | 2 +- enforcement.c | 4 +- gp_activetable.c | 1 + quotamodel.c | 623 +++++++++++++++++++- tests/isolation2/expected/test_blackmap.out | 241 ++++++++ tests/isolation2/isolation2_schedule | 1 + tests/isolation2/sql/test_blackmap.sql | 178 ++++++ tests/regress/diskquota_schedule | 1 + tests/regress/expected/test_blackmap.out | 283 +++++++++ tests/regress/sql/test_blackmap.sql | 196 ++++++ 13 files changed, 1549 insertions(+), 29 deletions(-) create mode 100644 tests/isolation2/expected/test_blackmap.out create mode 100644 tests/isolation2/sql/test_blackmap.sql create mode 100644 tests/regress/expected/test_blackmap.out create mode 100644 tests/regress/sql/test_blackmap.sql diff --git a/diskquota--1.0--2.0.sql b/diskquota--1.0--2.0.sql index 9803eeb59de..9e9b7abb5f3 100644 --- a/diskquota--1.0--2.0.sql +++ b/diskquota--1.0--2.0.sql @@ -32,6 +32,25 @@ RETURNS void STRICT AS 'MODULE_PATHNAME', 'diskquota_resume' LANGUAGE C; +CREATE TYPE diskquota.blackmap_entry AS + (target_oid oid, database_oid oid, tablespace_oid oid, target_type integer, seg_exceeded boolean); +CREATE FUNCTION diskquota.refresh_blackmap(diskquota.blackmap_entry[], oid[]) +RETURNS void STRICT +AS 'MODULE_PATHNAME' +LANGUAGE C; + +CREATE TYPE diskquota.blackmap_entry_detail AS + (target_type text, target_oid oid, database_oid oid, + tablespace_oid oid, seg_exceeded boolean, dbnode oid, spcnode oid, relnode oid, segid int); + +CREATE FUNCTION diskquota.show_blackmap() +RETURNS setof diskquota.blackmap_entry_detail +AS 'MODULE_PATHNAME', 'show_blackmap' +LANGUAGE C; + +CREATE VIEW diskquota.blackmap AS + SELECT * FROM diskquota.show_blackmap() AS BM; + ALTER TABLE diskquota.table_size ADD COLUMN segid smallint DEFAULT -1; ALTER TABLE diskquota.table_size DROP CONSTRAINT table_size_pkey; ALTER TABLE diskquota.table_size ADD PRIMARY KEY (tableid,segid); diff --git a/diskquota--2.0--1.0.sql b/diskquota--2.0--1.0.sql index c79c00c18de..5b5fa22043e 100644 --- a/diskquota--2.0--1.0.sql +++ b/diskquota--2.0--1.0.sql @@ -8,6 +8,16 @@ DROP FUNCTION IF EXISTS diskquota.pause(); DROP FUNCTION IF EXISTS diskquota.resume(); +DROP FUNCTION IF EXISTS diskquota.refresh_blackmap(diskquota.blackmap_entry[], oid[]); + +DROP TYPE IF EXISTS diskquota.blackmap_entry; + +DROP VIEW IF EXISTS diskquota.blackmap; + +DROP FUNCTION IF EXISTS diskquota.show_blackmap(); + +DROP TYPE IF EXISTS diskquota.blackmap_entry_detail; + CREATE OR REPLACE VIEW diskquota.show_fast_schema_quota_view AS select pgns.nspname as schema_name, pgc.relnamespace as schema_oid, qc.quotalimitMB as quota_in_mb, sum(ts.size) as nspsize_in_bytes from diskquota.table_size as ts, diff --git a/diskquota--2.0.sql b/diskquota--2.0.sql index 0147c79a87a..89903967ce7 100644 --- a/diskquota--2.0.sql +++ b/diskquota--2.0.sql @@ -48,6 +48,25 @@ RETURNS void STRICT AS 'MODULE_PATHNAME' LANGUAGE C; +CREATE TYPE diskquota.blackmap_entry AS + (target_oid oid, database_oid oid, tablespace_oid oid, target_type integer, seg_exceeded boolean); +CREATE FUNCTION diskquota.refresh_blackmap(diskquota.blackmap_entry[], oid[]) +RETURNS void STRICT +AS 'MODULE_PATHNAME' +LANGUAGE C; + +CREATE TYPE diskquota.blackmap_entry_detail AS + (target_type text, target_oid oid, database_oid oid, + tablespace_oid oid, seg_exceeded boolean, dbnode oid, spcnode oid, relnode oid, segid int); + +CREATE FUNCTION diskquota.show_blackmap() +RETURNS setof 
diskquota.blackmap_entry_detail +AS 'MODULE_PATHNAME', 'show_blackmap' +LANGUAGE C; + +CREATE VIEW diskquota.blackmap AS + SELECT * FROM diskquota.show_blackmap() AS BM; + CREATE TABLE diskquota.table_size (tableid oid, size bigint, segid smallint, PRIMARY KEY(tableid, segid)); CREATE TABLE diskquota.state (state int, PRIMARY KEY(state)); diff --git a/diskquota.h b/diskquota.h index 5e2fc96de0f..05614f40b20 100644 --- a/diskquota.h +++ b/diskquota.h @@ -106,7 +106,7 @@ extern void init_disk_quota_shmem(void); extern void init_disk_quota_model(void); extern void refresh_disk_quota_model(bool force); extern bool check_diskquota_state_is_ready(void); -extern bool quota_check_common(Oid reloid); +extern bool quota_check_common(Oid reloid, RelFileNode *relfilenode); /* quotaspi interface */ extern void init_disk_quota_hook(void); diff --git a/enforcement.c b/enforcement.c index c1fe6af6ea6..d8ccc13ec54 100644 --- a/enforcement.c +++ b/enforcement.c @@ -70,7 +70,7 @@ quota_check_ExecCheckRTPerms(List *rangeTable, bool ereport_on_violation) * table's owner are reached. This function will ereport(ERROR) when * quota limit exceeded. */ - quota_check_common(rte->relid); + quota_check_common(rte->relid, NULL /*relfilenode*/); /* Check the indexes of the this relation */ relation = try_relation_open(rte->relid, AccessShareLock, false); if (!relation) @@ -83,7 +83,7 @@ quota_check_ExecCheckRTPerms(List *rangeTable, bool ereport_on_violation) { foreach(oid, indexIds) { - quota_check_common(lfirst_oid(oid)); + quota_check_common(lfirst_oid(oid), NULL /*relfilenode*/); } } } diff --git a/gp_activetable.c b/gp_activetable.c index b7f5eacd213..004427927e5 100644 --- a/gp_activetable.c +++ b/gp_activetable.c @@ -153,6 +153,7 @@ active_table_hook_smgrextend(RelFileNodeBackend rnode) (*prev_file_extend_hook) (rnode); report_active_table_helper(&rnode); + quota_check_common(InvalidOid /*reloid*/, &rnode.node); } /* diff --git a/quotamodel.c b/quotamodel.c index 678c85a880d..79671ecec3a 100644 --- a/quotamodel.c +++ b/quotamodel.c @@ -18,12 +18,15 @@ #include "access/heapam.h" #include "access/htup_details.h" #include "access/reloptions.h" +#include "access/skey.h" #include "access/transam.h" #include "access/tupdesc.h" #include "access/xact.h" +#include "catalog/indexing.h" #include "catalog/namespace.h" #include "catalog/pg_class.h" #include "catalog/pg_database.h" +#include "catalog/pg_tablespace.h" #include "catalog/pg_type.h" #include "commands/dbcommands.h" #include "commands/tablespace.h" @@ -35,9 +38,11 @@ #include "storage/ipc.h" #include "storage/latch.h" #include "storage/lwlock.h" +#include "storage/relfilenode.h" #include "storage/shmem.h" #include "utils/builtins.h" #include "utils/guc.h" +#include "utils/faultinjector.h" #include "utils/fmgroids.h" #include "utils/lsyscache.h" #include "utils/snapmgr.h" @@ -144,12 +149,25 @@ struct BlackMapEntry Oid databaseoid; Oid tablespaceoid; uint32 targettype; + /* + * QD index the blackmap by (targetoid, databaseoid, tablespaceoid, targettype). + * QE index the blackmap by (relfilenode). + */ + RelFileNode relfilenode; }; struct GlobalBlackMapEntry { BlackMapEntry keyitem; bool segexceeded; + /* + * When the quota limit is exceeded on segment servers, + * we need an extra auxiliary field to preserve the quota + * limitation information for error message on segment + * servers, e.g., targettype, targetoid. This field is + * useful on segment servers. 
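+	 *
+	 * To restate the key layout: on the QD, keyitem's (targetoid,
+	 * databaseoid, tablespaceoid, targettype) identify an entry and
+	 * keyitem.relfilenode stays zeroed; on QEs, keyitem.relfilenode is
+	 * the lookup key and this field supplies the quota identity for
+	 * the error report.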
+ */ + BlackMapEntry auxblockinfo; }; /* local blacklist for which exceed their quota limit */ @@ -192,7 +210,7 @@ static Size DiskQuotaShmemSize(void); static void disk_quota_shmem_startup(void); static void init_lwlocks(void); -static void export_exceeded_error(GlobalBlackMapEntry *entry); +static void export_exceeded_error(GlobalBlackMapEntry *entry, bool skip_name); void truncateStringInfo(StringInfo str, int nchars); static void @@ -1340,40 +1358,57 @@ get_rel_owner_schema_tablespace(Oid relid, Oid *ownerOid, Oid *nsOid, Oid *table return found; } +static bool +check_blackmap_by_relfilenode(RelFileNode relfilenode) +{ + bool found; + BlackMapEntry keyitem; + GlobalBlackMapEntry *entry; + + SIMPLE_FAULT_INJECTOR("check_blackmap_by_relfilenode"); + + memset(&keyitem, 0, sizeof(keyitem)); + memcpy(&keyitem.relfilenode, &relfilenode, sizeof(RelFileNode)); + + LWLockAcquire(diskquota_locks.black_map_lock, LW_SHARED); + entry = hash_search(disk_quota_black_map, + &keyitem, HASH_FIND, &found); + + if (found && entry) + { + GlobalBlackMapEntry segblackentry; + memcpy(&segblackentry.keyitem, &entry->auxblockinfo, sizeof(BlackMapEntry)); + segblackentry.segexceeded = entry->segexceeded; + LWLockRelease(diskquota_locks.black_map_lock); + + export_exceeded_error(&segblackentry, true /*skip_name*/); + return false; + } + LWLockRelease(diskquota_locks.black_map_lock); + return true; +} + /* * Given table oid, check whether quota limit * of table's schema or table's owner are reached. * Do enforcement if quota exceeds. */ -bool -quota_check_common(Oid reloid) +static bool +check_blackmap_by_reloid(Oid reloid) { Oid ownerOid = InvalidOid; Oid nsOid = InvalidOid; Oid tablespaceoid = InvalidOid; bool found; - bool paused; BlackMapEntry keyitem; GlobalBlackMapEntry *entry; - if (!IsTransactionState()) - { - return true; - } - bool found_rel = get_rel_owner_schema_tablespace(reloid, &ownerOid, &nsOid, &tablespaceoid); if (!found_rel) { return true; } - LWLockAcquire(diskquota_locks.paused_lock, LW_SHARED); - paused = *diskquota_paused; - LWLockRelease(diskquota_locks.paused_lock); - - if (paused) - return true; - LWLockAcquire(diskquota_locks.black_map_lock, LW_SHARED); for (QuotaType type = 0; type < NUM_QUOTA_TYPES; ++type) { @@ -1402,13 +1437,14 @@ quota_check_common(Oid reloid) } keyitem.databaseoid = MyDatabaseId; keyitem.targettype = type; + memset(&keyitem.relfilenode, 0, sizeof(RelFileNode)); entry = hash_search(disk_quota_black_map, &keyitem, HASH_FIND, &found); if (found) { LWLockRelease(diskquota_locks.black_map_lock); - export_exceeded_error(entry); + export_exceeded_error(entry, false /*skip_name*/); return false; } } @@ -1416,6 +1452,35 @@ quota_check_common(Oid reloid) return true; } +/* + * Given relation's oid or relfilenode, check whether the + * quota limits of schema or owner are reached. Do enforcement + * if the quota exceeds. 
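+ *
+ * Two call shapes appear in this patch (both are real call sites):
+ *   quota_check_common(rte->relid, NULL) on the QD checks by oid;
+ *   quota_check_common(InvalidOid, &rnode.node) in the smgrextend
+ *   hook checks by relfilenode.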
+ */ +bool +quota_check_common(Oid reloid, RelFileNode *relfilenode) +{ + bool paused; + + if (!IsTransactionState()) + return true; + + LWLockAcquire(diskquota_locks.paused_lock, LW_SHARED); + paused = *diskquota_paused; + LWLockRelease(diskquota_locks.paused_lock); + + if (paused) + return true; + + if (OidIsValid(reloid)) + return check_blackmap_by_reloid(reloid); + + if (relfilenode) + return check_blackmap_by_relfilenode(*relfilenode); + + return true; +} + /* * invalidate all black entry with a specific dbid in SHM */ @@ -1437,8 +1502,44 @@ invalidate_database_blackmap(Oid dbid) LWLockRelease(diskquota_locks.black_map_lock); } +static char * +GetNamespaceName(Oid spcid, bool skip_name) +{ + if (skip_name) + { + NameData spcstr; + pg_ltoa(spcid, spcstr.data); + return pstrdup(spcstr.data); + } + return get_namespace_name(spcid); +} + +static char * +GetTablespaceName(Oid spcid, bool skip_name) +{ + if (skip_name) + { + NameData spcstr; + pg_ltoa(spcid, spcstr.data); + return pstrdup(spcstr.data); + } + return get_tablespace_name(spcid); +} + +static char * +GetUserName(Oid relowner, bool skip_name) +{ + if (skip_name) + { + NameData namestr; + pg_ltoa(relowner, namestr.data); + return pstrdup(namestr.data); + } + return GetUserNameFromId(relowner); +} + static void -export_exceeded_error(GlobalBlackMapEntry *entry) +export_exceeded_error(GlobalBlackMapEntry *entry, bool skip_name) { BlackMapEntry *blackentry = &entry->keyitem; switch(blackentry->targettype) @@ -1446,39 +1547,509 @@ export_exceeded_error(GlobalBlackMapEntry *entry) case NAMESPACE_QUOTA: ereport(ERROR, (errcode(ERRCODE_DISK_FULL), - errmsg("schema's disk space quota exceeded with name:%s", get_namespace_name(blackentry->targetoid)))); + errmsg("schema's disk space quota exceeded with name:%s", GetNamespaceName(blackentry->targetoid, skip_name)))); break; case ROLE_QUOTA: ereport(ERROR, (errcode(ERRCODE_DISK_FULL), - errmsg("role's disk space quota exceeded with name:%s", GetUserNameFromId(blackentry->targetoid)))); + errmsg("role's disk space quota exceeded with name:%s", GetUserName(blackentry->targetoid, skip_name)))); break; case NAMESPACE_TABLESPACE_QUOTA: if (entry->segexceeded) ereport(ERROR, (errcode(ERRCODE_DISK_FULL), - errmsg("tablespace:%s schema:%s diskquota exceeded per segment quota", get_tablespace_name(blackentry->tablespaceoid), get_namespace_name(blackentry->targetoid)))); + errmsg("tablespace:%s schema:%s diskquota exceeded per segment quota", GetTablespaceName(blackentry->tablespaceoid, skip_name), GetNamespaceName(blackentry->targetoid, skip_name)))); else - ereport(ERROR, (errcode(ERRCODE_DISK_FULL), - errmsg("tablespace:%s schema:%s diskquota exceeded", get_tablespace_name(blackentry->tablespaceoid), get_namespace_name(blackentry->targetoid)))); + errmsg("tablespace:%s schema:%s diskquota exceeded", GetTablespaceName(blackentry->tablespaceoid, skip_name), GetNamespaceName(blackentry->targetoid, skip_name)))); break; case ROLE_TABLESPACE_QUOTA: if (entry->segexceeded) ereport(ERROR, (errcode(ERRCODE_DISK_FULL), - errmsg("tablespace: %s role: %s diskquota exceeded per segment quota", get_tablespace_name(blackentry->tablespaceoid), GetUserNameFromId(blackentry->targetoid)))); + errmsg("tablespace: %s role: %s diskquota exceeded per segment quota", GetTablespaceName(blackentry->tablespaceoid, skip_name), GetUserName(blackentry->targetoid, skip_name)))); else - ereport(ERROR, (errcode(ERRCODE_DISK_FULL), - errmsg("tablespace: %s role: %s diskquota exceeded", 
get_tablespace_name(blackentry->tablespaceoid), GetUserNameFromId(blackentry->targetoid)))); + errmsg("tablespace: %s role: %s diskquota exceeded", GetTablespaceName(blackentry->tablespaceoid, skip_name), GetUserName(blackentry->targetoid, skip_name)))); break; default : ereport(ERROR, (errcode(ERRCODE_DISK_FULL), errmsg("diskquota exceeded, unknown quota type"))); } +} + +/* + * The order of the returned index list is not guaranteed, Don't + * apply the relation_open() to the returned list, or deadlock + * may happen. + */ +static List* +GetIndexOidListByRelid(Oid reloid) +{ + List *result = NIL; + ScanKeyData skey; + SysScanDesc indscan; + Relation indrel; + HeapTuple htup; + + ScanKeyInit(&skey, Anum_pg_index_indrelid, + BTEqualStrategyNumber, F_OIDEQ, reloid); + indrel = heap_open(IndexRelationId, AccessShareLock); + indscan = systable_beginscan(indrel, IndexIndrelidIndexId, + true /*indexOk*/, NULL /*snapshot*/, + 1 /*nkeys*/, &skey); + while (HeapTupleIsValid(htup = systable_getnext(indscan))) + { + Form_pg_index index = (Form_pg_index) GETSTRUCT(htup); + + if (!IndexIsLive(index)) + continue; + + result = lappend_oid(result, index->indexrelid); + } + systable_endscan(indscan); + heap_close(indrel, AccessShareLock); + + return result; +} + +/* + * Get auxiliary relations oid by searching the pg_appendonly table. + */ +static void +GetAppendOnlyEntryAuxOidListByRelid(Oid reloid, Oid *segrelid, + Oid *blkdirrelid, Oid *visimaprelid) +{ + ScanKeyData skey; + SysScanDesc scan; + TupleDesc tupDesc; + Relation aorel; + HeapTuple htup; + Datum auxoid; + bool isnull; + + ScanKeyInit(&skey, Anum_pg_appendonly_relid, + BTEqualStrategyNumber, F_OIDEQ, reloid); + aorel = heap_open(AppendOnlyRelationId, AccessShareLock); + tupDesc = RelationGetDescr(aorel); + scan = systable_beginscan(aorel, AppendOnlyRelidIndexId, + true /*indexOk*/, NULL /*snapshot*/, + 1 /*nkeys*/, &skey); + while (HeapTupleIsValid(htup = systable_getnext(scan))) + { + if (segrelid) + { + auxoid = heap_getattr(htup, + Anum_pg_appendonly_segrelid, + tupDesc, &isnull); + if (!isnull) + *segrelid = DatumGetObjectId(auxoid); + } + + if (blkdirrelid) + { + auxoid = heap_getattr(htup, + Anum_pg_appendonly_blkdirrelid, + tupDesc, &isnull); + if (!isnull) + *blkdirrelid = DatumGetObjectId(auxoid); + } + + if (visimaprelid) + { + auxoid = heap_getattr(htup, + Anum_pg_appendonly_visimaprelid, + tupDesc, &isnull); + if (!isnull) + *visimaprelid = DatumGetObjectId(auxoid); + } + } + + systable_endscan(scan); + heap_close(aorel, AccessShareLock); +} + +/* + * refresh_blackmap() takes two arguments. + * The first argument is an array of blackmap entries on QD. + * The second argument is an array of active relations' oid. + * + * The basic idea is that, we iterate over the active relations' oid, check that + * whether the relation's owner/tablespace/namespace is in one of the blackmap + * entries dispatched from diskquota worker from QD. If the relation should be + * blocked, we then add its relfilenode together with the toast, toast index, + * appendonly, appendonly index relations' relfilenodes to the global blackmap. + * Note that, this UDF is called on segment servers by diskquota worker on QD and + * the global blackmap on segment servers is indexed by relfilenode. 
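+ *
+ * A minimal invocation sketch (modeled on the block_relation_on_seg0()
+ * helper in the tests; nsoid/dboid/spcoid/reloid are placeholders):
+ *
+ *   SELECT diskquota.refresh_blackmap(
+ *       ARRAY[ROW(nsoid, dboid, spcoid, 0, false)]::diskquota.blackmap_entry[],
+ *       ARRAY[reloid]::oid[])
+ *   FROM gp_dist_random('gp_id') WHERE gp_segment_id=0;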
+ */
+PG_FUNCTION_INFO_V1(refresh_blackmap);
+Datum
+refresh_blackmap(PG_FUNCTION_ARGS)
+{
+	ArrayType  *blackmap_array_type = PG_GETARG_ARRAYTYPE_P(0);
+	ArrayType  *active_oid_array_type = PG_GETARG_ARRAYTYPE_P(1);
+	Oid         blackmap_elem_type = ARR_ELEMTYPE(blackmap_array_type);
+	Oid         active_oid_elem_type = ARR_ELEMTYPE(active_oid_array_type);
+	Datum      *datums;
+	bool       *nulls;
+	int16       elem_width;
+	bool        elem_type_by_val;
+	char        elem_alignment_code;
+	int         count;
+	HeapTupleHeader lt;
+	bool        segexceeded;
+	GlobalBlackMapEntry *blackmapentry;
+	HASH_SEQ_STATUS hash_seq;
+	HTAB       *local_blackmap;
+	HASHCTL     hashctl;
+
+	if (!superuser())
+		ereport(ERROR,
+				(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+				 errmsg("must be superuser to update blackmap")));
+
+	if (ARR_NDIM(blackmap_array_type) > 1 || ARR_NDIM(active_oid_array_type) > 1)
+		ereport(ERROR, (errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR), errmsg("1-dimensional array needed")));
+
+	/* Firstly, clear the blackmap entries. */
+	LWLockAcquire(diskquota_locks.black_map_lock, LW_EXCLUSIVE);
+	hash_seq_init(&hash_seq, disk_quota_black_map);
+	while ((blackmapentry = hash_seq_search(&hash_seq)) != NULL)
+		hash_search(disk_quota_black_map, &blackmapentry->keyitem, HASH_REMOVE, NULL);
+	LWLockRelease(diskquota_locks.black_map_lock);
+
+	if (SPI_connect() != SPI_OK_CONNECT)
+		ereport(ERROR,
+				(errcode(ERRCODE_INTERNAL_ERROR),
+				 errmsg("unable to connect to execute internal query")));
+
+	/*
+	 * Secondly, iterate over the blackmap entries and add them to a local
+	 * black map on the segment server, so that the third step can check
+	 * whether a given relation (by oid) should be blocked in O(1) time.
+	 */
+	memset(&hashctl, 0, sizeof(hashctl));
+	hashctl.keysize = sizeof(BlackMapEntry);
+	hashctl.entrysize = sizeof(GlobalBlackMapEntry);
+	hashctl.hcxt = CurrentMemoryContext;
+	hashctl.hash = tag_hash;
+
+	local_blackmap = hash_create("local_blackmap",
+								 1024, &hashctl,
+								 HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION);
+	get_typlenbyvalalign(blackmap_elem_type, &elem_width, &elem_type_by_val, &elem_alignment_code);
+	deconstruct_array(blackmap_array_type, blackmap_elem_type, elem_width,
+					  elem_type_by_val, elem_alignment_code, &datums, &nulls, &count);
+	for (int i = 0; i < count; ++i)
+	{
+		BlackMapEntry keyitem;
+		bool          isnull;
+
+		if (nulls[i])
+			continue;
+
+		memset(&keyitem, 0, sizeof(BlackMapEntry));
+		lt = DatumGetHeapTupleHeader(datums[i]);
+		keyitem.targetoid = DatumGetObjectId(GetAttributeByNum(lt, 1, &isnull));
+		keyitem.databaseoid = DatumGetObjectId(GetAttributeByNum(lt, 2, &isnull));
+		keyitem.tablespaceoid = DatumGetObjectId(GetAttributeByNum(lt, 3, &isnull));
+		keyitem.targettype = DatumGetInt32(GetAttributeByNum(lt, 4, &isnull));
+		/*
+		 * If the current quota limit type is NAMESPACE_TABLESPACE_QUOTA or
+		 * ROLE_TABLESPACE_QUOTA, we should explicitly set DEFAULTTABLESPACE_OID
+		 * for relations whose reltablespace is InvalidOid.
+		 */
+		if ((keyitem.targettype == NAMESPACE_TABLESPACE_QUOTA ||
+			 keyitem.targettype == ROLE_TABLESPACE_QUOTA) &&
+			!OidIsValid(keyitem.tablespaceoid))
+			keyitem.tablespaceoid = DEFAULTTABLESPACE_OID;
+		segexceeded = DatumGetBool(GetAttributeByNum(lt, 5, &isnull));
+
+		blackmapentry = hash_search(local_blackmap, &keyitem, HASH_ENTER_NULL, NULL);
+		if (blackmapentry)
+			blackmapentry->segexceeded = segexceeded;
+	}
+
+	/*
+	 * Thirdly, iterate over the active oid list and check whether each
+	 * relation should be blocked. If it should, we insert its relfilenode,
+	 * together with those of its toast, toast index, and appendonly
+	 * auxiliary relations (and their indexes), into the global black map.
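+	 *
+	 * Because only relfilenodes reach the QE-side map, the error path on
+	 * segments cannot resolve names and prints numeric oids instead (see
+	 * the skip_name handling in export_exceeded_error()).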
+ */ + get_typlenbyvalalign(active_oid_elem_type, &elem_width, &elem_type_by_val, &elem_alignment_code); + deconstruct_array(active_oid_array_type, active_oid_elem_type, elem_width, + elem_type_by_val, elem_alignment_code, &datums, &nulls, &count); + for (int i = 0; i < count; ++i) + { + Oid active_oid = InvalidOid; + HeapTuple tuple; + if (nulls[i]) + continue; + + active_oid = DatumGetObjectId(datums[i]); + if (!OidIsValid(active_oid)) + continue; + + tuple = SearchSysCacheCopy1(RELOID, active_oid); + if (HeapTupleIsValid(tuple)) + { + Form_pg_class form = (Form_pg_class) GETSTRUCT(tuple); + Oid relnamespace = form->relnamespace; + Oid reltablespace = OidIsValid(form->reltablespace) ? + form->reltablespace : DEFAULTTABLESPACE_OID; + Oid relowner = form->relowner; + BlackMapEntry keyitem; + bool found; + + for (QuotaType type = 0; type < NUM_QUOTA_TYPES; ++type) + { + /* + * Check that if the current relation should be blocked. + * FIXME: The logic of preparing the blackmap searching + * key is identical to check_blackmap_by_reloid(), we can + * make it into a static helper function. + */ + memset(&keyitem, 0, sizeof(BlackMapEntry)); + if (type == ROLE_QUOTA || type == ROLE_TABLESPACE_QUOTA) + keyitem.targetoid = relowner; + else if (type == NAMESPACE_QUOTA || type == NAMESPACE_TABLESPACE_QUOTA) + keyitem.targetoid = relnamespace; + if (type == ROLE_TABLESPACE_QUOTA || type == NAMESPACE_TABLESPACE_QUOTA) + keyitem.tablespaceoid = reltablespace; + keyitem.databaseoid = MyDatabaseId; + keyitem.targettype = type; + + blackmapentry = hash_search(local_blackmap, + &keyitem, HASH_FIND, &found); + if (found && blackmapentry) + { + /* + * If the current relation is blocked, we should add the relfilenode + * of itself together with the relfilenodes of its toast relation and + * appendonly relations to the global black map. + */ + List *oid_list = NIL; + ListCell *cell = NULL; + Oid toastrelid = form->reltoastrelid; + Oid aosegrelid = InvalidOid; + Oid aoblkdirrelid = InvalidOid; + Oid aovisimaprelid = InvalidOid; + oid_list = lappend_oid(oid_list, active_oid); + + /* Append toast relation and toast index to the oid_list if any. */ + if (OidIsValid(toastrelid)) + { + oid_list = lappend_oid(oid_list, toastrelid); + oid_list = list_concat(oid_list, GetIndexOidListByRelid(toastrelid)); + } + + /* Append ao auxiliary relations and their indexes to the oid_list if any. */ + GetAppendOnlyEntryAuxOidListByRelid(active_oid, &aosegrelid, + &aoblkdirrelid, &aovisimaprelid); + if (OidIsValid(aosegrelid)) + { + oid_list = lappend_oid(oid_list, aosegrelid); + oid_list = list_concat(oid_list, GetIndexOidListByRelid(aosegrelid)); + } + if (OidIsValid(aoblkdirrelid)) + { + oid_list = lappend_oid(oid_list, aoblkdirrelid); + oid_list = list_concat(oid_list, GetIndexOidListByRelid(aoblkdirrelid)); + } + if (OidIsValid(aovisimaprelid)) + { + oid_list = lappend_oid(oid_list, aovisimaprelid); + oid_list = list_concat(oid_list, GetIndexOidListByRelid(aovisimaprelid)); + } + + /* Iterate over the oid_list and add their relfilenodes to the blackmap. */ + foreach(cell, oid_list) + { + Oid curr_oid = lfirst_oid(cell); + HeapTuple curr_tuple = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(curr_oid)); + if (HeapTupleIsValid(curr_tuple)) + { + Form_pg_class curr_form = (Form_pg_class) GETSTRUCT(curr_tuple); + Oid curr_reltablespace = + OidIsValid(curr_form->reltablespace) ? 
+ curr_form->reltablespace : DEFAULTTABLESPACE_OID; + RelFileNode relfilenode = + { .dbNode = MyDatabaseId, + .relNode = curr_form->relfilenode, + .spcNode = curr_reltablespace }; + bool found; + GlobalBlackMapEntry *blocked_filenode_entry; + BlackMapEntry blocked_filenode_keyitem; + + memset(&blocked_filenode_keyitem, 0, sizeof(BlackMapEntry)); + memcpy(&blocked_filenode_keyitem.relfilenode, &relfilenode, sizeof(RelFileNode)); + + LWLockAcquire(diskquota_locks.black_map_lock, LW_EXCLUSIVE); + blocked_filenode_entry = hash_search(disk_quota_black_map, + &blocked_filenode_keyitem, + HASH_ENTER_NULL, &found); + if (!found && blocked_filenode_entry) + { + memcpy(&blocked_filenode_entry->auxblockinfo, &keyitem, sizeof(BlackMapEntry)); + blocked_filenode_entry->segexceeded = blackmapentry->segexceeded; + } + LWLockRelease(diskquota_locks.black_map_lock); + } + } + /* + * The current relation may satisfy multiple blocking conditions, + * we only add it once. + */ + break; + } + } + } + } + + SPI_finish(); + PG_RETURN_VOID(); +} + +/* + * show_blackmap() provides developers or users to dump the blackmap in shared + * memory on a single server. If you want to query blackmap on segment servers, + * you should dispatch this query to segments. + */ +PG_FUNCTION_INFO_V1(show_blackmap); +Datum +show_blackmap(PG_FUNCTION_ARGS) +{ + FuncCallContext *funcctx; + GlobalBlackMapEntry *blackmap_entry; + struct BlackMapCtx { + HASH_SEQ_STATUS blackmap_seq; + HTAB *blackmap; + } *blackmap_ctx; + + if (SRF_IS_FIRSTCALL()) + { + TupleDesc tupdesc; + MemoryContext oldcontext; + HASHCTL hashctl; + HASH_SEQ_STATUS hash_seq; + + /* Create a function context for cross-call persistence. */ + funcctx = SRF_FIRSTCALL_INIT(); + + /* Switch to memory context appropriate for multiple function calls */ + oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx); + + tupdesc = CreateTemplateTupleDesc(9, false /*hasoid*/); + TupleDescInitEntry(tupdesc, (AttrNumber) 1, "target_type", TEXTOID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber) 2, "target_oid", OIDOID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber) 3, "database_oid", OIDOID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber) 4, "tablespace_oid", OIDOID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber) 5, "seg_exceeded", BOOLOID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber) 6, "dbnode", OIDOID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber) 7, "spcnode", OIDOID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber) 8, "relnode", OIDOID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber) 9, "segid", INT4OID, -1 /*typmod*/, 0 /*attdim*/); + + funcctx->tuple_desc = BlessTupleDesc(tupdesc); + + /* Create a local hash table and fill it with entries from shared memory. 
*/ + memset(&hashctl, 0, sizeof(hashctl)); + hashctl.keysize = sizeof(BlackMapEntry); + hashctl.entrysize = sizeof(GlobalBlackMapEntry); + hashctl.hcxt = CurrentMemoryContext; + hashctl.hash = tag_hash; + + blackmap_ctx = (struct BlackMapCtx *) palloc(sizeof(struct BlackMapCtx)); + blackmap_ctx->blackmap = hash_create("blackmap_ctx blackmap", + 1024, &hashctl, + HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); + + LWLockAcquire(diskquota_locks.black_map_lock, LW_SHARED); + hash_seq_init(&hash_seq, disk_quota_black_map); + while ((blackmap_entry = hash_seq_search(&hash_seq)) != NULL) + { + GlobalBlackMapEntry *local_blackmap_entry = NULL; + local_blackmap_entry = hash_search(blackmap_ctx->blackmap, + &blackmap_entry->keyitem, + HASH_ENTER_NULL, NULL); + if (local_blackmap_entry) + { + memcpy(&local_blackmap_entry->keyitem, + &blackmap_entry->keyitem, sizeof(BlackMapEntry)); + local_blackmap_entry->segexceeded = blackmap_entry->segexceeded; + memcpy(&local_blackmap_entry->auxblockinfo, + &blackmap_entry->auxblockinfo, sizeof(BlackMapEntry)); + } + } + LWLockRelease(diskquota_locks.black_map_lock); + + /* Setup first calling context. */ + hash_seq_init(&(blackmap_ctx->blackmap_seq), + blackmap_ctx->blackmap); + funcctx->user_fctx = (void *) blackmap_ctx; + MemoryContextSwitchTo(oldcontext); + } + + funcctx = SRF_PERCALL_SETUP(); + blackmap_ctx = (struct BlackMapCtx *) funcctx->user_fctx; + + while ((blackmap_entry = hash_seq_search(&(blackmap_ctx->blackmap_seq))) != NULL) + { + Datum result; + Datum values[9]; + bool nulls[9]; + HeapTuple tuple; + BlackMapEntry keyitem; + char targettype_str[32]; + RelFileNode blocked_relfilenode; + + memcpy(&blocked_relfilenode, + &blackmap_entry->keyitem.relfilenode, sizeof(RelFileNode)); + /* + * If the blackmap entry is indexed by relfilenode, we dump the blocking + * condition from auxblockinfo. 
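+	 * That way the view reports the quota target (type and oid) that
+	 * caused the block rather than the raw relfilenode key.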
+ */ + if (!OidIsValid(blocked_relfilenode.relNode)) + memcpy(&keyitem, &blackmap_entry->keyitem, sizeof(keyitem)); + else + memcpy(&keyitem, &blackmap_entry->auxblockinfo, sizeof(keyitem)); + memset(targettype_str, 0, sizeof(targettype_str)); + + switch ((QuotaType) keyitem.targettype) + { + case ROLE_QUOTA: + strncpy(targettype_str, "ROLE_QUOTA", 10); + break; + case NAMESPACE_QUOTA: + strncpy(targettype_str, "NAMESPACE_QUOTA", 15); + break; + case ROLE_TABLESPACE_QUOTA: + strncpy(targettype_str, "ROLE_TABLESPACE_QUOTA", 21); + break; + case NAMESPACE_TABLESPACE_QUOTA: + strncpy(targettype_str, "NAMESPACE_TABLESPACE_QUOTA", 26); + break; + default: + strncpy(targettype_str, "UNKNOWN", 7); + break; + } + + values[0] = CStringGetTextDatum(targettype_str); + values[1] = ObjectIdGetDatum(keyitem.targetoid); + values[2] = ObjectIdGetDatum(keyitem.databaseoid); + values[3] = ObjectIdGetDatum(keyitem.tablespaceoid); + values[4] = BoolGetDatum(blackmap_entry->segexceeded); + values[5] = ObjectIdGetDatum(blocked_relfilenode.dbNode); + values[6] = ObjectIdGetDatum(blocked_relfilenode.spcNode); + values[7] = ObjectIdGetDatum(blocked_relfilenode.relNode); + values[8] = Int32GetDatum(GpIdentity.segindex); + + memset(nulls, false, sizeof(nulls)); + tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls); + result = HeapTupleGetDatum(tuple); + + SRF_RETURN_NEXT(funcctx, result); + } + SRF_RETURN_DONE(funcctx); } diff --git a/tests/isolation2/expected/test_blackmap.out b/tests/isolation2/expected/test_blackmap.out new file mode 100644 index 00000000000..ee1514ada56 --- /dev/null +++ b/tests/isolation2/expected/test_blackmap.out @@ -0,0 +1,241 @@ +-- +-- This file contains tests for dispatching blackmap and canceling +-- queries in smgrextend hook by relation's relfilenode. +-- + +CREATE OR REPLACE FUNCTION block_relation_on_seg0(rel regclass, block_type text, segexceeded boolean) RETURNS void AS $$ /*in func*/ DECLARE /*in func*/ bt int; /*in func*/ targetoid oid; /*in func*/ BEGIN /*in func*/ CASE block_type /*in func*/ WHEN 'NAMESPACE' THEN /*in func*/ bt = 0; /*in func*/ SELECT relnamespace INTO targetoid /*in func*/ FROM pg_class WHERE relname=rel::text; /*in func*/ WHEN 'ROLE' THEN /*in func*/ bt = 1; /*in func*/ SELECT relowner INTO targetoid /*in func*/ FROM pg_class WHERE relname=rel::text; /*in func*/ WHEN 'NAMESPACE_TABLESPACE' THEN /*in func*/ bt = 2; /*in func*/ SELECT relnamespace INTO targetoid /*in func*/ FROM pg_class WHERE relname=rel::text; /*in func*/ WHEN 'ROLE_TABLESPACE' THEN /*in func*/ bt = 3; /*in func*/ SELECT relowner INTO targetoid /*in func*/ FROM pg_class WHERE relname=rel::text; /*in func*/ END CASE; /*in func*/ PERFORM diskquota.refresh_blackmap( /*in func*/ ARRAY[ /*in func*/ ROW(targetoid, /*in func*/ (SELECT oid FROM pg_database WHERE datname=current_database()), /*in func*/ (SELECT reltablespace FROM pg_class WHERE relname=rel::text), /*in func*/ bt, /*in func*/ segexceeded) /*in func*/ ]::diskquota.blackmap_entry[], /*in func*/ ARRAY[rel]::oid[]) /*in func*/ FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; /*in func*/ END; $$ /*in func*/ LANGUAGE 'plpgsql'; +CREATE + +-- 1. Test canceling the extending of an ordinary table. +CREATE TABLE blocked_t1(i int) DISTRIBUTED BY (i); +CREATE +INSERT INTO blocked_t1 SELECT generate_series(1, 100); +INSERT 100 +-- Inject 'suspension' to check_blackmap_by_relfilenode on seg0. 
+SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) + +-- Insert a small amount of data into blocked_t1. It will hang up at check_blackmap_by_relfilenode(). +1&: INSERT INTO blocked_t1 SELECT generate_series(1, 10000); + +-- Dispatch blackmap to seg0. +SELECT block_relation_on_seg0('blocked_t1'::regclass, 'NAMESPACE'::text, false); + block_relation_on_seg0 +------------------------ + +(1 row) + +SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) + +-- Session 1 will return and emit an error message saying that the quota limit is exceeded on seg0. +1<: <... completed> +ERROR: schema's disk space quota exceeded with name:2200 (seg0 127.0.0.1:6002 pid=86819) + +-- Clean up the blackmap on seg0. +SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_blackmap +------------------ + +(1 row) + +-- 2. Test canceling the extending of a toast relation. +CREATE TABLE blocked_t2(i text) DISTRIBUTED BY (i); +CREATE +INSERT INTO blocked_t2 SELECT generate_series(1, 100); +INSERT 100 +-- Inject 'suspension' to check_blackmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) + +-- Insert a small amount of data into blocked_t2. It will hang up at check_blackmap_by_relfilenode(). +1&: INSERT INTO blocked_t2 SELECT generate_series(1, 10000); + +-- Dispatch blackmap to seg0. +SELECT block_relation_on_seg0('blocked_t2'::regclass, 'NAMESPACE'::text, false); + block_relation_on_seg0 +------------------------ + +(1 row) + +SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) + +-- Session 1 will return and emit an error message saying that the quota limit is exceeded on seg0. +1<: <... completed> +ERROR: schema's disk space quota exceeded with name:2200 (seg0 127.0.0.1:6002 pid=86819) + +-- Clean up the blackmap on seg0. +SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_blackmap +------------------ + +(1 row) + +-- 3. Test canceling the extending of an appendonly relation. +CREATE TABLE blocked_t3(i int) WITH (appendonly=true) DISTRIBUTED BY (i); +CREATE +INSERT INTO blocked_t3 SELECT generate_series(1, 100); +INSERT 100 +-- Inject 'suspension' to check_blackmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) + +-- Insert a small amount of data into blocked_t3. It will hang up at check_blackmap_by_relfilenode(). +1&: INSERT INTO blocked_t3 SELECT generate_series(1, 10000); + +-- Dispatch blackmap to seg0. 
+SELECT block_relation_on_seg0('blocked_t3'::regclass, 'NAMESPACE'::text, false); + block_relation_on_seg0 +------------------------ + +(1 row) + +SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) + +-- Session 1 will return and emit an error message saying that the quota limit is exceeded on seg0. +1<: <... completed> +ERROR: schema's disk space quota exceeded with name:2200 (seg0 127.0.0.1:6002 pid=86819) + +-- Clean up the blackmap on seg0. +SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_blackmap +------------------ + +(1 row) + +-- 4. Test canceling the extending of an index relation. +CREATE TABLE blocked_t4(i int) DISTRIBUTED BY (i); +CREATE +CREATE INDEX blocked_t4_index ON blocked_t4(i); +CREATE +INSERT INTO blocked_t4 SELECT generate_series(1, 100); +INSERT 100 +-- Inject 'suspension' to check_blackmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) + +-- Insert a small amount of data into blocked_t4. It will hang up at check_blackmap_by_relfilenode(). +1&: INSERT INTO blocked_t4 SELECT generate_series(1, 10000); + +-- Dispatch blackmap to seg0. +SELECT block_relation_on_seg0('blocked_t4_index'::regclass, 'NAMESPACE'::text, false); + block_relation_on_seg0 +------------------------ + +(1 row) + +SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) + +-- Session 1 will return and emit an error message saying that the quota limit is exceeded on seg0. +1<: <... completed> +ERROR: schema's disk space quota exceeded with name:2200 (seg0 127.0.0.1:6002 pid=86819) + +-- Clean up the blackmap on seg0. +SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_blackmap +------------------ + +(1 row) + +-- 5. Test error message for NAMESPACE_TABLESPACE_QUOTA when the quota limit is exceeded on segments. +CREATE TABLE blocked_t5(i int) DISTRIBUTED BY (i); +CREATE +INSERT INTO blocked_t5 SELECT generate_series(1, 100); +INSERT 100 +-- Inject 'suspension' to check_blackmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +1&: INSERT INTO blocked_t5 SELECT generate_series(1, 10000); +SELECT block_relation_on_seg0('blocked_t5'::regclass, 'NAMESPACE_TABLESPACE'::text, true); + block_relation_on_seg0 +------------------------ + +(1 row) +SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +1<: <... completed> +ERROR: tablespace:1663 schema:2200 diskquota exceeded per segment quota (seg0 127.0.0.1:6002 pid=86819) +-- Clean up the blackmap on seg0. 
+SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_blackmap +------------------ + +(1 row) + +-- 6. Test error message for ROLE_TABLESPACE_QUOTA when the quota limit is exceeded on segments. +CREATE TABLE blocked_t6(i int) DISTRIBUTED BY (i); +CREATE +INSERT INTO blocked_t6 SELECT generate_series(1, 100); +INSERT 100 +-- Inject 'suspension' to check_blackmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +1&: INSERT INTO blocked_t6 SELECT generate_series(1, 10000); +SELECT block_relation_on_seg0('blocked_t6'::regclass, 'ROLE_TABLESPACE'::text, true); + block_relation_on_seg0 +------------------------ + +(1 row) +SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +1<: <... completed> +ERROR: tablespace: 1663 role: 10 diskquota exceeded per segment quota (seg0 127.0.0.1:6002 pid=86819) +-- Clean up the blackmap on seg0. +SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_blackmap +------------------ + +(1 row) + +-- Do some clean-ups. +DROP TABLE blocked_t1; +DROP +DROP TABLE blocked_t2; +DROP +DROP TABLE blocked_t3; +DROP +DROP TABLE blocked_t4; +DROP +DROP TABLE blocked_t5; +DROP +DROP TABLE blocked_t6; +DROP diff --git a/tests/isolation2/isolation2_schedule b/tests/isolation2/isolation2_schedule index b36d77dda18..85ec69a9961 100644 --- a/tests/isolation2/isolation2_schedule +++ b/tests/isolation2/isolation2_schedule @@ -1,4 +1,5 @@ test: init test: prepare test: test_relation_size +test: test_blackmap test: cleanup diff --git a/tests/isolation2/sql/test_blackmap.sql b/tests/isolation2/sql/test_blackmap.sql new file mode 100644 index 00000000000..2de0691bf3e --- /dev/null +++ b/tests/isolation2/sql/test_blackmap.sql @@ -0,0 +1,178 @@ +-- +-- This file contains tests for dispatching blackmap and canceling +-- queries in smgrextend hook by relation's relfilenode. 
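+--
+-- Each case follows the same pattern: suspend the writing backend at
+-- the 'check_blackmap_by_relfilenode' fault point, dispatch a blackmap
+-- entry for the target relation to seg0, reset the fault, and expect
+-- the blocked INSERT to fail with a quota-exceeded error.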
+-- + +CREATE OR REPLACE FUNCTION block_relation_on_seg0(rel regclass, block_type text, segexceeded boolean) + RETURNS void AS $$ /*in func*/ + DECLARE /*in func*/ + bt int; /*in func*/ + targetoid oid; /*in func*/ + BEGIN /*in func*/ + CASE block_type /*in func*/ + WHEN 'NAMESPACE' THEN /*in func*/ + bt = 0; /*in func*/ + SELECT relnamespace INTO targetoid /*in func*/ + FROM pg_class WHERE relname=rel::text; /*in func*/ + WHEN 'ROLE' THEN /*in func*/ + bt = 1; /*in func*/ + SELECT relowner INTO targetoid /*in func*/ + FROM pg_class WHERE relname=rel::text; /*in func*/ + WHEN 'NAMESPACE_TABLESPACE' THEN /*in func*/ + bt = 2; /*in func*/ + SELECT relnamespace INTO targetoid /*in func*/ + FROM pg_class WHERE relname=rel::text; /*in func*/ + WHEN 'ROLE_TABLESPACE' THEN /*in func*/ + bt = 3; /*in func*/ + SELECT relowner INTO targetoid /*in func*/ + FROM pg_class WHERE relname=rel::text; /*in func*/ + END CASE; /*in func*/ + PERFORM diskquota.refresh_blackmap( /*in func*/ + ARRAY[ /*in func*/ + ROW(targetoid, /*in func*/ + (SELECT oid FROM pg_database WHERE datname=current_database()), /*in func*/ + (SELECT reltablespace FROM pg_class WHERE relname=rel::text), /*in func*/ + bt, /*in func*/ + segexceeded) /*in func*/ + ]::diskquota.blackmap_entry[], /*in func*/ + ARRAY[rel]::oid[]) /*in func*/ + FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; /*in func*/ + END; $$ /*in func*/ +LANGUAGE 'plpgsql'; + +-- 1. Test canceling the extending of an ordinary table. +CREATE TABLE blocked_t1(i int) DISTRIBUTED BY (i); +INSERT INTO blocked_t1 SELECT generate_series(1, 100); +-- Inject 'suspension' to check_blackmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'suspend', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; + +-- Insert a small amount of data into blocked_t1. It will hang up at check_blackmap_by_relfilenode(). +1&: INSERT INTO blocked_t1 SELECT generate_series(1, 10000); + +-- Dispatch blackmap to seg0. +SELECT block_relation_on_seg0('blocked_t1'::regclass, 'NAMESPACE'::text, false); + +SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; + +-- Session 1 will return and emit an error message saying that the quota limit is exceeded on seg0. +1<: + +-- Clean up the blackmap on seg0. +SELECT diskquota.refresh_blackmap( + ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) + FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + +-- 2. Test canceling the extending of a toast relation. +CREATE TABLE blocked_t2(i text) DISTRIBUTED BY (i); +INSERT INTO blocked_t2 SELECT generate_series(1, 100); +-- Inject 'suspension' to check_blackmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'suspend', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; + +-- Insert a small amount of data into blocked_t2. It will hang up at check_blackmap_by_relfilenode(). +1&: INSERT INTO blocked_t2 SELECT generate_series(1, 10000); + +-- Dispatch blackmap to seg0. +SELECT block_relation_on_seg0('blocked_t2'::regclass, 'NAMESPACE'::text, false); + +SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; + +-- Session 1 will return and emit an error message saying that the quota limit is exceeded on seg0. +1<: + +-- Clean up the blackmap on seg0. 
+SELECT diskquota.refresh_blackmap( + ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) + FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + +-- 3. Test canceling the extending of an appendonly relation. +CREATE TABLE blocked_t3(i int) WITH (appendonly=true) DISTRIBUTED BY (i); +INSERT INTO blocked_t3 SELECT generate_series(1, 100); +-- Inject 'suspension' to check_blackmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'suspend', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; + +-- Insert a small amount of data into blocked_t3. It will hang up at check_blackmap_by_relfilenode(). +1&: INSERT INTO blocked_t3 SELECT generate_series(1, 10000); + +-- Dispatch blackmap to seg0. +SELECT block_relation_on_seg0('blocked_t3'::regclass, 'NAMESPACE'::text, false); + +SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; + +-- Session 1 will return and emit an error message saying that the quota limit is exceeded on seg0. +1<: + +-- Clean up the blackmap on seg0. +SELECT diskquota.refresh_blackmap( + ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) + FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + +-- 4. Test canceling the extending of an index relation. +CREATE TABLE blocked_t4(i int) DISTRIBUTED BY (i); +CREATE INDEX blocked_t4_index ON blocked_t4(i); +INSERT INTO blocked_t4 SELECT generate_series(1, 100); +-- Inject 'suspension' to check_blackmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'suspend', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; + +-- Insert a small amount of data into blocked_t4. It will hang up at check_blackmap_by_relfilenode(). +1&: INSERT INTO blocked_t4 SELECT generate_series(1, 10000); + +-- Dispatch blackmap to seg0. +SELECT block_relation_on_seg0('blocked_t4_index'::regclass, 'NAMESPACE'::text, false); + +SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; + +-- Session 1 will return and emit an error message saying that the quota limit is exceeded on seg0. +1<: + +-- Clean up the blackmap on seg0. +SELECT diskquota.refresh_blackmap( + ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) + FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + +-- 5. Test error message for NAMESPACE_TABLESPACE_QUOTA when the quota limit is exceeded on segments. +CREATE TABLE blocked_t5(i int) DISTRIBUTED BY (i); +INSERT INTO blocked_t5 SELECT generate_series(1, 100); +-- Inject 'suspension' to check_blackmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'suspend', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; +1&: INSERT INTO blocked_t5 SELECT generate_series(1, 10000); +SELECT block_relation_on_seg0('blocked_t5'::regclass, 'NAMESPACE_TABLESPACE'::text, true); +SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; +1<: +-- Clean up the blackmap on seg0. +SELECT diskquota.refresh_blackmap( + ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) + FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + +-- 6. Test error message for ROLE_TABLESPACE_QUOTA when the quota limit is exceeded on segments. 
+CREATE TABLE blocked_t6(i int) DISTRIBUTED BY (i); +INSERT INTO blocked_t6 SELECT generate_series(1, 100); +-- Inject 'suspension' to check_blackmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'suspend', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; +1&: INSERT INTO blocked_t6 SELECT generate_series(1, 10000); +SELECT block_relation_on_seg0('blocked_t6'::regclass, 'ROLE_TABLESPACE'::text, true); +SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; +1<: +-- Clean up the blackmap on seg0. +SELECT diskquota.refresh_blackmap( + ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) + FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + +-- Do some clean-ups. +DROP TABLE blocked_t1; +DROP TABLE blocked_t2; +DROP TABLE blocked_t3; +DROP TABLE blocked_t4; +DROP TABLE blocked_t5; +DROP TABLE blocked_t6; diff --git a/tests/regress/diskquota_schedule b/tests/regress/diskquota_schedule index 9bda7c1f8e3..d83acd1435b 100644 --- a/tests/regress/diskquota_schedule +++ b/tests/regress/diskquota_schedule @@ -18,4 +18,5 @@ test: test_pause_and_resume test: test_many_active_tables test: test_fetch_table_stat test: test_appendonly +test: test_blackmap test: clean diff --git a/tests/regress/expected/test_blackmap.out b/tests/regress/expected/test_blackmap.out new file mode 100644 index 00000000000..a85e94ec123 --- /dev/null +++ b/tests/regress/expected/test_blackmap.out @@ -0,0 +1,283 @@ +-- +-- This file contains tests for dispatching and querying blackmap. +-- +CREATE SCHEMA s_blackmap; +SET search_path TO s_blackmap; +-- This function replaces the oid that appears in the auxiliary relation's name +-- with the corresponding relname of that oid. +CREATE OR REPLACE FUNCTION replace_oid_with_relname(given_name text) + RETURNS text AS $$ + BEGIN + RETURN COALESCE( + REGEXP_REPLACE(given_name, + '^(pg_toast_|pg_aoseg_|pg_aovisimap_|pg_aoblkdir_|pg_aocsseg_)\d+', + '\1' || + (SELECT relname FROM pg_class + WHERE oid=REGEXP_REPLACE(given_name, '\D', '', 'g')::oid), 'g'), given_name); + END; +$$ LANGUAGE plpgsql; +CREATE OR REPLACE FUNCTION block_relation_on_seg0(rel regclass, block_type text) + RETURNS void AS $$ + DECLARE + bt int; + targetoid oid; + BEGIN + CASE block_type + WHEN 'NAMESPACE' THEN + bt = 0; + SELECT relnamespace INTO targetoid + FROM pg_class WHERE relname=rel::text; + WHEN 'ROLE' THEN + bt = 1; + SELECT relowner INTO targetoid + FROM pg_class WHERE relname=rel::text; + WHEN 'NAMESPACE_TABLESPACE' THEN + bt = 2; + SELECT relnamespace INTO targetoid + FROM pg_class WHERE relname=rel::text; + WHEN 'ROLE_TABLESPACE' THEN + bt = 3; + SELECT relowner INTO targetoid + FROM pg_class WHERE relname=rel::text; + END CASE; + PERFORM diskquota.refresh_blackmap( + ARRAY[ + ROW(targetoid, + (SELECT oid FROM pg_database WHERE datname=current_database()), + (SELECT reltablespace FROM pg_class WHERE relname=rel::text), + bt, + false) + ]::diskquota.blackmap_entry[], + ARRAY[rel]::oid[]) + FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + END; $$ +LANGUAGE 'plpgsql'; +-- +-- 1. Create an ordinary table and add its oid to blackmap on seg0. +-- Check that its relfilenode is blocked on seg0 by various conditions. +-- +CREATE TABLE blocked_t1(i int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data.
Make sure column(s) chosen are the optimal data distribution key to minimize skew. +-- Insert an entry for blocked_t1 to blackmap on seg0. +SELECT block_relation_on_seg0('blocked_t1'::regclass, 'NAMESPACE'::text); + block_relation_on_seg0 +------------------------ + +(1 row) + +-- Shows that the relfilenode of blocked_t1 is blocked on seg0 by its namespace. +SELECT rel.relname, be.target_type, (be.target_oid=rel.relnamespace) AS namespace_matched + FROM gp_dist_random('pg_class') AS rel, + gp_dist_random('diskquota.blackmap') AS be + WHERE rel.relfilenode=be.relnode AND be.relnode<>0; + relname | target_type | namespace_matched +------------+-----------------+------------------- + blocked_t1 | NAMESPACE_QUOTA | t +(1 row) + +-- Insert an entry for blocked_t1 to blackmap on seg0. +SELECT block_relation_on_seg0('blocked_t1'::regclass, 'ROLE'::text); + block_relation_on_seg0 +------------------------ + +(1 row) + +-- Shows that the relfilenode of blocked_t1 is blocked on seg0 by its owner. +SELECT rel.relname, be.target_type, (be.target_oid=rel.relowner) AS owner_matched + FROM gp_dist_random('pg_class') AS rel, + gp_dist_random('diskquota.blackmap') AS be + WHERE rel.relfilenode=be.relnode AND be.relnode<>0; + relname | target_type | owner_matched +------------+-------------+--------------- + blocked_t1 | ROLE_QUOTA | t +(1 row) + +-- Create a tablespace to test the rest of blocking types. +\! mkdir /tmp/blocked_space +CREATE TABLESPACE blocked_space LOCATION '/tmp/blocked_space'; +ALTER TABLE blocked_t1 SET TABLESPACE blocked_space; +-- Insert an entry for blocked_t1 to blackmap on seg0. +SELECT block_relation_on_seg0('blocked_t1'::regclass, 'NAMESPACE_TABLESPACE'::text); + block_relation_on_seg0 +------------------------ + +(1 row) + +-- Shows that the relfilenode of blocked_t1 is blocked on seg0 by its namespace and tablespace. +SELECT rel.relname, be.target_type, + (be.target_oid=rel.relnamespace) AS namespace_matched, + (be.tablespace_oid=rel.reltablespace) AS tablespace_matched + FROM gp_dist_random('pg_class') AS rel, + gp_dist_random('diskquota.blackmap') AS be + WHERE rel.relfilenode=be.relnode AND be.relnode<>0; + relname | target_type | namespace_matched | tablespace_matched +------------+----------------------------+-------------------+-------------------- + blocked_t1 | NAMESPACE_TABLESPACE_QUOTA | t | t +(1 row) + +-- Insert an entry for blocked_t1 to blackmap on seg0. +SELECT block_relation_on_seg0('blocked_t1'::regclass, 'ROLE_TABLESPACE'::text); + block_relation_on_seg0 +------------------------ + +(1 row) + +-- Shows that the relfilenode of blocked_t1 is blocked on seg0 by its owner and tablespace. +SELECT rel.relname, be.target_type, + (be.target_oid=rel.relowner) AS owner_matched, + (be.tablespace_oid=rel.reltablespace) AS tablespace_matched + FROM gp_dist_random('pg_class') AS rel, + gp_dist_random('diskquota.blackmap') AS be + WHERE rel.relfilenode=be.relnode AND be.relnode<>0; + relname | target_type | owner_matched | tablespace_matched +------------+-----------------------+---------------+-------------------- + blocked_t1 | ROLE_TABLESPACE_QUOTA | t | t +(1 row) + +-- +-- 2. Test that the relfilenodes of toast relation together with its +-- index are blocked on seg0. +-- +CREATE TABLE blocked_t2(i text); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. 
Make sure column(s) chosen are the optimal data distribution key to minimize skew. +-- Insert an entry for blocked_t2 to blackmap on seg0. +SELECT block_relation_on_seg0('blocked_t2'::regclass, 'NAMESPACE'::text); + block_relation_on_seg0 +------------------------ + +(1 row) + +-- Shows that the relfilenodes of blocked_t2 together with its toast relation and toast +-- index relation are blocked on seg0 by its namespace. +SELECT replace_oid_with_relname(rel.relname), + rel.relkind, be.target_type, + (be.target_oid=rel.relnamespace) AS namespace_matched + FROM gp_dist_random('pg_class') AS rel, + gp_dist_random('diskquota.blackmap') AS be + WHERE rel.relfilenode=be.relnode AND be.relnode<>0 + ORDER BY rel.relname DESC; + replace_oid_with_relname | relkind | target_type | namespace_matched +---------------------------+---------+-----------------+------------------- + pg_toast_blocked_t2_index | i | NAMESPACE_QUOTA | f + pg_toast_blocked_t2 | t | NAMESPACE_QUOTA | f + blocked_t2 | r | NAMESPACE_QUOTA | t +(3 rows) + +-- +-- 3. Test that the relfilenodes of appendonly relation (row oriented) together with its +-- auxiliary relations are blocked on seg0. +-- +CREATE TABLE blocked_t3(i int) WITH (appendonly=true); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +CREATE INDEX blocked_t3_index ON blocked_t3(i); +-- Insert an entry for blocked_t3 to blackmap on seg0. +SELECT block_relation_on_seg0('blocked_t3'::regclass, 'NAMESPACE'::text); + block_relation_on_seg0 +------------------------ + +(1 row) + +-- Shows that the relfilenodes of blocked_t3 together with its appendonly relation and appendonly +-- index relations are blocked on seg0 by its namespace. +SELECT replace_oid_with_relname(rel.relname), + rel.relkind, be.target_type, + (be.target_oid=rel.relnamespace) AS namespace_matched + FROM gp_dist_random('pg_class') AS rel, + gp_dist_random('diskquota.blackmap') AS be + WHERE rel.relfilenode=be.relnode AND be.relnode<>0 + ORDER BY rel.relname DESC; + replace_oid_with_relname | relkind | target_type | namespace_matched +-------------------------------+---------+-----------------+------------------- + pg_aovisimap_blocked_t3_index | i | NAMESPACE_QUOTA | f + pg_aovisimap_blocked_t3 | M | NAMESPACE_QUOTA | f + pg_aoseg_blocked_t3 | o | NAMESPACE_QUOTA | f + pg_aoblkdir_blocked_t3_index | i | NAMESPACE_QUOTA | f + pg_aoblkdir_blocked_t3 | b | NAMESPACE_QUOTA | f + blocked_t3 | r | NAMESPACE_QUOTA | t +(6 rows) + +-- +-- 4. Test that the relfilenodes of appendonly relation (column oriented) together with its +-- auxiliary relations are blocked on seg0. +-- +CREATE TABLE blocked_t4(i int) WITH (appendonly=true, orientation=column); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +CREATE INDEX blocked_t4_index ON blocked_t4(i); +-- Insert an entry for blocked_t4 to blackmap on seg0. 
+SELECT block_relation_on_seg0('blocked_t4'::regclass, 'NAMESPACE'::text); + block_relation_on_seg0 +------------------------ + +(1 row) + +-- Shows that the relfilenodes of blocked_t4 together with its appendonly relation and appendonly +-- index relation are blocked on seg0 by its namespace. +SELECT replace_oid_with_relname(rel.relname), + rel.relkind, be.target_type, + (be.target_oid=rel.relnamespace) AS namespace_matched + FROM gp_dist_random('pg_class') AS rel, + gp_dist_random('diskquota.blackmap') AS be + WHERE rel.relfilenode=be.relnode AND be.relnode<>0 + ORDER BY rel.relname DESC; + replace_oid_with_relname | relkind | target_type | namespace_matched +-------------------------------+---------+-----------------+------------------- + pg_aovisimap_blocked_t4_index | i | NAMESPACE_QUOTA | f + pg_aovisimap_blocked_t4 | M | NAMESPACE_QUOTA | f + pg_aocsseg_blocked_t4 | o | NAMESPACE_QUOTA | f + pg_aoblkdir_blocked_t4_index | i | NAMESPACE_QUOTA | f + pg_aoblkdir_blocked_t4 | b | NAMESPACE_QUOTA | f + blocked_t4 | r | NAMESPACE_QUOTA | t +(6 rows) + +-- +-- 5. Test that the relfilenodes of toast appendonly relation (column oriented) together with its +-- auxiliary relations are blocked on seg0. +-- +CREATE TABLE blocked_t5(i text) WITH (appendonly=true, orientation=column); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +CREATE INDEX blocked_t5_index ON blocked_t5(i); +-- Insert an entry for blocked_t5 to blackmap on seg0. +SELECT block_relation_on_seg0('blocked_t5'::regclass, 'NAMESPACE'::text); + block_relation_on_seg0 +------------------------ + +(1 row) + +-- Shows that the relfilenodes of blocked_t5 together with its toast relation, toast +-- index relation and appendonly relations are blocked on seg0 by its namespace. +SELECT replace_oid_with_relname(rel.relname), + rel.relkind, be.target_type, + (be.target_oid=rel.relnamespace) AS namespace_matched + FROM gp_dist_random('pg_class') AS rel, + gp_dist_random('diskquota.blackmap') AS be + WHERE rel.relfilenode=be.relnode AND be.relnode<>0 + ORDER BY rel.relname DESC; + replace_oid_with_relname | relkind | target_type | namespace_matched +-------------------------------+---------+-----------------+------------------- + pg_toast_blocked_t5_index | i | NAMESPACE_QUOTA | f + pg_toast_blocked_t5 | t | NAMESPACE_QUOTA | f + pg_aovisimap_blocked_t5_index | i | NAMESPACE_QUOTA | f + pg_aovisimap_blocked_t5 | M | NAMESPACE_QUOTA | f + pg_aocsseg_blocked_t5 | o | NAMESPACE_QUOTA | f + pg_aoblkdir_blocked_t5_index | i | NAMESPACE_QUOTA | f + pg_aoblkdir_blocked_t5 | b | NAMESPACE_QUOTA | f + blocked_t5 | r | NAMESPACE_QUOTA | t +(8 rows) + +-- Do some clean-ups. +DROP FUNCTION replace_oid_with_relname(text); +DROP FUNCTION block_relation_on_seg0(regclass, text); +DROP TABLE blocked_t1; +DROP TABLE blocked_t2; +DROP TABLE blocked_t3; +DROP TABLE blocked_t4; +DROP TABLE blocked_t5; +DROP TABLESPACE blocked_space; +\! rm -rf /tmp/blocked_space +SET search_path TO DEFAULT; +DROP SCHEMA s_blackmap; diff --git a/tests/regress/sql/test_blackmap.sql b/tests/regress/sql/test_blackmap.sql new file mode 100644 index 00000000000..edab03f8dd5 --- /dev/null +++ b/tests/regress/sql/test_blackmap.sql @@ -0,0 +1,196 @@ +-- +-- This file contains tests for dispatching and querying blackmap.
+-- + +CREATE SCHEMA s_blackmap; +SET search_path TO s_blackmap; + +-- This function replaces the oid that appears in the auxiliary relation's name +-- with the corresponding relname of that oid. +CREATE OR REPLACE FUNCTION replace_oid_with_relname(given_name text) + RETURNS text AS $$ + BEGIN + RETURN COALESCE( + REGEXP_REPLACE(given_name, + '^(pg_toast_|pg_aoseg_|pg_aovisimap_|pg_aoblkdir_|pg_aocsseg_)\d+', + '\1' || + (SELECT relname FROM pg_class + WHERE oid=REGEXP_REPLACE(given_name, '\D', '', 'g')::oid), 'g'), given_name); + END; +$$ LANGUAGE plpgsql; + +CREATE OR REPLACE FUNCTION block_relation_on_seg0(rel regclass, block_type text) + RETURNS void AS $$ + DECLARE + bt int; + targetoid oid; + BEGIN + CASE block_type + WHEN 'NAMESPACE' THEN + bt = 0; + SELECT relnamespace INTO targetoid + FROM pg_class WHERE relname=rel::text; + WHEN 'ROLE' THEN + bt = 1; + SELECT relowner INTO targetoid + FROM pg_class WHERE relname=rel::text; + WHEN 'NAMESPACE_TABLESPACE' THEN + bt = 2; + SELECT relnamespace INTO targetoid + FROM pg_class WHERE relname=rel::text; + WHEN 'ROLE_TABLESPACE' THEN + bt = 3; + SELECT relowner INTO targetoid + FROM pg_class WHERE relname=rel::text; + END CASE; + PERFORM diskquota.refresh_blackmap( + ARRAY[ + ROW(targetoid, + (SELECT oid FROM pg_database WHERE datname=current_database()), + (SELECT reltablespace FROM pg_class WHERE relname=rel::text), + bt, + false) + ]::diskquota.blackmap_entry[], + ARRAY[rel]::oid[]) + FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + END; $$ +LANGUAGE 'plpgsql'; + +-- +-- 1. Create an ordinary table and add its oid to blackmap on seg0. +-- Check that its relfilenode is blocked on seg0 by various conditions. +-- +CREATE TABLE blocked_t1(i int); + +-- Insert an entry for blocked_t1 to blackmap on seg0. +SELECT block_relation_on_seg0('blocked_t1'::regclass, 'NAMESPACE'::text); + +-- Shows that the relfilenode of blocked_t1 is blocked on seg0 by its namespace. +SELECT rel.relname, be.target_type, (be.target_oid=rel.relnamespace) AS namespace_matched + FROM gp_dist_random('pg_class') AS rel, + gp_dist_random('diskquota.blackmap') AS be + WHERE rel.relfilenode=be.relnode AND be.relnode<>0; + +-- Insert an entry for blocked_t1 to blackmap on seg0. +SELECT block_relation_on_seg0('blocked_t1'::regclass, 'ROLE'::text); + +-- Shows that the relfilenode of blocked_t1 is blocked on seg0 by its owner. +SELECT rel.relname, be.target_type, (be.target_oid=rel.relowner) AS owner_matched + FROM gp_dist_random('pg_class') AS rel, + gp_dist_random('diskquota.blackmap') AS be + WHERE rel.relfilenode=be.relnode AND be.relnode<>0; + +-- Create a tablespace to test the rest of blocking types. +\! mkdir /tmp/blocked_space +CREATE TABLESPACE blocked_space LOCATION '/tmp/blocked_space'; +ALTER TABLE blocked_t1 SET TABLESPACE blocked_space; + +-- Insert an entry for blocked_t1 to blackmap on seg0. +SELECT block_relation_on_seg0('blocked_t1'::regclass, 'NAMESPACE_TABLESPACE'::text); + +-- Shows that the relfilenode of blocked_t1 is blocked on seg0 by its namespace and tablespace. +SELECT rel.relname, be.target_type, + (be.target_oid=rel.relnamespace) AS namespace_matched, + (be.tablespace_oid=rel.reltablespace) AS tablespace_matched + FROM gp_dist_random('pg_class') AS rel, + gp_dist_random('diskquota.blackmap') AS be + WHERE rel.relfilenode=be.relnode AND be.relnode<>0; + +-- Insert an entry for blocked_t1 to blackmap on seg0.
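+-- (block_relation_on_seg0() maps 'ROLE_TABLESPACE' to block type 3, which
+-- surfaces as ROLE_TABLESPACE_QUOTA in diskquota.blackmap.)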
+SELECT block_relation_on_seg0('blocked_t1'::regclass, 'ROLE_TABLESPACE'::text); + +-- Shows that the relfilenode of blocked_t1 is blocked on seg0 by its owner and tablespace. +SELECT rel.relname, be.target_type, + (be.target_oid=rel.relowner) AS owner_matched, + (be.tablespace_oid=rel.reltablespace) AS tablespace_matched + FROM gp_dist_random('pg_class') AS rel, + gp_dist_random('diskquota.blackmap') AS be + WHERE rel.relfilenode=be.relnode AND be.relnode<>0; + +-- +-- 2. Test that the relfilenodes of toast relation together with its +-- index are blocked on seg0. +-- +CREATE TABLE blocked_t2(i text); +-- Insert an entry for blocked_t2 to blackmap on seg0. +SELECT block_relation_on_seg0('blocked_t2'::regclass, 'NAMESPACE'::text); + +-- Shows that the relfilenodes of blocked_t2 together with its toast relation and toast +-- index relation are blocked on seg0 by its namespace. +SELECT replace_oid_with_relname(rel.relname), + rel.relkind, be.target_type, + (be.target_oid=rel.relnamespace) AS namespace_matched + FROM gp_dist_random('pg_class') AS rel, + gp_dist_random('diskquota.blackmap') AS be + WHERE rel.relfilenode=be.relnode AND be.relnode<>0 + ORDER BY rel.relname DESC; + +-- +-- 3. Test that the relfilenodes of appendonly relation (row oriented) together with its +-- auxiliary relations are blocked on seg0. +-- +CREATE TABLE blocked_t3(i int) WITH (appendonly=true); +CREATE INDEX blocked_t3_index ON blocked_t3(i); +-- Insert an entry for blocked_t3 to blackmap on seg0. +SELECT block_relation_on_seg0('blocked_t3'::regclass, 'NAMESPACE'::text); + +-- Shows that the relfilenodes of blocked_t3 together with its appendonly relation and appendonly +-- index relations are blocked on seg0 by its namespace. +SELECT replace_oid_with_relname(rel.relname), + rel.relkind, be.target_type, + (be.target_oid=rel.relnamespace) AS namespace_matched + FROM gp_dist_random('pg_class') AS rel, + gp_dist_random('diskquota.blackmap') AS be + WHERE rel.relfilenode=be.relnode AND be.relnode<>0 + ORDER BY rel.relname DESC; + +-- +-- 4. Test that the relfilenodes of appendonly relation (column oriented) together with its +-- auxiliary relations are blocked on seg0. +-- +CREATE TABLE blocked_t4(i int) WITH (appendonly=true, orientation=column); +CREATE INDEX blocked_t4_index ON blocked_t4(i); +-- Insert an entry for blocked_t4 to blackmap on seg0. +SELECT block_relation_on_seg0('blocked_t4'::regclass, 'NAMESPACE'::text); + +-- Shows that the relfilenodes of blocked_t4 together with its appendonly relation and appendonly +-- index relation are blocked on seg0 by its namespace. +SELECT replace_oid_with_relname(rel.relname), + rel.relkind, be.target_type, + (be.target_oid=rel.relnamespace) AS namespace_matched + FROM gp_dist_random('pg_class') AS rel, + gp_dist_random('diskquota.blackmap') AS be + WHERE rel.relfilenode=be.relnode AND be.relnode<>0 + ORDER BY rel.relname DESC; + +-- +-- 5. Test that the relfilenodes of toast appendonly relation (column oriented) together with its +-- auxiliary relations are blocked on seg0. +-- +CREATE TABLE blocked_t5(i text) WITH (appendonly=true, orientation=column); +CREATE INDEX blocked_t5_index ON blocked_t5(i); +-- Insert an entry for blocked_t5 to blackmap on seg0. +SELECT block_relation_on_seg0('blocked_t5'::regclass, 'NAMESPACE'::text); + +-- Shows that the relfilenodes of blocked_t5 together with its toast relation, toast +-- index relation and appendonly relations are blocked on seg0 by its namespace.
+SELECT replace_oid_with_relname(rel.relname), + rel.relkind, be.target_type, + (be.target_oid=rel.relnamespace) AS namespace_matched + FROM gp_dist_random('pg_class') AS rel, + gp_dist_random('diskquota.blackmap') AS be + WHERE rel.relfilenode=be.relnode AND be.relnode<>0 + ORDER BY rel.relname DESC; + +-- Do some clean-ups. +DROP FUNCTION replace_oid_with_relname(text); +DROP FUNCTION block_relation_on_seg0(regclass, text); +DROP TABLE blocked_t1; +DROP TABLE blocked_t2; +DROP TABLE blocked_t3; +DROP TABLE blocked_t4; +DROP TABLE blocked_t5; +DROP TABLESPACE blocked_space; +\! rm -rf /tmp/blocked_space +SET search_path TO DEFAULT; +DROP SCHEMA s_blackmap; From a60fdc8dcb10876aed8c67cc523a2281286fc116 Mon Sep 17 00:00:00 2001 From: Xing Guo Date: Mon, 6 Dec 2021 15:47:34 +0800 Subject: [PATCH 092/330] Fix incorrect JOIN condition in test_blackmap.sql (#101) This PR fixes incorrect JOIN condition in test_blackmap.sql. The relfilenodes are not always different across segments. Hence, we should add an additional JOIN condition to the test case or it will produce unstable results. --- tests/regress/expected/test_blackmap.out | 16 ++++++++-------- tests/regress/sql/test_blackmap.sql | 16 ++++++++-------- 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/tests/regress/expected/test_blackmap.out b/tests/regress/expected/test_blackmap.out index a85e94ec123..96d652a3a05 100644 --- a/tests/regress/expected/test_blackmap.out +++ b/tests/regress/expected/test_blackmap.out @@ -70,7 +70,7 @@ SELECT block_relation_on_seg0('blocked_t1'::regclass, 'NAMESPACE'::text); SELECT rel.relname, be.target_type, (be.target_oid=rel.relnamespace) AS namespace_matched FROM gp_dist_random('pg_class') AS rel, gp_dist_random('diskquota.blackmap') AS be - WHERE rel.relfilenode=be.relnode AND be.relnode<>0; + WHERE rel.relfilenode=be.relnode AND be.relnode<>0 AND rel.gp_segment_id=be.segid; relname | target_type | namespace_matched ------------+-----------------+------------------- blocked_t1 | NAMESPACE_QUOTA | t @@ -87,7 +87,7 @@ SELECT block_relation_on_seg0('blocked_t1'::regclass, 'ROLE'::text); SELECT rel.relname, be.target_type, (be.target_oid=rel.relowner) AS owner_matched FROM gp_dist_random('pg_class') AS rel, gp_dist_random('diskquota.blackmap') AS be - WHERE rel.relfilenode=be.relnode AND be.relnode<>0; + WHERE rel.relfilenode=be.relnode AND be.relnode<>0 AND rel.gp_segment_id=be.segid; relname | target_type | owner_matched ------------+-------------+--------------- blocked_t1 | ROLE_QUOTA | t @@ -110,7 +110,7 @@ SELECT rel.relname, be.target_type, (be.tablespace_oid=rel.reltablespace) AS tablespace_matched FROM gp_dist_random('pg_class') AS rel, gp_dist_random('diskquota.blackmap') AS be - WHERE rel.relfilenode=be.relnode AND be.relnode<>0; + WHERE rel.relfilenode=be.relnode AND be.relnode<>0 AND rel.gp_segment_id=be.segid; relname | target_type | namespace_matched | tablespace_matched ------------+----------------------------+-------------------+-------------------- blocked_t1 | NAMESPACE_TABLESPACE_QUOTA | t | t @@ -129,7 +129,7 @@ SELECT rel.relname, be.target_type, (be.tablespace_oid=rel.reltablespace) AS tablespace_matched FROM gp_dist_random('pg_class') AS rel, gp_dist_random('diskquota.blackmap') AS be - WHERE rel.relfilenode=be.relnode AND be.relnode<>0; + WHERE rel.relfilenode=be.relnode AND be.relnode<>0 AND rel.gp_segment_id=be.segid; relname | target_type | owner_matched | tablespace_matched ------------+-----------------------+---------------+-------------------- blocked_t1 | 
ROLE_TABLESPACE_QUOTA | t | t @@ -156,7 +156,7 @@ SELECT replace_oid_with_relname(rel.relname), (be.target_oid=rel.relnamespace) AS namespace_matched FROM gp_dist_random('pg_class') AS rel, gp_dist_random('diskquota.blackmap') AS be - WHERE rel.relfilenode=be.relnode AND be.relnode<>0 + WHERE rel.relfilenode=be.relnode AND be.relnode<>0 AND rel.gp_segment_id=be.segid ORDER BY rel.relname DESC; replace_oid_with_relname | relkind | target_type | namespace_matched ---------------------------+---------+-----------------+------------------- @@ -187,7 +187,7 @@ SELECT replace_oid_with_relname(rel.relname), (be.target_oid=rel.relnamespace) AS namespace_matched FROM gp_dist_random('pg_class') AS rel, gp_dist_random('diskquota.blackmap') AS be - WHERE rel.relfilenode=be.relnode AND be.relnode<>0 + WHERE rel.relfilenode=be.relnode AND be.relnode<>0 AND rel.gp_segment_id=be.segid ORDER BY rel.relname DESC; replace_oid_with_relname | relkind | target_type | namespace_matched -------------------------------+---------+-----------------+------------------- @@ -221,7 +221,7 @@ SELECT replace_oid_with_relname(rel.relname), (be.target_oid=rel.relnamespace) AS namespace_matched FROM gp_dist_random('pg_class') AS rel, gp_dist_random('diskquota.blackmap') AS be - WHERE rel.relfilenode=be.relnode AND be.relnode<>0 + WHERE rel.relfilenode=be.relnode AND be.relnode<>0 AND rel.gp_segment_id=be.segid ORDER BY rel.relname DESC; replace_oid_with_relname | relkind | target_type | namespace_matched -------------------------------+---------+-----------------+------------------- @@ -255,7 +255,7 @@ SELECT replace_oid_with_relname(rel.relname), (be.target_oid=rel.relnamespace) AS namespace_matched FROM gp_dist_random('pg_class') AS rel, gp_dist_random('diskquota.blackmap') AS be - WHERE rel.relfilenode=be.relnode AND be.relnode<>0 + WHERE rel.relfilenode=be.relnode AND be.relnode<>0 AND rel.gp_segment_id=be.segid ORDER BY rel.relname DESC; replace_oid_with_relname | relkind | target_type | namespace_matched -------------------------------+---------+-----------------+------------------- diff --git a/tests/regress/sql/test_blackmap.sql b/tests/regress/sql/test_blackmap.sql index edab03f8dd5..ffd6568960c 100644 --- a/tests/regress/sql/test_blackmap.sql +++ b/tests/regress/sql/test_blackmap.sql @@ -69,7 +69,7 @@ SELECT block_relation_on_seg0('blocked_t1'::regclass, 'NAMESPACE'::text); SELECT rel.relname, be.target_type, (be.target_oid=rel.relnamespace) AS namespace_matched FROM gp_dist_random('pg_class') AS rel, gp_dist_random('diskquota.blackmap') AS be - WHERE rel.relfilenode=be.relnode AND be.relnode<>0; + WHERE rel.relfilenode=be.relnode AND be.relnode<>0 AND rel.gp_segment_id=be.segid; -- Insert an entry for blocked_t1 to blackmap on seg0. SELECT block_relation_on_seg0('blocked_t1'::regclass, 'ROLE'::text); @@ -78,7 +78,7 @@ SELECT block_relation_on_seg0('blocked_t1'::regclass, 'ROLE'::text); SELECT rel.relname, be.target_type, (be.target_oid=rel.relowner) AS owner_matched FROM gp_dist_random('pg_class') AS rel, gp_dist_random('diskquota.blackmap') AS be - WHERE rel.relfilenode=be.relnode AND be.relnode<>0; + WHERE rel.relfilenode=be.relnode AND be.relnode<>0 AND rel.gp_segment_id=be.segid; -- Create a tablespace to test the rest of blocking types. \! 
mkdir /tmp/blocked_space @@ -94,7 +94,7 @@ SELECT rel.relname, be.target_type, (be.tablespace_oid=rel.reltablespace) AS tablespace_matched FROM gp_dist_random('pg_class') AS rel, gp_dist_random('diskquota.blackmap') AS be - WHERE rel.relfilenode=be.relnode AND be.relnode<>0; + WHERE rel.relfilenode=be.relnode AND be.relnode<>0 AND rel.gp_segment_id=be.segid; -- Insert an entry for blocked_t1 to blackmap on seg0. SELECT block_relation_on_seg0('blocked_t1'::regclass, 'ROLE_TABLESPACE'::text); @@ -105,7 +105,7 @@ SELECT rel.relname, be.target_type, (be.tablespace_oid=rel.reltablespace) AS tablespace_matched FROM gp_dist_random('pg_class') AS rel, gp_dist_random('diskquota.blackmap') AS be - WHERE rel.relfilenode=be.relnode AND be.relnode<>0; + WHERE rel.relfilenode=be.relnode AND be.relnode<>0 AND rel.gp_segment_id=be.segid; -- -- 2. Test that the relfilenodes of toast relation together with its @@ -122,7 +122,7 @@ SELECT replace_oid_with_relname(rel.relname), (be.target_oid=rel.relnamespace) AS namespace_matched FROM gp_dist_random('pg_class') AS rel, gp_dist_random('diskquota.blackmap') AS be - WHERE rel.relfilenode=be.relnode AND be.relnode<>0 + WHERE rel.relfilenode=be.relnode AND be.relnode<>0 AND rel.gp_segment_id=be.segid ORDER BY rel.relname DESC; -- @@ -141,7 +141,7 @@ SELECT replace_oid_with_relname(rel.relname), (be.target_oid=rel.relnamespace) AS namespace_matched FROM gp_dist_random('pg_class') AS rel, gp_dist_random('diskquota.blackmap') AS be - WHERE rel.relfilenode=be.relnode AND be.relnode<>0 + WHERE rel.relfilenode=be.relnode AND be.relnode<>0 AND rel.gp_segment_id=be.segid ORDER BY rel.relname DESC; -- @@ -160,7 +160,7 @@ SELECT replace_oid_with_relname(rel.relname), (be.target_oid=rel.relnamespace) AS namespace_matched FROM gp_dist_random('pg_class') AS rel, gp_dist_random('diskquota.blackmap') AS be - WHERE rel.relfilenode=be.relnode AND be.relnode<>0 + WHERE rel.relfilenode=be.relnode AND be.relnode<>0 AND rel.gp_segment_id=be.segid ORDER BY rel.relname DESC; -- @@ -179,7 +179,7 @@ SELECT replace_oid_with_relname(rel.relname), (be.target_oid=rel.relnamespace) AS namespace_matched FROM gp_dist_random('pg_class') AS rel, gp_dist_random('diskquota.blackmap') AS be - WHERE rel.relfilenode=be.relnode AND be.relnode<>0 + WHERE rel.relfilenode=be.relnode AND be.relnode<>0 AND rel.gp_segment_id=be.segid ORDER BY rel.relname DESC; -- Do some clean-ups. From 6635f473006e31995134a47e1f0397376058d036 Mon Sep 17 00:00:00 2001 From: Zhang Hao <1446384557@qq.com> Date: Tue, 7 Dec 2021 11:10:00 +0800 Subject: [PATCH 093/330] calculate table size for hard limit (#100) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add calculate_table_size() to calculate any table's size, including uncommitted tables. 1. Monitor active tables on the master. 2. Replace relation_open() with SearchSysCache() plus scans of pg_appendonly and pg_index to fetch the Form_pg_class of tables and indexes, to avoid deadlocks. 3. Use DiskQuotaRelationCacheEntry to calculate table sizes instead of pg_table_size(), again to avoid deadlocks. A minimal usage sketch follows.
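For illustration only, a sketch of the behavior this change enables (the
table name is hypothetical; the 5-second sleep assumes the worker's
naptime is short enough, as in the regression test added below):

    -- Table size becomes visible in diskquota.table_size even before
    -- the creating transaction commits.
    BEGIN;
    CREATE TABLE uncommitted_t(i int) DISTRIBUTED BY (i);
    INSERT INTO uncommitted_t SELECT generate_series(1, 100000);
    SELECT pg_sleep(5);  -- wait for the bgworker to refresh its model
    -- Reports a non-zero size for the still-uncommitted table.
    SELECT tableid::regclass, size, segid
      FROM diskquota.table_size
     WHERE tableid = 'uncommitted_t'::regclass AND segid = -1;
    COMMIT;
    DROP TABLE uncommitted_t;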
Co-authored-by: hzhang2 Co-authored-by: Xuebin Su (苏学斌) Co-authored-by: Xing Guo --- diskquota.h | 2 + diskquota_utility.c | 108 ++++++++- enforcement.c | 9 +- gp_activetable.c | 69 +----- quotamodel.c | 209 +++++++----------- relation_cache.c | 157 +++++++++++++ relation_cache.h | 2 +- tests/regress/diskquota_schedule | 1 + .../expected/test_uncommitted_table_size.out | 196 ++++++++++++++++ .../sql/test_uncommitted_table_size.sql | 78 +++++++ 10 files changed, 626 insertions(+), 205 deletions(-) create mode 100644 tests/regress/expected/test_uncommitted_table_size.out create mode 100644 tests/regress/sql/test_uncommitted_table_size.sql diff --git a/diskquota.h b/diskquota.h index 05614f40b20..bb355c2e480 100644 --- a/diskquota.h +++ b/diskquota.h @@ -121,4 +121,6 @@ extern void truncateStringInfo(StringInfo str, int nchars); extern List *get_rel_oid_list(void); extern int64 calculate_relation_size_all_forks(RelFileNodeBackend *rnode, char relstorage); extern Relation diskquota_relation_open(Oid relid, LOCKMODE mode); +extern List* diskquota_get_index_list(Oid relid); +extern void diskquota_get_appendonly_aux_oid_list(Oid reloid, Oid *segrelid, Oid *blkdirrelid, Oid *visimaprelid); #endif diff --git a/diskquota_utility.c b/diskquota_utility.c index bbb0096e093..4cfe984359e 100644 --- a/diskquota_utility.c +++ b/diskquota_utility.c @@ -1175,14 +1175,9 @@ get_rel_oid_list(void) oid = DatumGetObjectId(SPI_getbinval(tup,tupdesc, 1, &isnull)); if (!isnull) { - Relation relation; - List *indexIds; - relation = try_relation_open(oid, AccessShareLock, false); - if (!relation) - continue; - + List *indexIds; oidlist = lappend_oid(oidlist, oid); - indexIds = RelationGetIndexList(relation); + indexIds = diskquota_get_index_list(oid); if (indexIds != NIL ) { foreach(l, indexIds) @@ -1190,7 +1185,6 @@ get_rel_oid_list(void) oidlist = lappend_oid(oidlist, lfirst_oid(l)); } } - relation_close(relation, AccessShareLock); list_free(indexIds); } } @@ -1309,3 +1303,101 @@ diskquota_relation_open(Oid relid, LOCKMODE mode) PG_END_TRY(); return success_open ? rel : NULL; } + +List* +diskquota_get_index_list(Oid relid) +{ + Relation indrel; + SysScanDesc indscan; + ScanKeyData skey; + HeapTuple htup; + List *result = NIL; + + /* Prepare to scan pg_index for entries having indrelid = this rel. */ + ScanKeyInit(&skey, + Anum_pg_index_indrelid, + BTEqualStrategyNumber, F_OIDEQ, + relid); + + indrel = heap_open(IndexRelationId, AccessShareLock); + indscan = systable_beginscan(indrel, IndexIndrelidIndexId, true, + NULL, 1, &skey); + + while (HeapTupleIsValid(htup = systable_getnext(indscan))) + { + Form_pg_index index = (Form_pg_index) GETSTRUCT(htup); + + /* + * Ignore any indexes that are currently being dropped. This will + * prevent them from being searched, inserted into, or considered in + * HOT-safety decisions. It's unsafe to touch such an index at all + * since its catalog entries could disappear at any instant. + */ + if (!IndexIsLive(index)) + continue; + + /* Add index's OID to result list in the proper order */ + result = lappend_oid(result, index->indexrelid); + } + + systable_endscan(indscan); + + heap_close(indrel, AccessShareLock); + + return result; +} + +/* + * Get auxiliary relations oid by searching the pg_appendonly table. 
+ */ +void +diskquota_get_appendonly_aux_oid_list(Oid reloid, Oid *segrelid, Oid *blkdirrelid, Oid *visimaprelid) +{ + ScanKeyData skey; + SysScanDesc scan; + TupleDesc tupDesc; + Relation aorel; + HeapTuple htup; + Datum auxoid; + bool isnull; + + ScanKeyInit(&skey, Anum_pg_appendonly_relid, + BTEqualStrategyNumber, F_OIDEQ, reloid); + aorel = heap_open(AppendOnlyRelationId, AccessShareLock); + tupDesc = RelationGetDescr(aorel); + scan = systable_beginscan(aorel, AppendOnlyRelidIndexId, + true /*indexOk*/, NULL /*snapshot*/, + 1 /*nkeys*/, &skey); + while (HeapTupleIsValid(htup = systable_getnext(scan))) + { + if (segrelid) + { + auxoid = heap_getattr(htup, + Anum_pg_appendonly_segrelid, + tupDesc, &isnull); + if (!isnull) + *segrelid = DatumGetObjectId(auxoid); + } + + if (blkdirrelid) + { + auxoid = heap_getattr(htup, + Anum_pg_appendonly_blkdirrelid, + tupDesc, &isnull); + if (!isnull) + *blkdirrelid = DatumGetObjectId(auxoid); + } + + if (visimaprelid) + { + auxoid = heap_getattr(htup, + Anum_pg_appendonly_visimaprelid, + tupDesc, &isnull); + if (!isnull) + *visimaprelid = DatumGetObjectId(auxoid); + } + } + + systable_endscan(scan); + heap_close(aorel, AccessShareLock); +} diff --git a/enforcement.c b/enforcement.c index d8ccc13ec54..b6366ebad4c 100644 --- a/enforcement.c +++ b/enforcement.c @@ -51,7 +51,6 @@ quota_check_ExecCheckRTPerms(List *rangeTable, bool ereport_on_violation) { List *indexIds; ListCell *oid; - Relation relation; RangeTblEntry *rte = (RangeTblEntry *) lfirst(l); /* see ExecCheckRTEPerms() */ @@ -72,11 +71,7 @@ quota_check_ExecCheckRTPerms(List *rangeTable, bool ereport_on_violation) */ quota_check_common(rte->relid, NULL /*relfilenode*/); /* Check the indexes of the this relation */ - relation = try_relation_open(rte->relid, AccessShareLock, false); - if (!relation) - continue; - - indexIds = RelationGetIndexList(relation); + indexIds = diskquota_get_index_list(rte->relid); PG_TRY(); { if (indexIds != NIL ) @@ -89,12 +84,10 @@ quota_check_ExecCheckRTPerms(List *rangeTable, bool ereport_on_violation) } PG_CATCH(); { - relation_close(relation, AccessShareLock); list_free(indexIds); PG_RE_THROW(); } PG_END_TRY(); - relation_close(relation, AccessShareLock); list_free(indexIds); } return true; diff --git a/gp_activetable.c b/gp_activetable.c index 004427927e5..573624e0ac8 100644 --- a/gp_activetable.c +++ b/gp_activetable.c @@ -207,8 +207,8 @@ report_relation_cache_helper(Oid relid) { bool found; - /* We do not collect the active table in either master or mirror segments */ - if (IS_QUERY_DISPATCHER() || IsRoleMirror()) + /* We do not collect the active table in mirror segments */ + if (IsRoleMirror()) { return; } @@ -242,8 +242,8 @@ report_active_table_helper(const RelFileNodeBackend *relFileNode) Oid dbid = relFileNode->node.dbNode; - /* We do not collect the active table in either master or mirror segments */ - if (IS_QUERY_DISPATCHER() || IsRoleMirror()) + /* We do not collect the active table in mirror segments */ + if (IsRoleMirror()) { return; } @@ -494,6 +494,7 @@ get_active_tables_stats(ArrayType *array) HASHCTL ctl; TableEntryKey key; DiskQuotaActiveTableEntry *entry; + bool found; Assert(ARR_ELEMTYPE(array) == OIDOID); @@ -530,68 +531,18 @@ get_active_tables_stats(ArrayType *array) } else { - MemoryContext oldcontext; - ResourceOwner oldowner; - relOid = DatumGetObjectId(fetch_att(ptr, typbyval, typlen)); segId = GpIdentity.segindex; key.reloid = relOid; key.segid = segId; - entry = (DiskQuotaActiveTableEntry *) hash_search(local_table, &key, HASH_ENTER, 
NULL); - entry->reloid = relOid; - entry->segid = segId; - - /* - * pg_table_size() may throw exceptions, in order not to abort the top level - * transaction, we start a subtransaction for it. This operation is expensive, - * but there're good reasons. E.g., - * When the subtransaction is aborted, the resources (e.g., locks) acquired - * in pg_table_size() are released in time. We can avoid potential deadlock - * risks by doing this. - */ - oldcontext = CurrentMemoryContext; - oldowner = CurrentResourceOwner; - - BeginInternalSubTransaction(NULL /* save point name */); - /* Run inside the function's memory context. */ - MemoryContextSwitchTo(oldcontext); - PG_TRY(); - { - /* call pg_table_size to get the active table size */ - entry->tablesize = (Size) DatumGetInt64(DirectFunctionCall1(pg_table_size, - ObjectIdGetDatum(relOid))); - -#ifdef FAULT_INJECTOR - SIMPLE_FAULT_INJECTOR("diskquota_fetch_table_stat"); -#endif - /* Commit the subtransaction. */ - ReleaseCurrentSubTransaction(); - MemoryContextSwitchTo(oldcontext); - CurrentResourceOwner = oldowner; - } - PG_CATCH(); + entry = (DiskQuotaActiveTableEntry *) hash_search(local_table, &key, HASH_ENTER, &found); + if (!found) { - ErrorData *edata; - - /* - * Save the error information, or we have no idea what is causing the - * exception. - */ - MemoryContextSwitchTo(oldcontext); - edata = CopyErrorData(); - FlushErrorState(); - - /* Abort the subtransaction and rollback. */ - RollbackAndReleaseCurrentSubTransaction(); - MemoryContextSwitchTo(oldcontext); - CurrentResourceOwner = oldowner; - elog(WARNING, "%s", edata->message); - FreeErrorData(edata); - - entry->tablesize = 0; + entry->reloid = relOid; + entry->segid = segId; + entry->tablesize = calculate_table_size(relOid); } - PG_END_TRY(); ptr = att_addlength_pointer(ptr, typlen, ptr); ptr = (char *) att_align_nominal(ptr, typalign); diff --git a/quotamodel.c b/quotamodel.c index 79671ecec3a..540eba15064 100644 --- a/quotamodel.c +++ b/quotamodel.c @@ -625,8 +625,8 @@ do_check_diskquota_state_is_ready(void) /* Add the dbid to watching list, so the hook can catch the table change*/ initStringInfo(&sql_command); - appendStringInfo(&sql_command, "select gp_segment_id, diskquota.update_diskquota_db_list(%u, 0) from gp_dist_random('gp_id');", - MyDatabaseId); + appendStringInfo(&sql_command, "select gp_segment_id, diskquota.update_diskquota_db_list(%u, 0) from gp_dist_random('gp_id') UNION ALL select -1, diskquota.update_diskquota_db_list(%u, 0);", + MyDatabaseId, MyDatabaseId); ret = SPI_execute(sql_command.data, true, 0); if (ret != SPI_OK_SELECT) ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), @@ -764,6 +764,33 @@ refresh_disk_quota_usage(bool is_init) return; } +static List* +merge_uncommitted_table_to_oidlist(List *oidlist) +{ + HASH_SEQ_STATUS iter; + DiskQuotaRelationCacheEntry *entry; + + if (relation_cache == NULL) + { + return oidlist; + } + + remove_committed_relation_from_cache(); + + LWLockAcquire(diskquota_locks.relation_cache_lock, LW_SHARED); + hash_seq_init(&iter, relation_cache); + while ((entry = hash_seq_search(&iter)) != NULL) + { + if (entry->primary_table_relid == entry->relid) + { + oidlist = lappend_oid(oidlist, entry->relid); + } + } + LWLockRelease(diskquota_locks.relation_cache_lock); + + return oidlist; +} + /* * Incremental way to update the disk quota of every database objects * Recalculate the table's disk usage when it's a new table or active table. 
@@ -812,19 +839,41 @@ calculate_table_disk_usage(bool is_init) * and role_size_map */ oidlist = get_rel_oid_list(); + + oidlist = merge_uncommitted_table_to_oidlist(oidlist); + foreach(l, oidlist) { HeapTuple classTup; - Form_pg_class classForm; + Form_pg_class classForm = NULL; + Oid relnamespace = InvalidOid; + Oid relowner = InvalidOid; + Oid reltablespace = InvalidOid; relOid = lfirst_oid(l); classTup = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(relOid)); - if (!HeapTupleIsValid(classTup)) + if (HeapTupleIsValid(classTup)) { - elog(WARNING, "cache lookup failed for relation %u", relOid); - continue; + classForm = (Form_pg_class) GETSTRUCT(classTup); + relnamespace = classForm->relnamespace; + relowner = classForm->relowner; + reltablespace = classForm->reltablespace; + } + else + { + LWLockAcquire(diskquota_locks.relation_cache_lock, LW_SHARED); + DiskQuotaRelationCacheEntry *relation_entry = hash_search(relation_cache, &relOid, HASH_FIND, NULL); + if (relation_entry == NULL) + { + elog(WARNING, "cache lookup failed for relation %u", relOid); + LWLockRelease(diskquota_locks.relation_cache_lock); + continue; + } + relnamespace = relation_entry->namespaceoid; + relowner = relation_entry->owneroid; + reltablespace = relation_entry->rnode.node.spcNode; + LWLockRelease(diskquota_locks.relation_cache_lock); } - classForm = (Form_pg_class) GETSTRUCT(classTup); /* * The segid is the same as the content id in gp_segment_configuration @@ -863,19 +912,7 @@ calculate_table_disk_usage(bool is_init) /* pretend process as utility mode, and append the table size on master */ Gp_role = GP_ROLE_UTILITY; - /* DirectFunctionCall1 may fail, since table maybe dropped by other backend */ - PG_TRY(); - { - /* call pg_table_size to get the active table size */ - active_table_entry->tablesize += (Size) DatumGetInt64(DirectFunctionCall1(pg_table_size, ObjectIdGetDatum(relOid))); - } - PG_CATCH(); - { - HOLD_INTERRUPTS(); - FlushErrorState(); - RESUME_INTERRUPTS(); - } - PG_END_TRY(); + active_table_entry->tablesize += calculate_table_size(relOid); Gp_role = GP_ROLE_DISPATCH; @@ -901,64 +938,69 @@ calculate_table_disk_usage(bool is_init) } /* if schema change, transfer the file size */ - if (tsentry->namespaceoid != classForm->relnamespace) + if (tsentry->namespaceoid != relnamespace) { transfer_table_for_quota( tsentry->totalsize, NAMESPACE_QUOTA, (Oid[]){tsentry->namespaceoid}, - (Oid[]){classForm->relnamespace}, + (Oid[]){relnamespace}, key.segid); transfer_table_for_quota( tsentry->totalsize, NAMESPACE_TABLESPACE_QUOTA, (Oid[]){tsentry->namespaceoid, tsentry->tablespaceoid}, - (Oid[]){classForm->relnamespace, tsentry->tablespaceoid}, + (Oid[]){relnamespace, tsentry->tablespaceoid}, key.segid); - tsentry->namespaceoid = classForm->relnamespace; + tsentry->namespaceoid = relnamespace; } /* if owner change, transfer the file size */ - if (tsentry->owneroid != classForm->relowner) + if (tsentry->owneroid != relowner) { transfer_table_for_quota( tsentry->totalsize, ROLE_QUOTA, (Oid[]){tsentry->owneroid}, - (Oid[]){classForm->relowner}, + (Oid[]){relowner}, key.segid ); transfer_table_for_quota( tsentry->totalsize, ROLE_TABLESPACE_QUOTA, (Oid[]){tsentry->owneroid, tsentry->tablespaceoid}, - (Oid[]){classForm->relowner, tsentry->tablespaceoid}, + (Oid[]){relowner, tsentry->tablespaceoid}, key.segid ); - tsentry->owneroid = classForm->relowner; + tsentry->owneroid = relowner; } - if (tsentry->tablespaceoid != classForm->reltablespace) + if (tsentry->tablespaceoid != reltablespace) { transfer_table_for_quota( 
tsentry->totalsize, NAMESPACE_TABLESPACE_QUOTA, (Oid[]){tsentry->namespaceoid, tsentry->tablespaceoid}, - (Oid[]){tsentry->namespaceoid, classForm->reltablespace}, + (Oid[]){tsentry->namespaceoid, reltablespace}, key.segid ); transfer_table_for_quota( tsentry->totalsize, ROLE_TABLESPACE_QUOTA, (Oid[]){tsentry->owneroid, tsentry->tablespaceoid}, - (Oid[]){tsentry->owneroid, classForm->reltablespace}, + (Oid[]){tsentry->owneroid, reltablespace}, key.segid ); - tsentry->tablespaceoid = classForm->reltablespace; + tsentry->tablespaceoid = reltablespace; } } - heap_freetuple(classTup); + if (HeapTupleIsValid(classTup)) + { + heap_freetuple(classTup); + } } + list_free(oidlist); + hash_destroy(local_active_table_stat_map); /* @@ -1581,97 +1623,6 @@ export_exceeded_error(GlobalBlackMapEntry *entry, bool skip_name) } } -/* - * The order of the returned index list is not guaranteed, Don't - * apply the relation_open() to the returned list, or deadlock - * may happen. - */ -static List* -GetIndexOidListByRelid(Oid reloid) -{ - List *result = NIL; - ScanKeyData skey; - SysScanDesc indscan; - Relation indrel; - HeapTuple htup; - - ScanKeyInit(&skey, Anum_pg_index_indrelid, - BTEqualStrategyNumber, F_OIDEQ, reloid); - indrel = heap_open(IndexRelationId, AccessShareLock); - indscan = systable_beginscan(indrel, IndexIndrelidIndexId, - true /*indexOk*/, NULL /*snapshot*/, - 1 /*nkeys*/, &skey); - while (HeapTupleIsValid(htup = systable_getnext(indscan))) - { - Form_pg_index index = (Form_pg_index) GETSTRUCT(htup); - - if (!IndexIsLive(index)) - continue; - - result = lappend_oid(result, index->indexrelid); - } - systable_endscan(indscan); - heap_close(indrel, AccessShareLock); - - return result; -} - -/* - * Get auxiliary relations oid by searching the pg_appendonly table. - */ -static void -GetAppendOnlyEntryAuxOidListByRelid(Oid reloid, Oid *segrelid, - Oid *blkdirrelid, Oid *visimaprelid) -{ - ScanKeyData skey; - SysScanDesc scan; - TupleDesc tupDesc; - Relation aorel; - HeapTuple htup; - Datum auxoid; - bool isnull; - - ScanKeyInit(&skey, Anum_pg_appendonly_relid, - BTEqualStrategyNumber, F_OIDEQ, reloid); - aorel = heap_open(AppendOnlyRelationId, AccessShareLock); - tupDesc = RelationGetDescr(aorel); - scan = systable_beginscan(aorel, AppendOnlyRelidIndexId, - true /*indexOk*/, NULL /*snapshot*/, - 1 /*nkeys*/, &skey); - while (HeapTupleIsValid(htup = systable_getnext(scan))) - { - if (segrelid) - { - auxoid = heap_getattr(htup, - Anum_pg_appendonly_segrelid, - tupDesc, &isnull); - if (!isnull) - *segrelid = DatumGetObjectId(auxoid); - } - - if (blkdirrelid) - { - auxoid = heap_getattr(htup, - Anum_pg_appendonly_blkdirrelid, - tupDesc, &isnull); - if (!isnull) - *blkdirrelid = DatumGetObjectId(auxoid); - } - - if (visimaprelid) - { - auxoid = heap_getattr(htup, - Anum_pg_appendonly_visimaprelid, - tupDesc, &isnull); - if (!isnull) - *visimaprelid = DatumGetObjectId(auxoid); - } - } - - systable_endscan(scan); - heap_close(aorel, AccessShareLock); -} - /* * refresh_blackmap() takes two arguments. * The first argument is an array of blackmap entries on QD. @@ -1840,26 +1791,26 @@ refresh_blackmap(PG_FUNCTION_ARGS) if (OidIsValid(toastrelid)) { oid_list = lappend_oid(oid_list, toastrelid); - oid_list = list_concat(oid_list, GetIndexOidListByRelid(toastrelid)); + oid_list = list_concat(oid_list, diskquota_get_index_list(toastrelid)); } /* Append ao auxiliary relations and their indexes to the oid_list if any. 
*/ - GetAppendOnlyEntryAuxOidListByRelid(active_oid, &aosegrelid, + diskquota_get_appendonly_aux_oid_list(active_oid, &aosegrelid, &aoblkdirrelid, &aovisimaprelid); if (OidIsValid(aosegrelid)) { oid_list = lappend_oid(oid_list, aosegrelid); - oid_list = list_concat(oid_list, GetIndexOidListByRelid(aosegrelid)); + oid_list = list_concat(oid_list, diskquota_get_index_list(aosegrelid)); } if (OidIsValid(aoblkdirrelid)) { oid_list = lappend_oid(oid_list, aoblkdirrelid); - oid_list = list_concat(oid_list, GetIndexOidListByRelid(aoblkdirrelid)); + oid_list = list_concat(oid_list, diskquota_get_index_list(aoblkdirrelid)); } if (OidIsValid(aovisimaprelid)) { oid_list = lappend_oid(oid_list, aovisimaprelid); - oid_list = list_concat(oid_list, GetIndexOidListByRelid(aovisimaprelid)); + oid_list = list_concat(oid_list, diskquota_get_index_list(aovisimaprelid)); } /* Iterate over the oid_list and add their relfilenodes to the blackmap. */ diff --git a/relation_cache.c b/relation_cache.c index 3049e8c698f..48bc19d6a52 100644 --- a/relation_cache.c +++ b/relation_cache.c @@ -3,6 +3,7 @@ #include "catalog/indexing.h" #include "catalog/pg_class.h" #include "catalog/pg_namespace.h" +#include "catalog/pg_tablespace.h" #include "catalog/pg_type.h" #include "catalog/objectaccess.h" #include "executor/spi.h" @@ -405,3 +406,159 @@ show_relation_cache(PG_FUNCTION_ARGS) SRF_RETURN_DONE(funcctx); } + +static void +add_auxrelation_to_relation_entry(Oid relid, DiskQuotaRelationCacheEntry *pentry) +{ + List *index_oids; + ListCell *cell; + + add_auxrelid_to_relation_entry(pentry, relid); + + index_oids = diskquota_get_index_list(relid); + foreach(cell, index_oids) + { + Oid idxrelid = lfirst_oid(cell); + add_auxrelid_to_relation_entry(pentry, idxrelid); + } + list_free(index_oids); +} + +static void +get_relation_entry_from_pg_class(Oid relid, DiskQuotaRelationCacheEntry* relation_entry) +{ + HeapTuple classTup; + Form_pg_class classForm; + Oid segrelid = InvalidOid; + Oid blkdirrelid = InvalidOid; + Oid visimaprelid = InvalidOid; + + classTup = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(relid)); + if (!HeapTupleIsValid(classTup) || relation_entry == NULL) + { + return; + } + + classForm = (Form_pg_class) GETSTRUCT(classTup); + + relation_entry->relid = relid; + relation_entry->primary_table_relid = relid; + relation_entry->owneroid = classForm->relowner; + relation_entry->namespaceoid = classForm->relnamespace; + relation_entry->relstorage = classForm->relstorage; + relation_entry->rnode.node.spcNode = OidIsValid(classForm->reltablespace) ? + classForm->reltablespace : DEFAULTTABLESPACE_OID; + relation_entry->rnode.node.dbNode = MyDatabaseId; + relation_entry->rnode.node.relNode = classForm->relfilenode; + relation_entry->rnode.backend = classForm->relpersistence == RELPERSISTENCE_TEMP ? 
+ TempRelBackendId : InvalidBackendId; + + /* toast table */ + if (OidIsValid(classForm->reltoastrelid)) + { + add_auxrelation_to_relation_entry(classForm->reltoastrelid, relation_entry); + } + + heap_freetuple(classTup); + + /* ao table */ + diskquota_get_appendonly_aux_oid_list(relid, &segrelid, &blkdirrelid, &visimaprelid); + if (OidIsValid(segrelid)) + { + add_auxrelation_to_relation_entry(segrelid, relation_entry); + } + if (OidIsValid(blkdirrelid)) + { + add_auxrelation_to_relation_entry(blkdirrelid, relation_entry); + } + if (OidIsValid(visimaprelid)) + { + add_auxrelation_to_relation_entry(visimaprelid, relation_entry); + } +} + +static void +get_relation_entry(Oid relid, DiskQuotaRelationCacheEntry* entry) +{ + DiskQuotaRelationCacheEntry* tentry; + + LWLockAcquire(diskquota_locks.relation_cache_lock, LW_SHARED); + tentry = hash_search(relation_cache, &relid, HASH_FIND, NULL); + if (tentry) + { + memcpy(entry, tentry, sizeof(DiskQuotaRelationCacheEntry)); + LWLockRelease(diskquota_locks.relation_cache_lock); + return; + } + LWLockRelease(diskquota_locks.relation_cache_lock); + + get_relation_entry_from_pg_class(relid, entry); +} + +static void +get_relfilenode_by_relid(Oid relid, RelFileNodeBackend *rnode, char *relstorage) +{ + DiskQuotaRelationCacheEntry *relation_cache_entry; + HeapTuple classTup; + Form_pg_class classForm; + + memset(rnode, 0, sizeof(RelFileNodeBackend)); + classTup = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(relid)); + if (HeapTupleIsValid(classTup)) + { + classForm = (Form_pg_class) GETSTRUCT(classTup); + rnode->node.spcNode = OidIsValid(classForm->reltablespace) ? + classForm->reltablespace : DEFAULTTABLESPACE_OID; + rnode->node.dbNode = MyDatabaseId; + rnode->node.relNode = classForm->relfilenode; + rnode->backend = classForm->relpersistence == RELPERSISTENCE_TEMP ? 
+ TempRelBackendId : InvalidBackendId; + *relstorage = classForm->relstorage; + heap_freetuple(classTup); + remove_cache_entry(relid, InvalidOid); + return; + } + + LWLockAcquire(diskquota_locks.relation_cache_lock, LW_SHARED); + relation_cache_entry = hash_search(relation_cache, &relid, HASH_FIND, NULL); + if (relation_cache_entry) + { + *rnode = relation_cache_entry->rnode; + *relstorage = relation_cache_entry->relstorage; + } + LWLockRelease(diskquota_locks.relation_cache_lock); + + return; +} + + +static Size +do_calculate_table_size(DiskQuotaRelationCacheEntry *entry) +{ + Size tablesize = 0; + RelFileNodeBackend rnode; + char relstorage = 0; + Oid subrelid; + int i; + + get_relfilenode_by_relid(entry->relid, &rnode, &relstorage); + tablesize += calculate_relation_size_all_forks(&rnode, relstorage); + + for (i = 0; i < entry->auxrel_num; i++) + { + subrelid = entry->auxrel_oid[i]; + get_relfilenode_by_relid(subrelid, &rnode, &relstorage); + tablesize += calculate_relation_size_all_forks(&rnode, relstorage); + } + return tablesize; +} + +Size +calculate_table_size(Oid relid) +{ + DiskQuotaRelationCacheEntry entry = {0}; + + get_relation_entry(relid, &entry); + + return do_calculate_table_size(&entry); +} diff --git a/relation_cache.h b/relation_cache.h index 6817c9612d2..dde9a1a71bf 100644 --- a/relation_cache.h +++ b/relation_cache.h @@ -33,6 +33,6 @@ extern Oid get_uncommitted_table_relid(Oid relfilenode); extern void update_relation_cache(Oid relid); extern Oid get_primary_table_oid(Oid relid); extern void remove_committed_relation_from_cache(void); - +extern Size calculate_table_size(Oid relid); #endif diff --git a/tests/regress/diskquota_schedule b/tests/regress/diskquota_schedule index d83acd1435b..bd74bda51bb 100644 --- a/tests/regress/diskquota_schedule +++ b/tests/regress/diskquota_schedule @@ -2,6 +2,7 @@ test: init test: prepare test: test_relation_size test: test_relation_cache +test: test_uncommitted_table_size # disable this tese due to GPDB behavior change # test: test_table_size test: test_fast_disk_check diff --git a/tests/regress/expected/test_uncommitted_table_size.out b/tests/regress/expected/test_uncommitted_table_size.out new file mode 100644 index 00000000000..17f08f4c11b --- /dev/null +++ b/tests/regress/expected/test_uncommitted_table_size.out @@ -0,0 +1,196 @@ +-- temp table +begin; +CREATE TEMP TABLE t1(i int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +INSERT INTO t1 SELECT generate_series(1, 100000); +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 't1'::regclass and segid = -1; + tableid | size | segid +---------+---------+------- + t1 | 3932160 | -1 +(1 row) + +SELECT pg_table_size('t1'); + pg_table_size +--------------- + 3932160 +(1 row) + +commit; +DROP table t1; +-- heap table +begin; +CREATE TABLE t2(i int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
+INSERT INTO t2 SELECT generate_series(1, 100000); +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 't2'::regclass and segid = -1; + tableid | size | segid +---------+---------+------- + t2 | 3932160 | -1 +(1 row) + +SELECT pg_table_size('t2'); + pg_table_size +--------------- + 3932160 +(1 row) + +commit; +-- heap table index +begin; +CREATE INDEX idx2 on t2(i); +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'idx2'::regclass and segid = -1; + tableid | size | segid +---------+---------+------- + idx2 | 2490368 | -1 +(1 row) + +SELECT pg_table_size('idx2'); + pg_table_size +--------------- + 2490368 +(1 row) + +commit; +DROP table t2; +-- toast table +begin; +CREATE TABLE t3(t text); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 't' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +INSERT INTO t3 SELECT repeat('a', 10000) FROM generate_series(1, 1000); +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 't3'::regclass and segid = -1; + tableid | size | segid +---------+--------+------- + t3 | 393216 | -1 +(1 row) + +SELECT pg_table_size('t3'); + pg_table_size +--------------- + 393216 +(1 row) + +commit; +DROP table t3; +-- AO table +begin; +CREATE TABLE ao (i int) WITH (appendonly=true); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +INSERT INTO ao SELECT generate_series(1, 100000); +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'ao'::regclass and segid = -1; + tableid | size | segid +---------+---------+------- + ao | 1263784 | -1 +(1 row) + +SELECT pg_table_size('ao'); + pg_table_size +--------------- + 1263784 +(1 row) + +commit; +-- AO table index +begin; +CREATE INDEX ao_idx on ao(i); +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'ao_idx'::regclass and segid = -1; + tableid | size | segid +---------+---------+------- + ao_idx | 2490368 | -1 +(1 row) + +SELECT pg_table_size('ao_idx'); + pg_table_size +--------------- + 2490368 +(1 row) + +commit; +DROP TABLE ao; +-- AOCS table +begin; +CREATE TABLE aocs (i int, t text) WITH (appendonly=true, orientation=column); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
+INSERT INTO aocs SELECT i, repeat('a', 1000) FROM generate_series(1, 10000) AS i; +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'aocs'::regclass and segid = -1; + tableid | size | segid +---------+----------+------- + aocs | 10485912 | -1 +(1 row) + +SELECT pg_table_size('aocs'); + pg_table_size +--------------- + 10485912 +(1 row) + +commit; +-- AOCS table index +begin; +CREATE INDEX aocs_idx on aocs(i); +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'aocs_idx'::regclass and segid = -1; + tableid | size | segid +----------+--------+------- + aocs_idx | 524288 | -1 +(1 row) + +SELECT pg_table_size('aocs_idx'); + pg_table_size +--------------- + 524288 +(1 row) + +commit; +DROP TABLE aocs; diff --git a/tests/regress/sql/test_uncommitted_table_size.sql b/tests/regress/sql/test_uncommitted_table_size.sql new file mode 100644 index 00000000000..28e7f280fb9 --- /dev/null +++ b/tests/regress/sql/test_uncommitted_table_size.sql @@ -0,0 +1,78 @@ +-- temp table +begin; +CREATE TEMP TABLE t1(i int); +INSERT INTO t1 SELECT generate_series(1, 100000); +SELECT pg_sleep(5); +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 't1'::regclass and segid = -1; +SELECT pg_table_size('t1'); +commit; + +DROP table t1; + +-- heap table +begin; +CREATE TABLE t2(i int); +INSERT INTO t2 SELECT generate_series(1, 100000); +SELECT pg_sleep(5); +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 't2'::regclass and segid = -1; +SELECT pg_table_size('t2'); +commit; + +-- heap table index +begin; +CREATE INDEX idx2 on t2(i); +SELECT pg_sleep(5); +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'idx2'::regclass and segid = -1; +SELECT pg_table_size('idx2'); +commit; + +DROP table t2; + +-- toast table +begin; +CREATE TABLE t3(t text); +INSERT INTO t3 SELECT repeat('a', 10000) FROM generate_series(1, 1000); +SELECT pg_sleep(5); +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 't3'::regclass and segid = -1; +SELECT pg_table_size('t3'); +commit; + +DROP table t3; + +-- AO table +begin; +CREATE TABLE ao (i int) WITH (appendonly=true); +INSERT INTO ao SELECT generate_series(1, 100000); +SELECT pg_sleep(5); +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'ao'::regclass and segid = -1; +SELECT pg_table_size('ao'); +commit; + +-- AOCS table index +begin; +CREATE INDEX ao_idx on ao(i); +SELECT pg_sleep(5); +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'ao_idx'::regclass and segid = -1; +SELECT pg_table_size('ao_idx'); +commit; + +DROP TABLE ao; + +-- AOCS table +begin; +CREATE TABLE aocs (i int, t text) WITH (appendonly=true, orientation=column); +INSERT INTO aocs SELECT i, repeat('a', 1000) FROM generate_series(1, 10000) AS i; +SELECT pg_sleep(5); +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'aocs'::regclass and segid = -1; +SELECT pg_table_size('aocs'); +commit; + +-- AOCS table index +begin; +CREATE INDEX aocs_idx on aocs(i); +SELECT pg_sleep(5); +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'aocs_idx'::regclass and segid = -1; +SELECT pg_table_size('aocs_idx'); +commit; + +DROP TABLE aocs; From e71357229804cb21f5ea88576d2187ec0dd5d077 Mon Sep 17 00:00:00 2001 From: Xing Guo Date: Wed, 8 Dec 
2021 16:30:51 +0800
Subject: [PATCH 094/330] Add support for adding uncommitted relations to
 blackmap. (#102)

This patch adds support for adding uncommitted relations to the blackmap on
segment servers. Most of the code shares similar logic with adding committed
relations to the blackmap. Tests will be added in the following commits.
---
 quotamodel.c                                | 163 +++++--
 tests/isolation2/expected/test_blackmap.out | 452 +++++++++++++++++++-
 tests/isolation2/sql/test_blackmap.sql      | 330 ++++++++++++++
 3 files changed, 894 insertions(+), 51 deletions(-)

diff --git a/quotamodel.c b/quotamodel.c
index 540eba15064..8c67a8e6dff 100644
--- a/quotamodel.c
+++ b/quotamodel.c
@@ -1430,6 +1430,36 @@ check_blackmap_by_relfilenode(RelFileNode relfilenode)
 return true;
 }
+/*
+ * Given relowner, relnamespace, and reltablespace, this function prepares
+ * the search key for the global blackmap.
+ */
+static void
+prepare_blackmap_search_key(BlackMapEntry *keyitem, QuotaType type,
+ Oid relowner, Oid relnamespace, Oid reltablespace)
+{
+ Assert(keyitem != NULL);
+ memset(keyitem, 0, sizeof(BlackMapEntry));
+ if (type == ROLE_QUOTA || type == ROLE_TABLESPACE_QUOTA)
+ keyitem->targetoid = relowner;
+ else if (type == NAMESPACE_QUOTA || type == NAMESPACE_TABLESPACE_QUOTA)
+ keyitem->targetoid = relnamespace;
+ else
+ ereport(ERROR,
+ (errcode(ERRCODE_INTERNAL_ERROR),
+ errmsg("[diskquota] unknown quota type: %d", type)));
+
+ if (type == ROLE_TABLESPACE_QUOTA || type == NAMESPACE_TABLESPACE_QUOTA)
+ keyitem->tablespaceoid = reltablespace;
+ else
+ {
+ /* refer to add_quota_to_blacklist */
+ keyitem->tablespaceoid = InvalidOid;
+ }
+ keyitem->databaseoid = MyDatabaseId;
+ keyitem->targettype = type;
+}
+
 /*
 * Given table oid, check whether quota limit
 * of table's schema or table's owner are reached.
@@ -1454,32 +1484,7 @@ check_blackmap_by_reloid(Oid reloid)
 LWLockAcquire(diskquota_locks.black_map_lock, LW_SHARED);
 for (QuotaType type = 0; type < NUM_QUOTA_TYPES; ++type)
 {
- if (type == ROLE_QUOTA || type == ROLE_TABLESPACE_QUOTA)
- {
- keyitem.targetoid = ownerOid;
- }
- else if (type == NAMESPACE_QUOTA || type == NAMESPACE_TABLESPACE_QUOTA)
- {
- keyitem.targetoid = nsOid;
- }
- else
- {
- ereport(ERROR,
- (errcode(ERRCODE_INTERNAL_ERROR),
- errmsg("[diskquota] unknown quota type: %d", type)));
- }
- if (type == ROLE_TABLESPACE_QUOTA || type == NAMESPACE_TABLESPACE_QUOTA)
- {
- keyitem.tablespaceoid = tablespaceoid;
- }
- else
- {
- /* refer to add_quota_to_blacklist */
- keyitem.tablespaceoid = InvalidOid;
- }
- keyitem.databaseoid = MyDatabaseId;
- keyitem.targettype = type;
- memset(&keyitem.relfilenode, 0, sizeof(RelFileNode));
+ prepare_blackmap_search_key(&keyitem, type, ownerOid, nsOid, tablespaceoid);
 entry = hash_search(disk_quota_black_map, &keyitem, HASH_FIND, &found);
@@ -1686,6 +1691,13 @@ refresh_blackmap(PG_FUNCTION_ARGS)
 hashctl.hcxt = CurrentMemoryContext;
 hashctl.hash = tag_hash;
+ /*
+ * Uncommitted relations' information and the global blackmap entries are
+ * cached in shared memory, and the memory regions are guarded by lightweight
+ * locks. In order not to hold multiple locks at the same time, we add blackmap
+ * entries into the local_blackmap below and then flush the content of the
+ * local_blackmap to the global blackmap at the end of this UDF.
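+ * (Holding relation_cache_lock and black_map_lock at the same time could
+ * deadlock if another backend ever acquired them in the opposite order.)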
+ */
+ local_blackmap = hash_create("local_blackmap", 1024, &hashctl, HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION);
@@ -1754,22 +1766,8 @@ refresh_blackmap(PG_FUNCTION_ARGS)
 for (QuotaType type = 0; type < NUM_QUOTA_TYPES; ++type)
 {
- /*
- * Check that if the current relation should be blocked.
- * FIXME: The logic of preparing the blackmap searching
- * key is identical to check_blackmap_by_reloid(), we can
- * make it into a static helper function.
- */
- memset(&keyitem, 0, sizeof(BlackMapEntry));
- if (type == ROLE_QUOTA || type == ROLE_TABLESPACE_QUOTA)
- keyitem.targetoid = relowner;
- else if (type == NAMESPACE_QUOTA || type == NAMESPACE_TABLESPACE_QUOTA)
- keyitem.targetoid = relnamespace;
- if (type == ROLE_TABLESPACE_QUOTA || type == NAMESPACE_TABLESPACE_QUOTA)
- keyitem.tablespaceoid = reltablespace;
- keyitem.databaseoid = MyDatabaseId;
- keyitem.targettype = type;
-
+ /* Check whether the current relation should be blocked. */
+ prepare_blackmap_search_key(&keyitem, type, relowner, relnamespace, reltablespace);
 blackmapentry = hash_search(local_blackmap, &keyitem, HASH_FIND, &found);
 if (found && blackmapentry)
@@ -1835,8 +1833,7 @@ refresh_blackmap(PG_FUNCTION_ARGS)
 memset(&blocked_filenode_keyitem, 0, sizeof(BlackMapEntry));
 memcpy(&blocked_filenode_keyitem.relfilenode, &relfilenode, sizeof(RelFileNode));
- LWLockAcquire(diskquota_locks.black_map_lock, LW_EXCLUSIVE);
- blocked_filenode_entry = hash_search(disk_quota_black_map,
+ blocked_filenode_entry = hash_search(local_blackmap,
 &blocked_filenode_keyitem,
 HASH_ENTER_NULL, &found);
 if (!found && blocked_filenode_entry)
@@ -1844,7 +1841,6 @@ refresh_blackmap(PG_FUNCTION_ARGS)
 memcpy(&blocked_filenode_entry->auxblockinfo, &keyitem, sizeof(BlackMapEntry));
 blocked_filenode_entry->segexceeded = blackmapentry->segexceeded;
 }
- LWLockRelease(diskquota_locks.black_map_lock);
 }
 }
 /*
@@ -1855,8 +1851,85 @@ refresh_blackmap(PG_FUNCTION_ARGS)
 }
 }
 }
+ else
+ {
+ /*
+ * We cannot fetch the relation from syscache. It may be an uncommitted relation.
+ * Let's try to fetch it from relation_cache.
+ */
+ DiskQuotaRelationCacheEntry *relation_cache_entry;
+ bool found;
+ LWLockAcquire(diskquota_locks.relation_cache_lock, LW_SHARED);
+ relation_cache_entry = hash_search(relation_cache, &active_oid,
+ HASH_FIND, &found);
+ if (found && relation_cache_entry)
+ {
+ Oid relnamespace = relation_cache_entry->namespaceoid;
+ Oid reltablespace = relation_cache_entry->rnode.node.spcNode;
+ Oid relowner = relation_cache_entry->owneroid;
+ BlackMapEntry keyitem;
+ for (QuotaType type = 0; type < NUM_QUOTA_TYPES; ++type)
+ {
+ /* Check whether the current relation should be blocked. */
+ prepare_blackmap_search_key(&keyitem, type, relowner, relnamespace, reltablespace);
+ blackmapentry = hash_search(local_blackmap, &keyitem, HASH_FIND, &found);
+
+ if (found && blackmapentry)
+ {
+ List *oid_list = NIL;
+ ListCell *cell = NULL;
+
+ /* Collect the relation oid together with its auxiliary relations' oid.
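+ * (TOAST and append-only auxiliary tables are collected here so that their
+ * relfilenodes are blocked together with the primary table's.)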
*/ + oid_list = lappend_oid(oid_list, active_oid); + for (int auxoidcnt = 0; auxoidcnt < relation_cache_entry->auxrel_num; ++auxoidcnt) + oid_list = lappend_oid(oid_list, relation_cache_entry->auxrel_oid[auxoidcnt]); + + foreach(cell, oid_list) + { + bool found; + GlobalBlackMapEntry *blocked_filenode_entry; + BlackMapEntry blocked_filenode_keyitem; + Oid curr_oid = lfirst_oid(cell); + + relation_cache_entry = hash_search(relation_cache, + &curr_oid, HASH_FIND, &found); + if (found && relation_cache_entry) + { + memset(&blocked_filenode_keyitem, 0, sizeof(BlackMapEntry)); + memcpy(&blocked_filenode_keyitem.relfilenode, + &relation_cache_entry->rnode.node, sizeof(RelFileNode)); + + blocked_filenode_entry = hash_search(local_blackmap, + &blocked_filenode_keyitem, + HASH_ENTER_NULL, &found); + if (!found && blocked_filenode_entry) + { + memcpy(&blocked_filenode_entry->auxblockinfo, &keyitem, sizeof(BlackMapEntry)); + blocked_filenode_entry->segexceeded = blackmapentry->segexceeded; + } + } + } + } + } + } + LWLockRelease(diskquota_locks.relation_cache_lock); + } } + /* Flush the content of local_blackmap to the global blackmap. */ + LWLockAcquire(diskquota_locks.black_map_lock, LW_EXCLUSIVE); + hash_seq_init(&hash_seq, local_blackmap); + while ((blackmapentry = hash_seq_search(&hash_seq)) != NULL) + { + bool found; + GlobalBlackMapEntry *new_entry; + new_entry = hash_search(disk_quota_black_map, &blackmapentry->keyitem, + HASH_ENTER_NULL, &found); + if (!found && new_entry) + memcpy(new_entry, blackmapentry, sizeof(GlobalBlackMapEntry)); + } + LWLockRelease(diskquota_locks.black_map_lock); + SPI_finish(); PG_RETURN_VOID(); } diff --git a/tests/isolation2/expected/test_blackmap.out b/tests/isolation2/expected/test_blackmap.out index ee1514ada56..b972c8beadc 100644 --- a/tests/isolation2/expected/test_blackmap.out +++ b/tests/isolation2/expected/test_blackmap.out @@ -36,7 +36,7 @@ SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) -- Session 1 will return and emit an error message saying that the quota limit is exceeded on seg0. 1<: <... completed> -ERROR: schema's disk space quota exceeded with name:2200 (seg0 127.0.0.1:6002 pid=86819) +ERROR: schema's disk space quota exceeded with name:2200 (seg0 127.0.0.1:6002 pid=65759) -- Clean up the blackmap on seg0. SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; @@ -75,7 +75,7 @@ SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) -- Session 1 will return and emit an error message saying that the quota limit is exceeded on seg0. 1<: <... completed> -ERROR: schema's disk space quota exceeded with name:2200 (seg0 127.0.0.1:6002 pid=86819) +ERROR: schema's disk space quota exceeded with name:2200 (seg0 127.0.0.1:6002 pid=65759) -- Clean up the blackmap on seg0. SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; @@ -114,7 +114,7 @@ SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) -- Session 1 will return and emit an error message saying that the quota limit is exceeded on seg0. 1<: <... completed> -ERROR: schema's disk space quota exceeded with name:2200 (seg0 127.0.0.1:6002 pid=86819) +ERROR: schema's disk space quota exceeded with name:2200 (seg0 127.0.0.1:6002 pid=65759) -- Clean up the blackmap on seg0. 
SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0;
@@ -155,7 +155,7 @@ SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid)
 -- Session 1 will return and emit an error message saying that the quota limit is exceeded on seg0.
 1<: <... completed>
-ERROR: schema's disk space quota exceeded with name:2200 (seg0 127.0.0.1:6002 pid=86819)
+ERROR: schema's disk space quota exceeded with name:2200 (seg0 127.0.0.1:6002 pid=65759)
 -- Clean up the blackmap on seg0.
 SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0;
@@ -187,7 +187,7 @@ SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid)
 Success:
(1 row)
 1<: <... completed>
-ERROR: tablespace:1663 schema:2200 diskquota exceeded per segment quota (seg0 127.0.0.1:6002 pid=86819)
+ERROR: tablespace:1663 schema:2200 diskquota exceeded per segment quota (seg0 127.0.0.1:6002 pid=65759)
 -- Clean up the blackmap on seg0.
 SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0;
 refresh_blackmap
@@ -218,7 +218,7 @@ SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid)
 Success:
(1 row)
 1<: <... completed>
-ERROR: tablespace: 1663 role: 10 diskquota exceeded per segment quota (seg0 127.0.0.1:6002 pid=86819)
+ERROR: tablespace: 1663 role: 10 diskquota exceeded per segment quota (seg0 127.0.0.1:6002 pid=65759)
 -- Clean up the blackmap on seg0.
 SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0;
 refresh_blackmap
@@ -239,3 +239,443 @@ DROP TABLE blocked_t5;
 DROP
 DROP TABLE blocked_t6;
 DROP
+
+--
+-- Below are helper functions for testing adding uncommitted relations to blackmap.
+--
+-- start_ignore
+CREATE OR REPLACE LANGUAGE plpythonu;
+CREATE
+-- end_ignore
+CREATE TYPE cached_relation_entry AS ( reloid oid, relname text, relowner oid, relnamespace oid, reltablespace oid, relfilenode oid, segid int);
+CREATE
+
+-- This function dumps given relation_cache entries to the given file.
+CREATE OR REPLACE FUNCTION dump_relation_cache_to_file(filename text) RETURNS void AS $$ rv = plpy.execute(""" SELECT (oid, relname, relowner, relnamespace, reltablespace, relfilenode, gp_segment_id)::cached_relation_entry FROM gp_dist_random('pg_class') """) with open(filename, 'wt') as f: for v in rv: f.write(v['row'][1:-1] + '\n') $$ LANGUAGE plpythonu;
+CREATE
+
+-- This function reads relation_cache entries from the given file.
+CREATE OR REPLACE FUNCTION read_relation_cache_from_file(filename text) RETURNS SETOF cached_relation_entry AS $$ with open(filename) as f: for l in f: r = l.split(',') yield (r[0], r[1], r[2], r[3], r[4], r[5], r[6]) $$ LANGUAGE plpythonu;
+CREATE
+
+-- This function replaces the oid that appears in the auxiliary relation's name
+-- with the corresponding relname of that oid.
+CREATE OR REPLACE FUNCTION replace_oid_with_relname(given_name text, filename text) RETURNS text AS $$ /*in func*/ BEGIN /*in func*/ RETURN COALESCE( /*in func*/ REGEXP_REPLACE(given_name, /*in func*/ '^(pg_toast_|pg_aoseg_|pg_aovisimap_|pg_aoblkdir_|pg_aocsseg_)\d+', /*in func*/ '\1' || /*in func*/ (SELECT DISTINCT relname FROM read_relation_cache_from_file(filename) /*in func*/ WHERE reloid=REGEXP_REPLACE(given_name, '\D', '', 'g')::oid), 'g'), given_name);/*in func*/ END; /*in func*/ $$ LANGUAGE plpgsql; +CREATE + +-- This function helps dispatch blackmap for the given relation to seg0. +CREATE OR REPLACE FUNCTION block_uncommitted_relation_on_seg0(rel text, block_type text, segexceeded boolean, filename text) RETURNS void AS $$ /*in func*/ DECLARE /*in func*/ bt int; /*in func*/ targetoid oid; /*in func*/ BEGIN /*in func*/ CASE block_type /*in func*/ WHEN 'NAMESPACE' THEN /*in func*/ bt = 0; /*in func*/ SELECT relnamespace INTO targetoid /*in func*/ FROM read_relation_cache_from_file(filename) /*in func*/ WHERE relname=rel::text AND segid=0; /*in func*/ WHEN 'ROLE' THEN /*in func*/ bt = 1; /*in func*/ SELECT relowner INTO targetoid /*in func*/ FROM read_relation_cache_from_file(filename) /*in func*/ WHERE relname=rel::text AND segid=0; /*in func*/ WHEN 'NAMESPACE_TABLESPACE' THEN /*in func*/ bt = 2; /*in func*/ SELECT relnamespace INTO targetoid /*in func*/ FROM read_relation_cache_from_file(filename) /*in func*/ WHERE relname=rel::text AND segid=0; /*in func*/ WHEN 'ROLE_TABLESPACE' THEN /*in func*/ bt = 3; /*in func*/ SELECT relowner INTO targetoid /*in func*/ FROM read_relation_cache_from_file(filename) /*in func*/ WHERE relname=rel::text AND segid=0; /*in func*/ END CASE; /*in func*/ PERFORM diskquota.refresh_blackmap( /*in func*/ ARRAY[ /*in func*/ ROW(targetoid, /*in func*/ (SELECT oid FROM pg_database WHERE datname=current_database()), /*in func*/ (SELECT reltablespace /*in func*/ FROM read_relation_cache_from_file(filename) /*in func*/ WHERE relname=rel::text AND segid=0), /*in func*/ bt, /*in func*/ segexceeded) /*in func*/ ]::diskquota.blackmap_entry[], /*in func*/ ARRAY[(SELECT reloid FROM read_relation_cache_from_file(filename) /*in func*/ WHERE relname=rel::text AND segid=0)::regclass]::oid[]) /*in func*/ FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; /*in func*/ END; $$ /*in func*/ LANGUAGE 'plpgsql'; +CREATE + +-- 7. Test that we are able to block an ordinary relation on seg0 by its relnamespace. +1: BEGIN; +BEGIN +1: CREATE TABLE blocked_t7(i int); +CREATE +1: SELECT dump_relation_cache_to_file('/tmp/test_blackmap.csv'); + dump_relation_cache_to_file +----------------------------- + +(1 row) +-- Inject 'suspension' to check_blackmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +-- Insert a small amount of data into blocked_t7. It will hang up at check_blackmap_by_relfilenode(). +1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE'::text, false, '/tmp/test_blackmap.csv'::text); + block_uncommitted_relation_on_seg0 +------------------------------------ + +(1 row) +-- Show that blocked_t7 is blocked on seg0. 
+2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_blackmap.csv'::text), be.target_type, be.target_oid FROM gp_dist_random('diskquota.blackmap') AS be, read_relation_cache_from_file('/tmp/test_blackmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0; + segid | relnamespace | reltablespace | relowner | replace_oid_with_relname | target_type | target_oid +-------+--------------+---------------+----------+--------------------------+-----------------+------------ + 0 | 2200 | 0 | 10 | blocked_t7 | NAMESPACE_QUOTA | 2200 +(1 row) +SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +1<: <... completed> +ERROR: schema's disk space quota exceeded with name:2200 (seg0 127.0.0.1:6002 pid=65759) +1: ABORT; +ABORT +-- Clean up the blackmap on seg0. +SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_blackmap +------------------ + +(1 row) + +-- 8. Test that we are able to block an ordinary relation on seg0 by its relowner. +1: BEGIN; +BEGIN +1: CREATE TABLE blocked_t7(i int); +CREATE +1: SELECT dump_relation_cache_to_file('/tmp/test_blackmap.csv'); + dump_relation_cache_to_file +----------------------------- + +(1 row) +-- Inject 'suspension' to check_blackmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +-- Insert a small amount of data into blocked_t7. It will hang up at check_blackmap_by_relfilenode(). +1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'ROLE'::text, false, '/tmp/test_blackmap.csv'::text); + block_uncommitted_relation_on_seg0 +------------------------------------ + +(1 row) +-- Show that blocked_t7 is blocked on seg0. +2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_blackmap.csv'::text), be.target_type, be.target_oid FROM gp_dist_random('diskquota.blackmap') AS be, read_relation_cache_from_file('/tmp/test_blackmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0; + segid | relnamespace | reltablespace | relowner | replace_oid_with_relname | target_type | target_oid +-------+--------------+---------------+----------+--------------------------+-------------+------------ + 0 | 2200 | 0 | 10 | blocked_t7 | ROLE_QUOTA | 10 +(1 row) +SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +1<: <... completed> +ERROR: role's disk space quota exceeded with name:10 (seg0 127.0.0.1:6002 pid=65759) +1: ABORT; +ABORT +-- Clean up the blackmap on seg0. +SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_blackmap +------------------ + +(1 row) + +-- 9. Test that we are able to block an ordinary relation on seg0 by its relnamespace and reltablespace. 
+1: BEGIN; +BEGIN +1: CREATE TABLE blocked_t7(i int); +CREATE +1: SELECT dump_relation_cache_to_file('/tmp/test_blackmap.csv'); + dump_relation_cache_to_file +----------------------------- + +(1 row) +-- Inject 'suspension' to check_blackmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +-- Insert a small amount of data into blocked_t7. It will hang up at check_blackmap_by_relfilenode(). +1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE_TABLESPACE'::text, false, '/tmp/test_blackmap.csv'::text); + block_uncommitted_relation_on_seg0 +------------------------------------ + +(1 row) +-- Show that blocked_t7 is blocked on seg0. +2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_blackmap.csv'::text), be.target_type, be.target_oid FROM gp_dist_random('diskquota.blackmap') AS be, read_relation_cache_from_file('/tmp/test_blackmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0; + segid | relnamespace | reltablespace | relowner | replace_oid_with_relname | target_type | target_oid +-------+--------------+---------------+----------+--------------------------+----------------------------+------------ + 0 | 2200 | 0 | 10 | blocked_t7 | NAMESPACE_TABLESPACE_QUOTA | 2200 +(1 row) +SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +1<: <... completed> +ERROR: tablespace:1663 schema:2200 diskquota exceeded (seg0 127.0.0.1:6002 pid=65759) +1: ABORT; +ABORT +-- Clean up the blackmap on seg0. +SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_blackmap +------------------ + +(1 row) + +-- 10. Test that we are able to block an ordinary relation on seg0 by its relowner and reltablespace. +1: BEGIN; +BEGIN +1: CREATE TABLE blocked_t7(i int); +CREATE +1: SELECT dump_relation_cache_to_file('/tmp/test_blackmap.csv'); + dump_relation_cache_to_file +----------------------------- + +(1 row) +-- Inject 'suspension' to check_blackmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +-- Insert a small amount of data into blocked_t7. It will hang up at check_blackmap_by_relfilenode(). +1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'ROLE_TABLESPACE'::text, false, '/tmp/test_blackmap.csv'::text); + block_uncommitted_relation_on_seg0 +------------------------------------ + +(1 row) +-- Show that blocked_t7 is blocked on seg0. 
+2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_blackmap.csv'::text), be.target_type, be.target_oid FROM gp_dist_random('diskquota.blackmap') AS be, read_relation_cache_from_file('/tmp/test_blackmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0; + segid | relnamespace | reltablespace | relowner | replace_oid_with_relname | target_type | target_oid +-------+--------------+---------------+----------+--------------------------+-----------------------+------------ + 0 | 2200 | 0 | 10 | blocked_t7 | ROLE_TABLESPACE_QUOTA | 10 +(1 row) +SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +1<: <... completed> +ERROR: tablespace: 1663 role: 10 diskquota exceeded (seg0 127.0.0.1:6002 pid=65759) +1: ABORT; +ABORT +-- Clean up the blackmap on seg0. +SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_blackmap +------------------ + +(1 row) + +-- 11. Test that we are able to block an ordinary relation on seg0 by its relnamespace and reltablespace (segexceeded=true). +1: BEGIN; +BEGIN +1: CREATE TABLE blocked_t7(i int); +CREATE +1: SELECT dump_relation_cache_to_file('/tmp/test_blackmap.csv'); + dump_relation_cache_to_file +----------------------------- + +(1 row) +-- Inject 'suspension' to check_blackmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +-- Insert a small amount of data into blocked_t7. It will hang up at check_blackmap_by_relfilenode(). +1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE_TABLESPACE'::text, true, '/tmp/test_blackmap.csv'::text); + block_uncommitted_relation_on_seg0 +------------------------------------ + +(1 row) +-- Show that blocked_t7 is blocked on seg0. +2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_blackmap.csv'::text), be.target_type, be.target_oid FROM gp_dist_random('diskquota.blackmap') AS be, read_relation_cache_from_file('/tmp/test_blackmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0; + segid | relnamespace | reltablespace | relowner | replace_oid_with_relname | target_type | target_oid +-------+--------------+---------------+----------+--------------------------+----------------------------+------------ + 0 | 2200 | 0 | 10 | blocked_t7 | NAMESPACE_TABLESPACE_QUOTA | 2200 +(1 row) +SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +1<: <... completed> +ERROR: tablespace:1663 schema:2200 diskquota exceeded per segment quota (seg0 127.0.0.1:6002 pid=65759) +1: ABORT; +ABORT +-- Clean up the blackmap on seg0. +SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_blackmap +------------------ + +(1 row) + +-- 12. 
Test that we are able to block an ordinary relation on seg0 by its relowner and reltablespace (segexceeded=true). +1: BEGIN; +BEGIN +1: CREATE TABLE blocked_t7(i int); +CREATE +1: SELECT dump_relation_cache_to_file('/tmp/test_blackmap.csv'); + dump_relation_cache_to_file +----------------------------- + +(1 row) +-- Inject 'suspension' to check_blackmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +-- Insert a small amount of data into blocked_t7. It will hang up at check_blackmap_by_relfilenode(). +1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'ROLE_TABLESPACE'::text, true, '/tmp/test_blackmap.csv'::text); + block_uncommitted_relation_on_seg0 +------------------------------------ + +(1 row) +-- Show that blocked_t7 is blocked on seg0. +2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_blackmap.csv'::text), be.target_type, be.target_oid FROM gp_dist_random('diskquota.blackmap') AS be, read_relation_cache_from_file('/tmp/test_blackmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0; + segid | relnamespace | reltablespace | relowner | replace_oid_with_relname | target_type | target_oid +-------+--------------+---------------+----------+--------------------------+-----------------------+------------ + 0 | 2200 | 0 | 10 | blocked_t7 | ROLE_TABLESPACE_QUOTA | 10 +(1 row) +SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +1<: <... completed> +ERROR: tablespace: 1663 role: 10 diskquota exceeded per segment quota (seg0 127.0.0.1:6002 pid=65759) +1: ABORT; +ABORT +-- Clean up the blackmap on seg0. +SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_blackmap +------------------ + +(1 row) + +-- 13. Test that we are able to block a toast relation on seg0 by its namespace. +1: BEGIN; +BEGIN +1: CREATE TABLE blocked_t7(i text); +CREATE +1: SELECT dump_relation_cache_to_file('/tmp/test_blackmap.csv'); + dump_relation_cache_to_file +----------------------------- + +(1 row) +-- Inject 'suspension' to check_blackmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +-- Insert a small amount of data into blocked_t7. It will hang up at check_blackmap_by_relfilenode(). +1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE'::text, true, '/tmp/test_blackmap.csv'::text); + block_uncommitted_relation_on_seg0 +------------------------------------ + +(1 row) +-- Show that blocked_t7 is blocked on seg0. 
+2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_blackmap.csv'::text) AS relname, be.target_type, be.target_oid FROM gp_dist_random('diskquota.blackmap') AS be, read_relation_cache_from_file('/tmp/test_blackmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0 ORDER BY relname DESC; + segid | relnamespace | reltablespace | relowner | relname | target_type | target_oid +-------+--------------+---------------+----------+---------------------------+-----------------+------------ + 0 | 99 | 0 | 10 | pg_toast_blocked_t7_index | NAMESPACE_QUOTA | 2200 + 0 | 99 | 0 | 10 | pg_toast_blocked_t7 | NAMESPACE_QUOTA | 2200 + 0 | 2200 | 0 | 10 | blocked_t7 | NAMESPACE_QUOTA | 2200 +(3 rows) +SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +1<: <... completed> +ERROR: schema's disk space quota exceeded with name:2200 (seg0 127.0.0.1:6002 pid=65759) +1: ABORT; +ABORT +-- Clean up the blackmap on seg0. +SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_blackmap +------------------ + +(1 row) + +-- 14. Test that we are able to block an appendonly relation on seg0 by its namespace. +1: BEGIN; +BEGIN +1: CREATE TABLE blocked_t7(i int) WITH (appendonly=true); +CREATE +1: SELECT dump_relation_cache_to_file('/tmp/test_blackmap.csv'); + dump_relation_cache_to_file +----------------------------- + +(1 row) +-- Inject 'suspension' to check_blackmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +-- Insert a small amount of data into blocked_t7. It will hang up at check_blackmap_by_relfilenode(). +1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE'::text, true, '/tmp/test_blackmap.csv'::text); + block_uncommitted_relation_on_seg0 +------------------------------------ + +(1 row) +-- Show that blocked_t7 is blocked on seg0. +2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_blackmap.csv'::text) AS relname, be.target_type, be.target_oid FROM gp_dist_random('diskquota.blackmap') AS be, read_relation_cache_from_file('/tmp/test_blackmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0 ORDER BY relname DESC; + segid | relnamespace | reltablespace | relowner | relname | target_type | target_oid +-------+--------------+---------------+----------+-------------------------------+-----------------+------------ + 0 | 6104 | 0 | 10 | pg_aovisimap_blocked_t7_index | NAMESPACE_QUOTA | 2200 + 0 | 6104 | 0 | 10 | pg_aovisimap_blocked_t7 | NAMESPACE_QUOTA | 2200 + 0 | 6104 | 0 | 10 | pg_aoseg_blocked_t7 | NAMESPACE_QUOTA | 2200 + 0 | 2200 | 0 | 10 | blocked_t7 | NAMESPACE_QUOTA | 2200 +(4 rows) +SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +1<: <... 
completed> +ERROR: schema's disk space quota exceeded with name:2200 (seg0 127.0.0.1:6002 pid=65759) +1: ABORT; +ABORT +-- Clean up the blackmap on seg0. +SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_blackmap +------------------ + +(1 row) + +-- 15. Test that we are able to block an appendonly (column oriented) relation on seg0 by its namespace. +1: BEGIN; +BEGIN +1: CREATE TABLE blocked_t7(i int) WITH (appendonly=true, orientation=column); +CREATE +1: SELECT dump_relation_cache_to_file('/tmp/test_blackmap.csv'); + dump_relation_cache_to_file +----------------------------- + +(1 row) +-- Inject 'suspension' to check_blackmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +-- Insert a small amount of data into blocked_t7. It will hang up at check_blackmap_by_relfilenode(). +1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE'::text, true, '/tmp/test_blackmap.csv'::text); + block_uncommitted_relation_on_seg0 +------------------------------------ + +(1 row) +-- Show that blocked_t7 is blocked on seg0. +2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_blackmap.csv'::text) AS relname, be.target_type, be.target_oid FROM gp_dist_random('diskquota.blackmap') AS be, read_relation_cache_from_file('/tmp/test_blackmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0 ORDER BY relname DESC; + segid | relnamespace | reltablespace | relowner | relname | target_type | target_oid +-------+--------------+---------------+----------+-------------------------------+-----------------+------------ + 0 | 6104 | 0 | 10 | pg_aovisimap_blocked_t7_index | NAMESPACE_QUOTA | 2200 + 0 | 6104 | 0 | 10 | pg_aovisimap_blocked_t7 | NAMESPACE_QUOTA | 2200 + 0 | 6104 | 0 | 10 | pg_aocsseg_blocked_t7 | NAMESPACE_QUOTA | 2200 + 0 | 2200 | 0 | 10 | blocked_t7 | NAMESPACE_QUOTA | 2200 +(4 rows) +SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +1<: <... completed> +ERROR: schema's disk space quota exceeded with name:2200 (seg0 127.0.0.1:6002 pid=65759) +1: ABORT; +ABORT +-- Clean up the blackmap on seg0. +SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_blackmap +------------------ + +(1 row) diff --git a/tests/isolation2/sql/test_blackmap.sql b/tests/isolation2/sql/test_blackmap.sql index 2de0691bf3e..340aa723df5 100644 --- a/tests/isolation2/sql/test_blackmap.sql +++ b/tests/isolation2/sql/test_blackmap.sql @@ -176,3 +176,333 @@ DROP TABLE blocked_t3; DROP TABLE blocked_t4; DROP TABLE blocked_t5; DROP TABLE blocked_t6; + +-- +-- Below are helper functions for testing adding uncommitted relations to blackmap. 
+--
+-- start_ignore
+CREATE OR REPLACE LANGUAGE plpythonu;
+-- end_ignore
+CREATE TYPE cached_relation_entry AS (
+ reloid oid,
+ relname text,
+ relowner oid,
+ relnamespace oid,
+ reltablespace oid,
+ relfilenode oid,
+ segid int);
+
+-- This function dumps given relation_cache entries to the given file.
+CREATE OR REPLACE FUNCTION dump_relation_cache_to_file(filename text)
+ RETURNS void
+AS $$
+ rv = plpy.execute("""
+ SELECT (oid, relname, relowner,
+ relnamespace, reltablespace,
+ relfilenode, gp_segment_id)::cached_relation_entry
+ FROM gp_dist_random('pg_class')
+ """)
+ with open(filename, 'wt') as f:
+ for v in rv:
+ f.write(v['row'][1:-1] + '\n')
+$$ LANGUAGE plpythonu;
+
+-- This function reads relation_cache entries from the given file.
+CREATE OR REPLACE FUNCTION read_relation_cache_from_file(filename text)
+ RETURNS SETOF cached_relation_entry
+AS $$
+ with open(filename) as f:
+ for l in f:
+ r = l.split(',')
+ yield (r[0], r[1], r[2], r[3], r[4], r[5], r[6])
+$$ LANGUAGE plpythonu;
+
+-- This function replaces the oid that appears in the auxiliary relation's name
+-- with the corresponding relname of that oid.
+CREATE OR REPLACE FUNCTION replace_oid_with_relname(given_name text, filename text)
+ RETURNS text AS $$ /*in func*/
+ BEGIN /*in func*/
+ RETURN COALESCE( /*in func*/
+ REGEXP_REPLACE(given_name, /*in func*/
+ '^(pg_toast_|pg_aoseg_|pg_aovisimap_|pg_aoblkdir_|pg_aocsseg_)\d+', /*in func*/
+ '\1' || /*in func*/
+ (SELECT DISTINCT relname FROM read_relation_cache_from_file(filename) /*in func*/
+ WHERE reloid=REGEXP_REPLACE(given_name, '\D', '', 'g')::oid), 'g'), given_name);/*in func*/
+ END; /*in func*/
+$$ LANGUAGE plpgsql;
+
+-- This function helps dispatch blackmap for the given relation to seg0.
+CREATE OR REPLACE FUNCTION block_uncommitted_relation_on_seg0(rel text, block_type text, segexceeded boolean, filename text)
+ RETURNS void AS $$ /*in func*/
+ DECLARE /*in func*/
+ bt int; /*in func*/
+ targetoid oid; /*in func*/
+ BEGIN /*in func*/
+ CASE block_type /*in func*/
+ WHEN 'NAMESPACE' THEN /*in func*/
+ bt = 0; /*in func*/
+ SELECT relnamespace INTO targetoid /*in func*/
+ FROM read_relation_cache_from_file(filename) /*in func*/
+ WHERE relname=rel::text AND segid=0; /*in func*/
+ WHEN 'ROLE' THEN /*in func*/
+ bt = 1; /*in func*/
+ SELECT relowner INTO targetoid /*in func*/
+ FROM read_relation_cache_from_file(filename) /*in func*/
+ WHERE relname=rel::text AND segid=0; /*in func*/
+ WHEN 'NAMESPACE_TABLESPACE' THEN /*in func*/
+ bt = 2; /*in func*/
+ SELECT relnamespace INTO targetoid /*in func*/
+ FROM read_relation_cache_from_file(filename) /*in func*/
+ WHERE relname=rel::text AND segid=0; /*in func*/
+ WHEN 'ROLE_TABLESPACE' THEN /*in func*/
+ bt = 3; /*in func*/
+ SELECT relowner INTO targetoid /*in func*/
+ FROM read_relation_cache_from_file(filename) /*in func*/
+ WHERE relname=rel::text AND segid=0; /*in func*/
+ END CASE; /*in func*/
+ PERFORM diskquota.refresh_blackmap( /*in func*/
+ ARRAY[ /*in func*/
+ ROW(targetoid, /*in func*/
+ (SELECT oid FROM pg_database WHERE datname=current_database()), /*in func*/
+ (SELECT reltablespace /*in func*/
+ FROM read_relation_cache_from_file(filename) /*in func*/
+ WHERE relname=rel::text AND segid=0), /*in func*/
+ bt, /*in func*/
+ segexceeded) /*in func*/
+ ]::diskquota.blackmap_entry[], /*in func*/
+ ARRAY[(SELECT reloid FROM read_relation_cache_from_file(filename) /*in func*/
+ WHERE relname=rel::text AND segid=0)::regclass]::oid[]) /*in func*/
+ FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; /*in func*/
+ END; $$ /*in func*/
+LANGUAGE 'plpgsql';
+
+-- 7. Test that we are able to block an ordinary relation on seg0 by its relnamespace.
+1: BEGIN;
+1: CREATE TABLE blocked_t7(i int);
+1: SELECT dump_relation_cache_to_file('/tmp/test_blackmap.csv');
+-- Inject 'suspension' to check_blackmap_by_relfilenode on seg0.
+SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'suspend', dbid)
+ FROM gp_segment_configuration WHERE role='p' AND content=0;
+-- Insert a small amount of data into blocked_t7. It will hang up at check_blackmap_by_relfilenode().
+1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000);
+SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE'::text, false, '/tmp/test_blackmap.csv'::text);
+-- Show that blocked_t7 is blocked on seg0.
+2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_blackmap.csv'::text),
+ be.target_type, be.target_oid
+ FROM gp_dist_random('diskquota.blackmap') AS be,
+ read_relation_cache_from_file('/tmp/test_blackmap.csv') AS rel
+ WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0;
+SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid)
+ FROM gp_segment_configuration WHERE role='p' AND content=0;
+1<:
+1: ABORT;
+-- Clean up the blackmap on seg0.
+SELECT diskquota.refresh_blackmap(
+ ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[])
+ FROM gp_dist_random('gp_id') WHERE gp_segment_id=0;
+
+-- 8. Test that we are able to block an ordinary relation on seg0 by its relowner.
+1: BEGIN;
+1: CREATE TABLE blocked_t7(i int);
+1: SELECT dump_relation_cache_to_file('/tmp/test_blackmap.csv');
+-- Inject 'suspension' to check_blackmap_by_relfilenode on seg0.
+SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'suspend', dbid)
+ FROM gp_segment_configuration WHERE role='p' AND content=0;
+-- Insert a small amount of data into blocked_t7. It will hang up at check_blackmap_by_relfilenode().
+1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE_TABLESPACE'::text, false, '/tmp/test_blackmap.csv'::text); +-- Show that blocked_t7 is blocked on seg0. +2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_blackmap.csv'::text), + be.target_type, be.target_oid + FROM gp_dist_random('diskquota.blackmap') AS be, + read_relation_cache_from_file('/tmp/test_blackmap.csv') AS rel + WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0; +SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; +1<: +1: ABORT; +-- Clean up the blackmap on seg0. +SELECT diskquota.refresh_blackmap( + ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) + FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + +-- 10. Test that we are able to block an ordinary relation on seg0 by its relowner and reltablespace. +1: BEGIN; +1: CREATE TABLE blocked_t7(i int); +1: SELECT dump_relation_cache_to_file('/tmp/test_blackmap.csv'); +-- Inject 'suspension' to check_blackmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'suspend', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; +-- Insert a small amount of data into blocked_t7. It will hang up at check_blackmap_by_relfilenode(). +1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'ROLE_TABLESPACE'::text, false, '/tmp/test_blackmap.csv'::text); +-- Show that blocked_t7 is blocked on seg0. +2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_blackmap.csv'::text), + be.target_type, be.target_oid + FROM gp_dist_random('diskquota.blackmap') AS be, + read_relation_cache_from_file('/tmp/test_blackmap.csv') AS rel + WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0; +SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; +1<: +1: ABORT; +-- Clean up the blackmap on seg0. +SELECT diskquota.refresh_blackmap( + ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) + FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + +-- 11. Test that we are able to block an ordinary relation on seg0 by its relnamespace and reltablespace (segexceeded=true). +1: BEGIN; +1: CREATE TABLE blocked_t7(i int); +1: SELECT dump_relation_cache_to_file('/tmp/test_blackmap.csv'); +-- Inject 'suspension' to check_blackmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'suspend', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; +-- Insert a small amount of data into blocked_t7. It will hang up at check_blackmap_by_relfilenode(). +1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE_TABLESPACE'::text, true, '/tmp/test_blackmap.csv'::text); +-- Show that blocked_t7 is blocked on seg0. 
+2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_blackmap.csv'::text), + be.target_type, be.target_oid + FROM gp_dist_random('diskquota.blackmap') AS be, + read_relation_cache_from_file('/tmp/test_blackmap.csv') AS rel + WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0; +SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; +1<: +1: ABORT; +-- Clean up the blackmap on seg0. +SELECT diskquota.refresh_blackmap( + ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) + FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + +-- 12. Test that we are able to block an ordinary relation on seg0 by its relowner and reltablespace (segexceeded=true). +1: BEGIN; +1: CREATE TABLE blocked_t7(i int); +1: SELECT dump_relation_cache_to_file('/tmp/test_blackmap.csv'); +-- Inject 'suspension' to check_blackmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'suspend', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; +-- Insert a small amount of data into blocked_t7. It will hang up at check_blackmap_by_relfilenode(). +1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'ROLE_TABLESPACE'::text, true, '/tmp/test_blackmap.csv'::text); +-- Show that blocked_t7 is blocked on seg0. +2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_blackmap.csv'::text), + be.target_type, be.target_oid + FROM gp_dist_random('diskquota.blackmap') AS be, + read_relation_cache_from_file('/tmp/test_blackmap.csv') AS rel + WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0; +SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; +1<: +1: ABORT; +-- Clean up the blackmap on seg0. +SELECT diskquota.refresh_blackmap( + ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) + FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + +-- 13. Test that we are able to block a toast relation on seg0 by its namespace. +1: BEGIN; +1: CREATE TABLE blocked_t7(i text); +1: SELECT dump_relation_cache_to_file('/tmp/test_blackmap.csv'); +-- Inject 'suspension' to check_blackmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'suspend', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; +-- Insert a small amount of data into blocked_t7. It will hang up at check_blackmap_by_relfilenode(). +1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE'::text, true, '/tmp/test_blackmap.csv'::text); +-- Show that blocked_t7 is blocked on seg0. 
+2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, + replace_oid_with_relname(rel.relname, '/tmp/test_blackmap.csv'::text) AS relname, + be.target_type, be.target_oid + FROM gp_dist_random('diskquota.blackmap') AS be, + read_relation_cache_from_file('/tmp/test_blackmap.csv') AS rel + WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0 + ORDER BY relname DESC; +SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; +1<: +1: ABORT; +-- Clean up the blackmap on seg0. +SELECT diskquota.refresh_blackmap( + ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) + FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + +-- 14. Test that we are able to block an appendonly relation on seg0 by its namespace. +1: BEGIN; +1: CREATE TABLE blocked_t7(i int) WITH (appendonly=true); +1: SELECT dump_relation_cache_to_file('/tmp/test_blackmap.csv'); +-- Inject 'suspension' to check_blackmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'suspend', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; +-- Insert a small amount of data into blocked_t7. It will hang up at check_blackmap_by_relfilenode(). +1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE'::text, true, '/tmp/test_blackmap.csv'::text); +-- Show that blocked_t7 is blocked on seg0. +2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, + replace_oid_with_relname(rel.relname, '/tmp/test_blackmap.csv'::text) AS relname, + be.target_type, be.target_oid + FROM gp_dist_random('diskquota.blackmap') AS be, + read_relation_cache_from_file('/tmp/test_blackmap.csv') AS rel + WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0 + ORDER BY relname DESC; +SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; +1<: +1: ABORT; +-- Clean up the blackmap on seg0. +SELECT diskquota.refresh_blackmap( + ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) + FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + +-- 15. Test that we are able to block an appendonly (column oriented) relation on seg0 by its namespace. +1: BEGIN; +1: CREATE TABLE blocked_t7(i int) WITH (appendonly=true, orientation=column); +1: SELECT dump_relation_cache_to_file('/tmp/test_blackmap.csv'); +-- Inject 'suspension' to check_blackmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'suspend', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; +-- Insert a small amount of data into blocked_t7. It will hang up at check_blackmap_by_relfilenode(). +1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE'::text, true, '/tmp/test_blackmap.csv'::text); +-- Show that blocked_t7 is blocked on seg0. 
+2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner,
+ replace_oid_with_relname(rel.relname, '/tmp/test_blackmap.csv'::text) AS relname,
+ be.target_type, be.target_oid
+ FROM gp_dist_random('diskquota.blackmap') AS be,
+ read_relation_cache_from_file('/tmp/test_blackmap.csv') AS rel
+ WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0
+ ORDER BY relname DESC;
+SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid)
+ FROM gp_segment_configuration WHERE role='p' AND content=0;
+1<:
+1: ABORT;
+-- Clean up the blackmap on seg0.
+SELECT diskquota.refresh_blackmap(
+ ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[])
+ FROM gp_dist_random('gp_id') WHERE gp_segment_id=0;
From 0c4da6cf9ffe88efee5faa02f77e1f3ad3fa1c54 Mon Sep 17 00:00:00 2001
From: Zhang Hao <1446384557@qq.com>
Date: Fri, 10 Dec 2021 15:08:22 +0800
Subject: [PATCH 095/330] Fix bug: calculate AO table size in CTAS (#105)

The extension file with (segno=0, column=1) is not traversed by
ao_foreach_extent_file(), so we need to handle its size additionally.

Co-authored-by: hzhang2
Co-authored-by: Xing Guo
---
 diskquota_utility.c                             |  6 +++
 .../expected/test_uncommitted_table_size.out    | 51 ++++++++++++++++++-
 .../sql/test_uncommitted_table_size.sql         | 20 +++++++-
 3 files changed, 75 insertions(+), 2 deletions(-)

diff --git a/diskquota_utility.c b/diskquota_utility.c
index 4cfe984359e..b1a19d7214a 100644
--- a/diskquota_utility.c
+++ b/diskquota_utility.c
@@ -1252,6 +1252,12 @@ calculate_relation_size_all_forks(RelFileNodeBackend *rnode, char relstorage)
 RelationFileStatCtx ctx = {0};
 ctx.relation_path = relpathbackend(rnode->node, rnode->backend, MAIN_FORKNUM);
 ctx.size = 0;
+ /*
+ * Since the extension file with (segno=0, column=1) is not traversed by
+ * ao_foreach_extent_file(), we need to handle its size additionally.
+ * See comments in ao_foreach_extent_file() for details.
+ */
+ relation_file_stat(0, &ctx);
 ao_foreach_extent_file(relation_file_stat, &ctx);
 return ctx.size;
 }
diff --git a/tests/regress/expected/test_uncommitted_table_size.out b/tests/regress/expected/test_uncommitted_table_size.out
index 17f08f4c11b..f4e465ee1c1 100644
--- a/tests/regress/expected/test_uncommitted_table_size.out
+++ b/tests/regress/expected/test_uncommitted_table_size.out
@@ -123,7 +123,7 @@ SELECT pg_table_size('ao');
 (1 row)
 commit;
--- AOCS table index
+-- AO table index
 begin;
 CREATE INDEX ao_idx on ao(i);
 SELECT pg_sleep(5);
@@ -144,6 +144,30 @@ SELECT pg_table_size('ao_idx');
 2490368
 (1 row)
+commit;
+DROP TABLE ao;
+-- AO table CTAS
+begin;
+CREATE TABLE ao WITH(appendonly=true) AS SELECT generate_series(1, 10000);
+NOTICE: Table doesn't have 'DISTRIBUTED BY' clause. Creating a NULL policy entry.
+SELECT pg_sleep(5);
+ pg_sleep
+----------
+
+(1 row)
+
+SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'ao'::regclass and segid = -1;
+ tableid | size | segid
+---------+--------+-------
+ ao | 329576 | -1
+(1 row)
+
+SELECT pg_table_size('ao');
+ pg_table_size
+---------------
+ 329576
+(1 row)
+
 commit;
 DROP TABLE ao;
 -- AOCS table
@@ -194,3 +218,28 @@ SELECT pg_table_size('aocs_idx');
 commit;
 DROP TABLE aocs;
+-- AOCS table CTAS
+begin;
+CREATE TABLE aocs WITH(appendonly=true, orientation=column) AS SELECT i, array(select * from generate_series(1,1000)) FROM generate_series(1, 100) AS i;
+NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'i' as the Greenplum Database data distribution key for this table.
+HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
+SELECT pg_sleep(5);
+ pg_sleep
+----------
+
+(1 row)
+
+SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'aocs'::regclass and segid = -1;
+ tableid | size | segid
+---------+--------+-------
+ aocs | 763936 | -1
+(1 row)
+
+SELECT pg_table_size('aocs');
+ pg_table_size
+---------------
+ 763936
+(1 row)
+
+commit;
+DROP TABLE aocs;
diff --git a/tests/regress/sql/test_uncommitted_table_size.sql b/tests/regress/sql/test_uncommitted_table_size.sql
index 28e7f280fb9..6cebf8afb54 100644
--- a/tests/regress/sql/test_uncommitted_table_size.sql
+++ b/tests/regress/sql/test_uncommitted_table_size.sql
@@ -48,7 +48,7 @@ SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid =
 SELECT pg_table_size('ao');
 commit;

--- AOCS table index
+-- AO table index
 begin;
 CREATE INDEX ao_idx on ao(i);
 SELECT pg_sleep(5);
@@ -58,6 +58,15 @@ commit;

 DROP TABLE ao;

+-- AO table CTAS
+begin;
+CREATE TABLE ao WITH(appendonly=true) AS SELECT generate_series(1, 10000);
+SELECT pg_sleep(5);
+SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'ao'::regclass and segid = -1;
+SELECT pg_table_size('ao');
+commit;
+DROP TABLE ao;
+
 -- AOCS table
 begin;
 CREATE TABLE aocs (i int, t text) WITH (appendonly=true, orientation=column);
@@ -76,3 +85,12 @@ SELECT pg_table_size('aocs_idx');

 commit;
 DROP TABLE aocs;
+
+-- AOCS table CTAS
+begin;
+CREATE TABLE aocs WITH(appendonly=true, orientation=column) AS SELECT i, array(select * from generate_series(1,1000)) FROM generate_series(1, 100) AS i;
+SELECT pg_sleep(5);
+SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'aocs'::regclass and segid = -1;
+SELECT pg_table_size('aocs');
+commit;
+DROP TABLE aocs;
\ No newline at end of file

From 3bf77d5b88be35f4feba7b55f8f38ff4e005b2f4 Mon Sep 17 00:00:00 2001
From: Xing Guo
Date: Mon, 13 Dec 2021 10:34:33 +0800
Subject: [PATCH 096/330] Add support for dispatching blackmap to segments. (#104)

This patch adds support for dispatching blackmap to segments.

This patch also introduces two UDFs:

  diskquota.enable_hardlimit()
  diskquota.disable_hardlimit()

Users can enable and disable the hardlimit feature with these UDFs.
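A minimal usage sketch (both UDFs require superuser; per this patch, the flag is then propagated to every segment via CdbDispatchCommand()):

    SELECT diskquota.enable_hardlimit();   -- dispatch the blackmap and check quotas by relfilenode
    SELECT diskquota.disable_hardlimit();  -- fall back to soft-limit-only enforcement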
Co-authored-by: hzhang2 --- diskquota--1.0--2.0.sql | 10 + diskquota--2.0--1.0.sql | 4 + diskquota--2.0.sql | 10 + diskquota.c | 17 -- diskquota.h | 2 + quotamodel.c | 206 ++++++++++++++++-- tests/isolation2/expected/test_blackmap.out | 45 ++-- tests/isolation2/sql/test_blackmap.sql | 9 + tests/regress/Makefile | 4 +- tests/regress/diskquota_schedule | 4 + tests/regress/expected/prepare.out | 33 +-- tests/regress/expected/test_ctas_role.out | 87 ++++++++ tests/regress/expected/test_ctas_schema.out | 84 +++++++ .../expected/test_ctas_tablespace_role.out | 88 ++++++++ .../expected/test_ctas_tablespace_schema.out | 91 ++++++++ .../regress/expected/test_tablespace_role.out | 10 +- .../expected/test_tablespace_role_perseg.out | 12 +- .../expected/test_uncommitted_table_size.out | 3 +- tests/regress/regress_init_file | 14 ++ tests/regress/sql/prepare.sql | 7 +- tests/regress/sql/test_ctas_role.sql | 36 +++ tests/regress/sql/test_ctas_schema.sql | 33 +++ .../regress/sql/test_ctas_tablespace_role.sql | 45 ++++ .../sql/test_ctas_tablespace_schema.sql | 45 ++++ 24 files changed, 820 insertions(+), 79 deletions(-) create mode 100644 tests/regress/expected/test_ctas_role.out create mode 100644 tests/regress/expected/test_ctas_schema.out create mode 100644 tests/regress/expected/test_ctas_tablespace_role.out create mode 100644 tests/regress/expected/test_ctas_tablespace_schema.out create mode 100644 tests/regress/regress_init_file create mode 100644 tests/regress/sql/test_ctas_role.sql create mode 100644 tests/regress/sql/test_ctas_schema.sql create mode 100644 tests/regress/sql/test_ctas_tablespace_role.sql create mode 100644 tests/regress/sql/test_ctas_tablespace_schema.sql diff --git a/diskquota--1.0--2.0.sql b/diskquota--1.0--2.0.sql index 9e9b7abb5f3..e086f5580b4 100644 --- a/diskquota--1.0--2.0.sql +++ b/diskquota--1.0--2.0.sql @@ -32,6 +32,16 @@ RETURNS void STRICT AS 'MODULE_PATHNAME', 'diskquota_resume' LANGUAGE C; +CREATE OR REPLACE FUNCTION diskquota.enable_hardlimit() +RETURNS void STRICT +AS 'MODULE_PATHNAME', 'diskquota_enable_hardlimit' +LANGUAGE C; + +CREATE OR REPLACE FUNCTION diskquota.disable_hardlimit() +RETURNS void STRICT +AS 'MODULE_PATHNAME', 'diskquota_disable_hardlimit' +LANGUAGE C; + CREATE TYPE diskquota.blackmap_entry AS (target_oid oid, database_oid oid, tablespace_oid oid, target_type integer, seg_exceeded boolean); CREATE FUNCTION diskquota.refresh_blackmap(diskquota.blackmap_entry[], oid[]) diff --git a/diskquota--2.0--1.0.sql b/diskquota--2.0--1.0.sql index 5b5fa22043e..dc8a50d865b 100644 --- a/diskquota--2.0--1.0.sql +++ b/diskquota--2.0--1.0.sql @@ -8,6 +8,10 @@ DROP FUNCTION IF EXISTS diskquota.pause(); DROP FUNCTION IF EXISTS diskquota.resume(); +DROP FUNCTION IF EXISTS diskquota.disable_hardlimit(); + +DROP FUNCTION IF EXISTS diskquota.enable_hardlimit(); + DROP FUNCTION IF EXISTS diskquota.refresh_blackmap(diskquota.blackmap_entry[], oid[]); DROP TYPE IF EXISTS diskquota.blackmap_entry; diff --git a/diskquota--2.0.sql b/diskquota--2.0.sql index 89903967ce7..ef4a1114d70 100644 --- a/diskquota--2.0.sql +++ b/diskquota--2.0.sql @@ -93,6 +93,16 @@ RETURNS void STRICT AS 'MODULE_PATHNAME', 'diskquota_resume' LANGUAGE C; +CREATE OR REPLACE FUNCTION diskquota.enable_hardlimit() +RETURNS void STRICT +AS 'MODULE_PATHNAME', 'diskquota_enable_hardlimit' +LANGUAGE C; + +CREATE OR REPLACE FUNCTION diskquota.disable_hardlimit() +RETURNS void STRICT +AS 'MODULE_PATHNAME', 'diskquota_disable_hardlimit' +LANGUAGE C; + CREATE VIEW diskquota.show_fast_schema_quota_view AS select 
pgns.nspname as schema_name, pgc.relnamespace as schema_oid, qc.quotalimitMB as quota_in_mb, sum(ts.size) as nspsize_in_bytes from diskquota.table_size as ts, diff --git a/diskquota.c b/diskquota.c index c7e05544e3b..efd8809cdf6 100644 --- a/diskquota.c +++ b/diskquota.c @@ -65,7 +65,6 @@ static volatile sig_atomic_t got_sigusr1 = false; /* GUC variables */ int diskquota_naptime = 0; int diskquota_max_active_tables = 0; -static bool diskquota_enable_hardlimit = false; typedef struct DiskQuotaWorkerEntry DiskQuotaWorkerEntry; @@ -251,17 +250,6 @@ define_guc_variables(void) NULL, NULL, NULL); - - DefineCustomBoolVariable("diskquota.enable_hardlimit", - "Use in-query diskquota enforcement", - NULL, - &diskquota_enable_hardlimit, - false, - PGC_SIGHUP, - 0, - NULL, - NULL, - NULL); } /* ---- Functions for disk quota worker process ---- */ @@ -390,11 +378,6 @@ disk_quota_worker_main(Datum main_arg) /* Do the work */ refresh_disk_quota_model(false); - - if (diskquota_enable_hardlimit) - { - /* TODO: Add hard limit function here */ - } } /* clear the out-of-quota blacklist in shared memory */ diff --git a/diskquota.h b/diskquota.h index bb355c2e480..f2ba1ac5a33 100644 --- a/diskquota.h +++ b/diskquota.h @@ -37,6 +37,7 @@ struct DiskQuotaLocks LWLock *monitoring_dbid_cache_lock; LWLock *paused_lock; LWLock *relation_cache_lock; + LWLock *hardlimit_lock; }; typedef struct DiskQuotaLocks DiskQuotaLocks; #define DiskQuotaLocksItemNumber (sizeof(DiskQuotaLocks) / sizeof(void*)) @@ -93,6 +94,7 @@ typedef enum MessageResult MessageResult; extern DiskQuotaLocks diskquota_locks; extern ExtensionDDLMessage *extension_ddl_message; extern bool *diskquota_paused; +extern bool *diskquota_hardlimit; /* drop extension hook */ extern void register_diskquota_object_access_hook(void); diff --git a/quotamodel.c b/quotamodel.c index 8c67a8e6dff..59c04103bf2 100644 --- a/quotamodel.c +++ b/quotamodel.c @@ -47,6 +47,7 @@ #include "utils/lsyscache.h" #include "utils/snapmgr.h" #include "utils/syscache.h" +#include "libpq-fe.h" #include #include @@ -185,6 +186,8 @@ static HTAB *table_size_map = NULL; static HTAB *disk_quota_black_map = NULL; static HTAB *local_disk_quota_black_map = NULL; +bool *diskquota_hardlimit = NULL; + static shmem_startup_hook_type prev_shmem_startup_hook = NULL; /* functions to maintain the quota maps */ @@ -199,9 +202,10 @@ static void transfer_table_for_quota(int64 totalsize, QuotaType type, Oid* old_k /* functions to refresh disk quota model*/ static void refresh_disk_quota_usage(bool is_init); -static void calculate_table_disk_usage(bool is_init); +static void calculate_table_disk_usage(bool is_init, HTAB *local_active_table_stat_map); static void flush_to_table_size(void); static void flush_local_black_map(void); +static void dispatch_blackmap(HTAB *local_active_table_stat_map); static bool load_quotas(void); static void do_load_quotas(void); static bool do_check_diskquota_state_is_ready(void); @@ -468,6 +472,12 @@ disk_quota_shmem_startup(void) if (!found) memset((void *) diskquota_paused, 0, sizeof(bool)); + diskquota_hardlimit = ShmemInitStruct("diskquota_hardlimit", + sizeof(bool), + &found); + if (!found) + memset((void *) diskquota_hardlimit, 0, sizeof(bool)); + LWLockRelease(AddinShmemInitLock); } @@ -491,6 +501,7 @@ init_lwlocks(void) diskquota_locks.monitoring_dbid_cache_lock = LWLockAssign(); diskquota_locks.paused_lock = LWLockAssign(); diskquota_locks.relation_cache_lock = LWLockAssign(); + diskquota_locks.hardlimit_lock = LWLockAssign(); } /* @@ -509,6 +520,7 @@ 
DiskQuotaShmemSize(void) size = add_size(size, hash_estimate_size(diskquota_max_active_tables, sizeof(DiskQuotaRelidCacheEntry))); size = add_size(size, hash_estimate_size(MAX_NUM_MONITORED_DB, sizeof(Oid))); size += sizeof(bool); /* sizeof(*diskquota_paused) */ + size += sizeof(bool); /* sizeof(*diskquota_hardlimit) */ return size; } @@ -712,6 +724,8 @@ refresh_disk_quota_usage(bool is_init) bool connected = false; bool pushed_active_snap = false; bool ret = true; + HTAB *local_active_table_stat_map = NULL; + bool enable_hardlimit; StartTransactionCommand(); @@ -731,8 +745,13 @@ refresh_disk_quota_usage(bool is_init) connected = true; PushActiveSnapshot(GetTransactionSnapshot()); pushed_active_snap = true; + /* + * initialization stage all the tables are active. later loop, only the + * tables whose disk size changed will be treated as active + */ + local_active_table_stat_map = gp_fetch_active_tables(is_init); /* recalculate the disk usage of table, schema and role */ - calculate_table_disk_usage(is_init); + calculate_table_disk_usage(is_init, local_active_table_stat_map); for (QuotaType type = 0; type < NUM_QUOTA_TYPES; ++type) { check_quota_map(type); } @@ -740,6 +759,13 @@ refresh_disk_quota_usage(bool is_init) flush_to_table_size(); /* copy local black map back to shared black map */ flush_local_black_map(); + /* Dispatch blackmap entries to segments to perform hard-limit. */ + LWLockAcquire(diskquota_locks.hardlimit_lock, LW_SHARED); + enable_hardlimit = *diskquota_hardlimit; + LWLockRelease(diskquota_locks.hardlimit_lock); + if (enable_hardlimit) + dispatch_blackmap(local_active_table_stat_map); + hash_destroy(local_active_table_stat_map); } PG_CATCH(); { @@ -802,7 +828,7 @@ merge_uncommitted_table_to_oidlist(List *oidlist) */ static void -calculate_table_disk_usage(bool is_init) +calculate_table_disk_usage(bool is_init, HTAB *local_active_table_stat_map) { bool table_size_map_found; bool active_tbl_found; @@ -810,19 +836,11 @@ calculate_table_disk_usage(bool is_init) TableSizeEntry *tsentry = NULL; Oid relOid; HASH_SEQ_STATUS iter; - HTAB *local_active_table_stat_map; DiskQuotaActiveTableEntry *active_table_entry; TableEntryKey key; List *oidlist; ListCell *l; - - /* - * initialization stage all the tables are active. later loop, only the - * tables whose disk size changed will be treated as active - */ - local_active_table_stat_map = gp_fetch_active_tables(is_init); - /* * unset is_exist flag for tsentry in table_size_map this is used to * detect tables which have been dropped. @@ -1001,8 +1019,6 @@ calculate_table_disk_usage(bool is_init) list_free(oidlist); - hash_destroy(local_active_table_stat_map); - /* * Process removed tables. Reduce schema and role size firstly. Remove * table from table_size_map in flush_to_table_size() function later. @@ -1201,6 +1217,66 @@ flush_local_black_map(void) LWLockRelease(diskquota_locks.black_map_lock); } +/* + * Dispatch blackmap to segment servers. 
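+ *
+ * A sketch of the statement assembled below from the shared blackmap and
+ * the local active table map (dispatched via CdbDispatchCommand()):
+ *
+ *   select diskquota.refresh_blackmap(
+ *       ARRAY[ROW(targetoid, databaseoid, tablespaceoid, targettype, segexceeded), ...]::diskquota.blackmap_entry[],
+ *       ARRAY[active_reloid, ...]::oid[])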
+ */ +static void +dispatch_blackmap(HTAB *local_active_table_stat_map) +{ + HASH_SEQ_STATUS hash_seq; + GlobalBlackMapEntry *blackmap_entry; + DiskQuotaActiveTableEntry *active_table_entry; + int num_entries, count = 0; + CdbPgResults cdb_pgresults = {NULL, 0}; + StringInfoData rows; + StringInfoData active_oids; + StringInfoData sql; + + initStringInfo(&rows); + initStringInfo(&active_oids); + initStringInfo(&sql); + + LWLockAcquire(diskquota_locks.black_map_lock, LW_SHARED); + num_entries = hash_get_num_entries(disk_quota_black_map); + hash_seq_init(&hash_seq, disk_quota_black_map); + while ((blackmap_entry = hash_seq_search(&hash_seq)) != NULL) + { + appendStringInfo(&rows, + "ROW(%d, %d, %d, %d, %s)", + blackmap_entry->keyitem.targetoid, + blackmap_entry->keyitem.databaseoid, + blackmap_entry->keyitem.tablespaceoid, + blackmap_entry->keyitem.targettype, + blackmap_entry->segexceeded ? "true" : "false"); + + if (++count != num_entries) + appendStringInfo(&rows, ","); + } + LWLockRelease(diskquota_locks.black_map_lock); + + count = 0; + num_entries = hash_get_num_entries(local_active_table_stat_map); + hash_seq_init(&hash_seq, local_active_table_stat_map); + while ((active_table_entry = hash_seq_search(&hash_seq)) != NULL) + { + appendStringInfo(&active_oids, + "%d", active_table_entry->reloid); + + if (++count != num_entries) + appendStringInfo(&active_oids, ","); + } + + appendStringInfo(&sql, + "select diskquota.refresh_blackmap(" + "ARRAY[%s]::diskquota.blackmap_entry[], " + "ARRAY[%s]::oid[])", rows.data, active_oids.data); + CdbDispatchCommand(sql.data, DF_NONE, &cdb_pgresults); + + pfree(rows.data); + pfree(active_oids.data); + pfree(sql.data); +} + /* * Make sure a StringInfo's string is no longer than 'nchars' characters. */ @@ -1508,6 +1584,7 @@ bool quota_check_common(Oid reloid, RelFileNode *relfilenode) { bool paused; + bool enable_hardlimit; if (!IsTransactionState()) return true; @@ -1522,7 +1599,16 @@ quota_check_common(Oid reloid, RelFileNode *relfilenode) if (OidIsValid(reloid)) return check_blackmap_by_reloid(reloid); - if (relfilenode) + LWLockAcquire(diskquota_locks.hardlimit_lock, LW_SHARED); + enable_hardlimit = *diskquota_hardlimit; + LWLockRelease(diskquota_locks.hardlimit_lock); + +#ifdef FAULT_INJECTOR + if (SIMPLE_FAULT_INJECTOR("enable_check_quota_by_relfilenode") == FaultInjectorTypeSkip) + enable_hardlimit = true; +#endif + + if (relfilenode && enable_hardlimit) return check_blackmap_by_relfilenode(*relfilenode); return true; @@ -1615,11 +1701,11 @@ export_exceeded_error(GlobalBlackMapEntry *entry, bool skip_name) if (entry->segexceeded) ereport(ERROR, (errcode(ERRCODE_DISK_FULL), - errmsg("tablespace: %s role: %s diskquota exceeded per segment quota", GetTablespaceName(blackentry->tablespaceoid, skip_name), GetUserName(blackentry->targetoid, skip_name)))); + errmsg("tablespace:%s role:%s diskquota exceeded per segment quota", GetTablespaceName(blackentry->tablespaceoid, skip_name), GetUserName(blackentry->targetoid, skip_name)))); else ereport(ERROR, (errcode(ERRCODE_DISK_FULL), - errmsg("tablespace: %s role: %s diskquota exceeded", GetTablespaceName(blackentry->tablespaceoid, skip_name), GetUserName(blackentry->targetoid, skip_name)))); + errmsg("tablespace:%s role:%s diskquota exceeded", GetTablespaceName(blackentry->tablespaceoid, skip_name), GetUserName(blackentry->targetoid, skip_name)))); break; default : ereport(ERROR, @@ -1925,7 +2011,12 @@ refresh_blackmap(PG_FUNCTION_ARGS) GlobalBlackMapEntry *new_entry; new_entry = 
hash_search(disk_quota_black_map, &blackmapentry->keyitem, HASH_ENTER_NULL, &found); - if (!found && new_entry) + /* + * We don't perform soft-limit on segment servers, so we don't flush the + * blackmap entry with a valid targetoid to the global blackmap on segment + * servers. + */ + if (!found && new_entry && !OidIsValid(blackmapentry->keyitem.targetoid)) memcpy(new_entry, blackmapentry, sizeof(GlobalBlackMapEntry)); } LWLockRelease(diskquota_locks.black_map_lock); @@ -2077,3 +2168,84 @@ show_blackmap(PG_FUNCTION_ARGS) SRF_RETURN_DONE(funcctx); } + +static void +dispatch_hardlimit_flag(bool enable_hardlimit) +{ + CdbPgResults cdb_pgresults = {NULL, 0}; + int i; + StringInfoData sql; + + initStringInfo(&sql); + appendStringInfo(&sql, "SELECT diskquota.%s", + enable_hardlimit ? "enable_hardlimit()" : "disable_hardlimit()"); + CdbDispatchCommand(sql.data, DF_NONE, &cdb_pgresults); + + for (i = 0; i < cdb_pgresults.numResults; ++i) + { + PGresult *pgresult = cdb_pgresults.pg_results[i]; + if (PQresultStatus(pgresult) != PGRES_TUPLES_OK) + { + cdbdisp_clearCdbPgResults(&cdb_pgresults); + ereport(ERROR, + (errmsg("[diskquota] cannot %s hardlimit on segments, encounter unexpected result from segment: %d", + enable_hardlimit ? "enable" : "disable", + PQresultStatus(pgresult)))); + } + } + cdbdisp_clearCdbPgResults(&cdb_pgresults); +} + +PG_FUNCTION_INFO_V1(diskquota_enable_hardlimit); +Datum +diskquota_enable_hardlimit(PG_FUNCTION_ARGS) +{ + if (!superuser()) + ereport(ERROR, + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + errmsg("must be superuser to enable hardlimit"))); + + /* + * If this UDF is executed on segment servers, we should clear + * the blackmap firstly, or the relation may be blocked by the + * blackmap dispatched by the previous iteration. 
+ */ + if (!IS_QUERY_DISPATCHER()) + { + HASH_SEQ_STATUS hash_seq; + GlobalBlackMapEntry *blackmapentry; + LWLockAcquire(diskquota_locks.black_map_lock, LW_EXCLUSIVE); + hash_seq_init(&hash_seq, disk_quota_black_map); + while ((blackmapentry = hash_seq_search(&hash_seq)) != NULL) + hash_search(disk_quota_black_map, &blackmapentry->keyitem, HASH_REMOVE, NULL); + LWLockRelease(diskquota_locks.black_map_lock); + } + + LWLockAcquire(diskquota_locks.hardlimit_lock, LW_EXCLUSIVE); + *diskquota_hardlimit = true; + LWLockRelease(diskquota_locks.hardlimit_lock); + + if (IS_QUERY_DISPATCHER()) + dispatch_hardlimit_flag(true /*enable_hardlimit*/); + + PG_RETURN_VOID(); +} + +PG_FUNCTION_INFO_V1(diskquota_disable_hardlimit); +Datum +diskquota_disable_hardlimit(PG_FUNCTION_ARGS) +{ + if (!superuser()) + ereport(ERROR, + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + errmsg("must be superuser to disable hardlimit"))); + + LWLockAcquire(diskquota_locks.hardlimit_lock, LW_EXCLUSIVE); + *diskquota_hardlimit = false; + LWLockRelease(diskquota_locks.hardlimit_lock); + + if (IS_QUERY_DISPATCHER()) + dispatch_hardlimit_flag(false /*enable_hardlimit*/); + + PG_RETURN_VOID(); +} diff --git a/tests/isolation2/expected/test_blackmap.out b/tests/isolation2/expected/test_blackmap.out index b972c8beadc..25101fca9d7 100644 --- a/tests/isolation2/expected/test_blackmap.out +++ b/tests/isolation2/expected/test_blackmap.out @@ -6,6 +6,14 @@ CREATE OR REPLACE FUNCTION block_relation_on_seg0(rel regclass, block_type text, segexceeded boolean) RETURNS void AS $$ /*in func*/ DECLARE /*in func*/ bt int; /*in func*/ targetoid oid; /*in func*/ BEGIN /*in func*/ CASE block_type /*in func*/ WHEN 'NAMESPACE' THEN /*in func*/ bt = 0; /*in func*/ SELECT relnamespace INTO targetoid /*in func*/ FROM pg_class WHERE relname=rel::text; /*in func*/ WHEN 'ROLE' THEN /*in func*/ bt = 1; /*in func*/ SELECT relowner INTO targetoid /*in func*/ FROM pg_class WHERE relname=rel::text; /*in func*/ WHEN 'NAMESPACE_TABLESPACE' THEN /*in func*/ bt = 2; /*in func*/ SELECT relnamespace INTO targetoid /*in func*/ FROM pg_class WHERE relname=rel::text; /*in func*/ WHEN 'ROLE_TABLESPACE' THEN /*in func*/ bt = 3; /*in func*/ SELECT relowner INTO targetoid /*in func*/ FROM pg_class WHERE relname=rel::text; /*in func*/ END CASE; /*in func*/ PERFORM diskquota.refresh_blackmap( /*in func*/ ARRAY[ /*in func*/ ROW(targetoid, /*in func*/ (SELECT oid FROM pg_database WHERE datname=current_database()), /*in func*/ (SELECT reltablespace FROM pg_class WHERE relname=rel::text), /*in func*/ bt, /*in func*/ segexceeded) /*in func*/ ]::diskquota.blackmap_entry[], /*in func*/ ARRAY[rel]::oid[]) /*in func*/ FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; /*in func*/ END; $$ /*in func*/ LANGUAGE 'plpgsql'; CREATE + +-- Enable check quota by relfilenode on seg0. +SELECT gp_inject_fault_infinite('enable_check_quota_by_relfilenode', 'skip', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) + -- 1. Test canceling the extending of an ordinary table. CREATE TABLE blocked_t1(i int) DISTRIBUTED BY (i); CREATE @@ -36,7 +44,7 @@ SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) -- Session 1 will return and emit an error message saying that the quota limit is exceeded on seg0. 1<: <... 
completed> -ERROR: schema's disk space quota exceeded with name:2200 (seg0 127.0.0.1:6002 pid=65759) +ERROR: schema's disk space quota exceeded with name:2200 (seg0 127.0.0.1:6002 pid=137774) -- Clean up the blackmap on seg0. SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; @@ -75,7 +83,7 @@ SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) -- Session 1 will return and emit an error message saying that the quota limit is exceeded on seg0. 1<: <... completed> -ERROR: schema's disk space quota exceeded with name:2200 (seg0 127.0.0.1:6002 pid=65759) +ERROR: schema's disk space quota exceeded with name:2200 (seg0 127.0.0.1:6002 pid=137774) -- Clean up the blackmap on seg0. SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; @@ -114,7 +122,7 @@ SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) -- Session 1 will return and emit an error message saying that the quota limit is exceeded on seg0. 1<: <... completed> -ERROR: schema's disk space quota exceeded with name:2200 (seg0 127.0.0.1:6002 pid=65759) +ERROR: schema's disk space quota exceeded with name:2200 (seg0 127.0.0.1:6002 pid=137774) -- Clean up the blackmap on seg0. SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; @@ -155,7 +163,7 @@ SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) -- Session 1 will return and emit an error message saying that the quota limit is exceeded on seg0. 1<: <... completed> -ERROR: schema's disk space quota exceeded with name:2200 (seg0 127.0.0.1:6002 pid=65759) +ERROR: schema's disk space quota exceeded with name:2200 (seg0 127.0.0.1:6002 pid=137774) -- Clean up the blackmap on seg0. SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; @@ -187,7 +195,7 @@ SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) Success: (1 row) 1<: <... completed> -ERROR: tablespace:1663 schema:2200 diskquota exceeded per segment quota (seg0 127.0.0.1:6002 pid=65759) +ERROR: tablespace:1663 schema:2200 diskquota exceeded per segment quota (seg0 127.0.0.1:6002 pid=137774) -- Clean up the blackmap on seg0. SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; refresh_blackmap @@ -218,7 +226,7 @@ SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) Success: (1 row) 1<: <... completed> -ERROR: tablespace: 1663 role: 10 diskquota exceeded per segment quota (seg0 127.0.0.1:6002 pid=65759) +ERROR: tablespace:1663 role:10 diskquota exceeded per segment quota (seg0 127.0.0.1:6002 pid=137774) -- Clean up the blackmap on seg0. SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; refresh_blackmap @@ -302,7 +310,7 @@ SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) Success: (1 row) 1<: <... completed> -ERROR: schema's disk space quota exceeded with name:2200 (seg0 127.0.0.1:6002 pid=65759) +ERROR: schema's disk space quota exceeded with name:2200 (seg0 127.0.0.1:6002 pid=137774) 1: ABORT; ABORT -- Clean up the blackmap on seg0. 
@@ -347,7 +355,7 @@ SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) Success: (1 row) 1<: <... completed> -ERROR: role's disk space quota exceeded with name:10 (seg0 127.0.0.1:6002 pid=65759) +ERROR: role's disk space quota exceeded with name:10 (seg0 127.0.0.1:6002 pid=137774) 1: ABORT; ABORT -- Clean up the blackmap on seg0. @@ -392,7 +400,7 @@ SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) Success: (1 row) 1<: <... completed> -ERROR: tablespace:1663 schema:2200 diskquota exceeded (seg0 127.0.0.1:6002 pid=65759) +ERROR: tablespace:1663 schema:2200 diskquota exceeded (seg0 127.0.0.1:6002 pid=137774) 1: ABORT; ABORT -- Clean up the blackmap on seg0. @@ -437,7 +445,7 @@ SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) Success: (1 row) 1<: <... completed> -ERROR: tablespace: 1663 role: 10 diskquota exceeded (seg0 127.0.0.1:6002 pid=65759) +ERROR: tablespace:1663 role:10 diskquota exceeded (seg0 127.0.0.1:6002 pid=137774) 1: ABORT; ABORT -- Clean up the blackmap on seg0. @@ -482,7 +490,7 @@ SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) Success: (1 row) 1<: <... completed> -ERROR: tablespace:1663 schema:2200 diskquota exceeded per segment quota (seg0 127.0.0.1:6002 pid=65759) +ERROR: tablespace:1663 schema:2200 diskquota exceeded per segment quota (seg0 127.0.0.1:6002 pid=137774) 1: ABORT; ABORT -- Clean up the blackmap on seg0. @@ -527,7 +535,7 @@ SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) Success: (1 row) 1<: <... completed> -ERROR: tablespace: 1663 role: 10 diskquota exceeded per segment quota (seg0 127.0.0.1:6002 pid=65759) +ERROR: tablespace:1663 role:10 diskquota exceeded per segment quota (seg0 127.0.0.1:6002 pid=137774) 1: ABORT; ABORT -- Clean up the blackmap on seg0. @@ -574,7 +582,7 @@ SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) Success: (1 row) 1<: <... completed> -ERROR: schema's disk space quota exceeded with name:2200 (seg0 127.0.0.1:6002 pid=65759) +ERROR: schema's disk space quota exceeded with name:2200 (seg0 127.0.0.1:6002 pid=137774) 1: ABORT; ABORT -- Clean up the blackmap on seg0. @@ -622,7 +630,7 @@ SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) Success: (1 row) 1<: <... completed> -ERROR: schema's disk space quota exceeded with name:2200 (seg0 127.0.0.1:6002 pid=65759) +ERROR: schema's disk space quota exceeded with name:2200 (seg0 127.0.0.1:6002 pid=137774) 1: ABORT; ABORT -- Clean up the blackmap on seg0. @@ -670,7 +678,7 @@ SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) Success: (1 row) 1<: <... completed> -ERROR: schema's disk space quota exceeded with name:2200 (seg0 127.0.0.1:6002 pid=65759) +ERROR: schema's disk space quota exceeded with name:2200 (seg0 127.0.0.1:6002 pid=137774) 1: ABORT; ABORT -- Clean up the blackmap on seg0. @@ -679,3 +687,10 @@ SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]: ------------------ (1 row) + +-- Disable check quota by relfilenode on seg0. 
+SELECT gp_inject_fault_infinite('enable_check_quota_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) diff --git a/tests/isolation2/sql/test_blackmap.sql b/tests/isolation2/sql/test_blackmap.sql index 340aa723df5..f95c5380237 100644 --- a/tests/isolation2/sql/test_blackmap.sql +++ b/tests/isolation2/sql/test_blackmap.sql @@ -40,6 +40,11 @@ CREATE OR REPLACE FUNCTION block_relation_on_seg0(rel regclass, block_type text, END; $$ /*in func*/ LANGUAGE 'plpgsql'; + +-- Enable check quota by relfilenode on seg0. +SELECT gp_inject_fault_infinite('enable_check_quota_by_relfilenode', 'skip', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; + -- 1. Test canceling the extending of an ordinary table. CREATE TABLE blocked_t1(i int) DISTRIBUTED BY (i); INSERT INTO blocked_t1 SELECT generate_series(1, 100); @@ -506,3 +511,7 @@ SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + +-- Disable check quota by relfilenode on seg0. +SELECT gp_inject_fault_infinite('enable_check_quota_by_relfilenode', 'reset', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; diff --git a/tests/regress/Makefile b/tests/regress/Makefile index ca9f369c674..398997c6acc 100644 --- a/tests/regress/Makefile +++ b/tests/regress/Makefile @@ -1,8 +1,8 @@ REGRESS = dummy ifeq ("$(INTEGRATION_TEST)","y") -REGRESS_OPTS = --schedule=diskquota_schedule_int --init-file=../init_file +REGRESS_OPTS = --schedule=diskquota_schedule_int --init-file=../init_file --init-file=./regress_init_file else -REGRESS_OPTS = --schedule=diskquota_schedule --init-file=../init_file +REGRESS_OPTS = --schedule=diskquota_schedule --init-file=../init_file --init-file=./regress_init_file endif # FIXME: This check is hacky, since test_fetch_table_stat relies on the diff --git a/tests/regress/diskquota_schedule b/tests/regress/diskquota_schedule index bd74bda51bb..d0ee2c1f45b 100644 --- a/tests/regress/diskquota_schedule +++ b/tests/regress/diskquota_schedule @@ -20,4 +20,8 @@ test: test_many_active_tables test: test_fetch_table_stat test: test_appendonly test: test_blackmap +test: test_ctas_role +test: test_ctas_schema +test: test_ctas_tablespace_role +test: test_ctas_tablespace_schema test: clean diff --git a/tests/regress/expected/prepare.out b/tests/regress/expected/prepare.out index 48b51c26687..f05028dc90c 100644 --- a/tests/regress/expected/prepare.out +++ b/tests/regress/expected/prepare.out @@ -1,18 +1,12 @@ CREATE EXTENSION diskquota; -- start_ignore \! gpstop -u -20190319:07:07:05:020219 gpstop:df38f510da4b:gpadmin-[INFO]:-Starting gpstop with args: -u -20190319:07:07:05:020219 gpstop:df38f510da4b:gpadmin-[INFO]:-Gathering information and validating the environment... -20190319:07:07:05:020219 gpstop:df38f510da4b:gpadmin-[INFO]:-Obtaining Greenplum Master catalog information -20190319:07:07:05:020219 gpstop:df38f510da4b:gpadmin-[INFO]:-Obtaining Segment details from master... 
-20190319:07:07:05:020219 gpstop:df38f510da4b:gpadmin-[INFO]:-Greenplum Version: 'postgres (Greenplum Database) 6.0.0-alpha.0+dev.16105.gdfbfc2b build dev' -20190319:07:07:05:020219 gpstop:df38f510da4b:gpadmin-[INFO]:-Signalling all postmaster processes to reload -SELECT diskquota.init_table_size_table(); - init_table_size_table ------------------------ - -(1 row) - +20211213:00:02:48:240132 gpstop:laptop:v-[INFO]:-Starting gpstop with args: -u +20211213:00:02:48:240132 gpstop:laptop:v-[INFO]:-Gathering information and validating the environment... +20211213:00:02:48:240132 gpstop:laptop:v-[INFO]:-Obtaining Greenplum Master catalog information +20211213:00:02:48:240132 gpstop:laptop:v-[INFO]:-Obtaining Segment details from master... +20211213:00:02:48:240132 gpstop:laptop:v-[INFO]:-Greenplum Version: 'postgres (Greenplum Database) 6.18.2+dev.173.g55557f44f3 build dev' +20211213:00:02:48:240132 gpstop:laptop:v-[INFO]:-Signalling all postmaster processes to reload -- end_ignore SELECT pg_sleep(1); pg_sleep @@ -27,6 +21,13 @@ SELECT pg_sleep(15); (1 row) +-- disable hardlimit feature. +SELECT diskquota.disable_hardlimit(); + disable_hardlimit +------------------- + +(1 row) + -- prepare a schema that has reached quota limit CREATE SCHEMA badquota; DROP ROLE IF EXISTS testbody; @@ -56,13 +57,15 @@ SELECT pg_sleep(10); (1 row) -SELECT size, segid from diskquota.table_size where tableid in (select oid from pg_class where relname='t1'); +SELECT size, segid FROM diskquota.table_size + WHERE tableid IN (SELECT oid FROM pg_class WHERE relname='t1') + ORDER BY segid DESC; size | segid ---------+------- - 1310720 | 0 1310720 | 2 - 3932160 | -1 1310720 | 1 + 1310720 | 0 + 3932160 | -1 (4 rows) -- expect fail diff --git a/tests/regress/expected/test_ctas_role.out b/tests/regress/expected/test_ctas_role.out new file mode 100644 index 00000000000..0965c2bd316 --- /dev/null +++ b/tests/regress/expected/test_ctas_role.out @@ -0,0 +1,87 @@ +-- Test that diskquota is able to cancel a running CTAS query by the role quota. +SELECT diskquota.enable_hardlimit(); + enable_hardlimit +------------------ + +(1 row) + +CREATE ROLE hardlimit_r; +NOTICE: resource queue required -- using default resource queue "pg_default" +SELECT diskquota.set_role_quota('hardlimit_r', '1MB'); + set_role_quota +---------------- + +(1 row) + +SET ROLE hardlimit_r; +-- heap table +CREATE TABLE t1 AS SELECT generate_series(1, 10000000); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +[hardlimit] role's disk space quota exceeded +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- temp table +CREATE TEMP TABLE t2 AS SELECT generate_series(1, 100000000); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
+[hardlimit] role's disk space quota exceeded +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- toast table +CREATE TABLE toast_table AS SELECT ARRAY(SELECT * FROM generate_series(1,10000)) FROM generate_series(1, 100000); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'array' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +[hardlimit] role's disk space quota exceeded +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- ao table +CREATE TABLE ao_table WITH (appendonly=true) AS SELECT generate_series(1, 100000000); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +[hardlimit] role's disk space quota exceeded +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- aocs table +CREATE TABLE aocs_table WITH (appendonly=true, orientation=column) + AS SELECT i, ARRAY(SELECT * FROM generate_series(1,10000)) FROM generate_series(1, 100000) AS i; +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +[hardlimit] role's disk space quota exceeded +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- disable hardlimit and do some clean-ups. +SELECT diskquota.disable_hardlimit(); +ERROR: permission denied for schema diskquota +DROP TABLE IF EXISTS t1; +NOTICE: table "t1" does not exist, skipping +DROP TABLE IF EXISTS t2; +NOTICE: table "t2" does not exist, skipping +DROP TABLE IF EXISTS toast_table; +NOTICE: table "toast_table" does not exist, skipping +DROP TABLE IF EXISTS ao_table; +NOTICE: table "ao_table" does not exist, skipping +DROP TABLE IF EXISTS aocs_table; +NOTICE: table "aocs_table" does not exist, skipping +RESET ROLE; +DROP ROLE hardlimit_r; diff --git a/tests/regress/expected/test_ctas_schema.out b/tests/regress/expected/test_ctas_schema.out new file mode 100644 index 00000000000..f77e76f45f7 --- /dev/null +++ b/tests/regress/expected/test_ctas_schema.out @@ -0,0 +1,84 @@ +-- Test that diskquota is able to cancel a running CTAS query by the schema quota. +SELECT diskquota.enable_hardlimit(); + enable_hardlimit +------------------ + +(1 row) + +CREATE SCHEMA hardlimit_s; +SELECT diskquota.set_schema_quota('hardlimit_s', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +SET search_path TO hardlimit_s; +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- heap table +CREATE TABLE t1 AS SELECT generate_series(1, 100000000); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
+[hardlimit] schema's disk space quota exceeded +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- toast table +CREATE TABLE toast_table + AS SELECT ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'array' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +[hardlimit] schema's disk space quota exceeded +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- ao table +CREATE TABLE ao_table WITH (appendonly=true) AS SELECT generate_series(1, 100000000); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +[hardlimit] schema's disk space quota exceeded +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- aocs table +CREATE TABLE aocs_table WITH (appendonly=true, orientation=column) + AS SELECT i, ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000) AS i; +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +[hardlimit] schema's disk space quota exceeded +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- disable hardlimit and do some clean-ups. +SELECT diskquota.disable_hardlimit(); + disable_hardlimit +------------------- + +(1 row) + +DROP TABLE IF EXISTS t1; +NOTICE: table "t1" does not exist, skipping +DROP TABLE IF EXISTS toast_table; +NOTICE: table "toast_table" does not exist, skipping +DROP TABLE IF EXISTS ao_table; +NOTICE: table "ao_table" does not exist, skipping +DROP TABLE IF EXISTS aocs_table; +NOTICE: table "aocs_table" does not exist, skipping +RESET search_path; +DROP SCHEMA hardlimit_s; diff --git a/tests/regress/expected/test_ctas_tablespace_role.out b/tests/regress/expected/test_ctas_tablespace_role.out new file mode 100644 index 00000000000..c18ca2be188 --- /dev/null +++ b/tests/regress/expected/test_ctas_tablespace_role.out @@ -0,0 +1,88 @@ +-- Test that diskquota is able to cancel a running CTAS query by the tablespace role quota. +SELECT diskquota.enable_hardlimit(); + enable_hardlimit +------------------ + +(1 row) + +-- start_ignore +\! mkdir -p /tmp/ctas_rolespc +-- end_ignore +-- prepare role and tablespace. 
+DROP TABLESPACE IF EXISTS ctas_rolespc; +NOTICE: tablespace "ctas_rolespc" does not exist, skipping +CREATE TABLESPACE ctas_rolespc LOCATION '/tmp/ctas_rolespc'; +CREATE ROLE hardlimit_r; +NOTICE: resource queue required -- using default resource queue "pg_default" +GRANT ALL ON TABLESPACE ctas_rolespc TO hardlimit_r; +SELECT diskquota.set_role_tablespace_quota('hardlimit_r', 'ctas_rolespc', '1 MB'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +SET default_tablespace = ctas_rolespc; +SET ROLE hardlimit_r; +-- heap table +CREATE TABLE t1 AS SELECT generate_series(1, 100000000); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +[hardlimit] tablespace-role's disk space quota exceeded +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- toast table +CREATE TABLE toast_table + AS SELECT ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'array' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +[hardlimit] tablespace-role's disk space quota exceeded +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- ao table +CREATE TABLE ao_table WITH (appendonly=true) AS SELECT generate_series(1, 100000000); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +[hardlimit] tablespace-role's disk space quota exceeded +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- aocs table +CREATE TABLE aocs_table WITH (appendonly=true, orientation=column) + AS SELECT i, ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000) AS i; +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +[hardlimit] tablespace-role's disk space quota exceeded +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- disable hardlimit and do some clean-ups. +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +DROP TABLE IF EXISTS toast_table; +DROP TABLE IF EXISTS ao_table; +DROP TABLE IF EXISTS aocs_table; +RESET ROLE; +RESET default_tablespace; +DROP TABLESPACE ctas_rolespc; +\! rm -rf /tmp/ctas_rolespc; +DROP ROLE hardlimit_r; +SELECT diskquota.disable_hardlimit(); + disable_hardlimit +------------------- + +(1 row) + diff --git a/tests/regress/expected/test_ctas_tablespace_schema.out b/tests/regress/expected/test_ctas_tablespace_schema.out new file mode 100644 index 00000000000..edded27111a --- /dev/null +++ b/tests/regress/expected/test_ctas_tablespace_schema.out @@ -0,0 +1,91 @@ +-- Test that diskquota is able to cancel a running CTAS query by the tablespace schema quota. 
+SELECT diskquota.enable_hardlimit(); + enable_hardlimit +------------------ + +(1 row) + +-- start_ignore +\! mkdir -p /tmp/ctas_schemaspc +-- end_ignore +-- prepare tablespace and schema +DROP TABLESPACE IF EXISTS ctas_schemaspc; +NOTICE: tablespace "ctas_schemaspc" does not exist, skipping +CREATE TABLESPACE ctas_schemaspc LOCATION '/tmp/ctas_schemaspc'; +CREATE SCHEMA hardlimit_s; +SELECT diskquota.set_schema_tablespace_quota('hardlimit_s', 'ctas_schemaspc', '1 MB'); + set_schema_tablespace_quota +----------------------------- + +(1 row) + +SET search_path TO hardlimit_s; +SET default_tablespace = ctas_schemaspc; +-- heap table +CREATE TABLE t1 AS SELECT generate_series(1, 100000000); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +[hardlimit] tablespace-schema's disk space quota exceeded +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- toast table +CREATE TABLE toast_table + AS SELECT ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'array' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +[hardlimit] tablespace-schema's disk space quota exceeded +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- ao table +CREATE TABLE ao_table WITH (appendonly=true) AS SELECT generate_series(1, 100000000); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +[hardlimit] tablespace-schema's disk space quota exceeded +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- aocs table +CREATE TABLE aocs_table WITH (appendonly=true, orientation=column) + AS SELECT i, ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000) AS i; +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +[hardlimit] tablespace-schema's disk space quota exceeded +SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +-- disable hardlimit and do some clean-ups +DROP TABLE IF EXISTS t1; +NOTICE: table "t1" does not exist, skipping +DROP TABLE IF EXISTS t2; +NOTICE: table "t2" does not exist, skipping +DROP TABLE IF EXISTS toast_table; +NOTICE: table "toast_table" does not exist, skipping +DROP TABLE IF EXISTS ao_table; +NOTICE: table "ao_table" does not exist, skipping +DROP TABLE IF EXISTS aocs_table; +NOTICE: table "aocs_table" does not exist, skipping +RESET search_path; +RESET default_tablespace; +DROP SCHEMA hardlimit_s; +DROP TABLESPACE ctas_schemaspc; +\! 
rm -rf /tmp/ctas_schemaspc; +SELECT diskquota.disable_hardlimit(); + disable_hardlimit +------------------- + +(1 row) + diff --git a/tests/regress/expected/test_tablespace_role.out b/tests/regress/expected/test_tablespace_role.out index 55811740054..f14e1600738 100644 --- a/tests/regress/expected/test_tablespace_role.out +++ b/tests/regress/expected/test_tablespace_role.out @@ -54,10 +54,10 @@ SELECT pg_sleep(5); -- expect insert fail INSERT INTO b SELECT generate_series(1,100); -ERROR: tablespace: rolespc role: rolespcu1 diskquota exceeded +ERROR: tablespace:rolespc role:rolespcu1 diskquota exceeded -- expect insert fail INSERT INTO b2 SELECT generate_series(1,100); -ERROR: tablespace: rolespc role: rolespcu1 diskquota exceeded +ERROR: tablespace:rolespc role:rolespcu1 diskquota exceeded -- Test show_fast_schema_tablespace_quota_view SELECT role_name, tablespace_name, quota_in_mb, rolsize_tablespace_in_bytes FROM diskquota.show_fast_role_tablespace_quota_view WHERE role_name = 'rolespcu1' and tablespace_name = 'rolespc'; role_name | tablespace_name | quota_in_mb | rolsize_tablespace_in_bytes @@ -86,7 +86,7 @@ SELECT pg_sleep(20); -- expect insert fail INSERT INTO b SELECT generate_series(1,100); -ERROR: tablespace: rolespc role: rolespcu1 diskquota exceeded +ERROR: tablespace:rolespc role:rolespcu1 diskquota exceeded -- Test alter tablespace -- start_ignore \! mkdir /tmp/rolespc2 @@ -113,7 +113,7 @@ SELECT pg_sleep(20); -- expect insert fail INSERT INTO b SELECT generate_series(1,100); -ERROR: tablespace: rolespc role: rolespcu1 diskquota exceeded +ERROR: tablespace:rolespc role:rolespcu1 diskquota exceeded -- Test update quota config SELECT diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '10 MB'); set_role_tablespace_quota @@ -139,7 +139,7 @@ SELECT pg_sleep(5); -- expect insert fail INSERT INTO b SELECT generate_series(1,100); -ERROR: tablespace: rolespc role: rolespcu1 diskquota exceeded +ERROR: tablespace:rolespc role:rolespcu1 diskquota exceeded -- Test delete quota config SELECT diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '-1 MB'); set_role_tablespace_quota diff --git a/tests/regress/expected/test_tablespace_role_perseg.out b/tests/regress/expected/test_tablespace_role_perseg.out index 9b9879a3f0d..8c960eeb38e 100644 --- a/tests/regress/expected/test_tablespace_role_perseg.out +++ b/tests/regress/expected/test_tablespace_role_perseg.out @@ -36,7 +36,7 @@ SELECT pg_sleep(5); -- expect insert fail INSERT INTO b SELECT generate_series(1,100); -ERROR: tablespace: rolespc_perseg role: rolespc_persegu1 diskquota exceeded +ERROR: tablespace:rolespc_perseg role:rolespc_persegu1 diskquota exceeded -- change tablespace role quota SELECT diskquota.set_role_tablespace_quota('rolespc_persegu1', 'rolespc_perseg', '10 MB'); set_role_tablespace_quota @@ -73,7 +73,7 @@ SELECT pg_sleep(5); ---- expect insert fail by tablespace schema perseg quota INSERT INTO b SELECT generate_series(1,100); -ERROR: tablespace: rolespc_perseg role: rolespc_persegu1 diskquota exceeded per segment quota +ERROR: tablespace:rolespc_perseg role:rolespc_persegu1 diskquota exceeded per segment quota -- Test alter owner ALTER TABLE b OWNER TO rolespc_persegu2; SELECT pg_sleep(20); @@ -93,7 +93,7 @@ SELECT pg_sleep(20); -- expect insert fail INSERT INTO b SELECT generate_series(1,100); -ERROR: tablespace: rolespc_perseg role: rolespc_persegu1 diskquota exceeded per segment quota +ERROR: tablespace:rolespc_perseg role:rolespc_persegu1 diskquota exceeded per segment quota -- Test alter tablespace -- 
start_ignore \! mkdir /tmp/rolespc_perseg2 @@ -120,7 +120,7 @@ SELECT pg_sleep(20); -- expect insert fail INSERT INTO b SELECT generate_series(1,100); -ERROR: tablespace: rolespc_perseg role: rolespc_persegu1 diskquota exceeded per segment quota +ERROR: tablespace:rolespc_perseg role:rolespc_persegu1 diskquota exceeded per segment quota -- Test update per segment ratio SELECT diskquota.set_per_segment_quota('rolespc_perseg', 3.1); set_per_segment_quota @@ -156,7 +156,7 @@ SELECT pg_sleep(5); -- expect insert fail INSERT INTO b SELECT generate_series(1,100); -ERROR: tablespace: rolespc_perseg role: rolespc_persegu1 diskquota exceeded per segment quota +ERROR: tablespace:rolespc_perseg role:rolespc_persegu1 diskquota exceeded per segment quota -- Test delete per segment ratio SELECT diskquota.set_per_segment_quota('rolespc_perseg', -1); set_per_segment_quota @@ -186,7 +186,7 @@ SELECT pg_sleep(5); -- expect insert fail INSERT INTO b SELECT generate_series(1,100); -ERROR: tablespace: rolespc_perseg role: rolespc_persegu1 diskquota exceeded per segment quota +ERROR: tablespace:rolespc_perseg role:rolespc_persegu1 diskquota exceeded per segment quota -- Test delete quota config SELECT diskquota.set_role_tablespace_quota('rolespc_persegu1', 'rolespc_perseg', '-1 MB'); set_role_tablespace_quota diff --git a/tests/regress/expected/test_uncommitted_table_size.out b/tests/regress/expected/test_uncommitted_table_size.out index f4e465ee1c1..2082992335b 100644 --- a/tests/regress/expected/test_uncommitted_table_size.out +++ b/tests/regress/expected/test_uncommitted_table_size.out @@ -149,7 +149,8 @@ DROP TABLE ao; -- AO table CTAS begin; CREATE TABLE ao WITH(appendonly=true) AS SELECT generate_series(1, 10000); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause. Creating a NULL policy entry. +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. SELECT pg_sleep(5); pg_sleep ---------- diff --git a/tests/regress/regress_init_file b/tests/regress/regress_init_file new file mode 100644 index 00000000000..63dd2602ff0 --- /dev/null +++ b/tests/regress/regress_init_file @@ -0,0 +1,14 @@ +-- start_matchsubs +# Remove oid of schema/role/tablespace from error message. +m/ERROR: role's disk space quota exceeded with name:\d+.*/ +s/ERROR: role's disk space quota exceeded with name:\d+.*/[hardlimit] role's disk space quota exceeded/ + +m/ERROR: schema's disk space quota exceeded with name:\d+.*/ +s/ERROR: schema's disk space quota exceeded with name:\d+.*/[hardlimit] schema's disk space quota exceeded/ + +m/ERROR: tablespace:\d+ role:\d+ diskquota exceeded.*/ +s/ERROR: tablespace:\d+ role:\d+ diskquota exceeded.*/[hardlimit] tablespace-role's disk space quota exceeded/ + +m/ERROR: tablespace:\d+ schema:\d+ diskquota exceeded.*/ +s/ERROR: tablespace:\d+ schema:\d+ diskquota exceeded.*/[hardlimit] tablespace-schema's disk space quota exceeded/ +-- end_matchsubs diff --git a/tests/regress/sql/prepare.sql b/tests/regress/sql/prepare.sql index 7457643aa16..b1bf89846a6 100644 --- a/tests/regress/sql/prepare.sql +++ b/tests/regress/sql/prepare.sql @@ -6,6 +6,9 @@ SELECT pg_sleep(1); \! cp data/csmall.txt /tmp/csmall.txt SELECT pg_sleep(15); +-- disable hardlimit feature. 
+SELECT diskquota.disable_hardlimit(); + -- prepare a schema that has reached quota limit CREATE SCHEMA badquota; DROP ROLE IF EXISTS testbody; @@ -16,6 +19,8 @@ INSERT INTO badquota.t1 SELECT generate_series(0, 100000); SELECT diskquota.init_table_size_table(); SELECT diskquota.set_schema_quota('badquota', '1 MB'); SELECT pg_sleep(10); -SELECT size, segid from diskquota.table_size where tableid in (select oid from pg_class where relname='t1'); +SELECT size, segid FROM diskquota.table_size + WHERE tableid IN (SELECT oid FROM pg_class WHERE relname='t1') + ORDER BY segid DESC; -- expect fail INSERT INTO badquota.t1 SELECT generate_series(0, 10); diff --git a/tests/regress/sql/test_ctas_role.sql b/tests/regress/sql/test_ctas_role.sql new file mode 100644 index 00000000000..93e0e97f3ea --- /dev/null +++ b/tests/regress/sql/test_ctas_role.sql @@ -0,0 +1,36 @@ +-- Test that diskquota is able to cancel a running CTAS query by the role quota. +SELECT diskquota.enable_hardlimit(); +CREATE ROLE hardlimit_r; +SELECT diskquota.set_role_quota('hardlimit_r', '1MB'); +SET ROLE hardlimit_r; + +-- heap table +CREATE TABLE t1 AS SELECT generate_series(1, 10000000); +SELECT pg_sleep(5); + +-- temp table +CREATE TEMP TABLE t2 AS SELECT generate_series(1, 100000000); +SELECT pg_sleep(5); + +-- toast table +CREATE TABLE toast_table AS SELECT ARRAY(SELECT * FROM generate_series(1,10000)) FROM generate_series(1, 100000); +SELECT pg_sleep(5); + +-- ao table +CREATE TABLE ao_table WITH (appendonly=true) AS SELECT generate_series(1, 100000000); +SELECT pg_sleep(5); + +-- aocs table +CREATE TABLE aocs_table WITH (appendonly=true, orientation=column) + AS SELECT i, ARRAY(SELECT * FROM generate_series(1,10000)) FROM generate_series(1, 100000) AS i; +SELECT pg_sleep(5); + +-- disable hardlimit and do some clean-ups. +SELECT diskquota.disable_hardlimit(); +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +DROP TABLE IF EXISTS toast_table; +DROP TABLE IF EXISTS ao_table; +DROP TABLE IF EXISTS aocs_table; +RESET ROLE; +DROP ROLE hardlimit_r; diff --git a/tests/regress/sql/test_ctas_schema.sql b/tests/regress/sql/test_ctas_schema.sql new file mode 100644 index 00000000000..b8b72b58554 --- /dev/null +++ b/tests/regress/sql/test_ctas_schema.sql @@ -0,0 +1,33 @@ +-- Test that diskquota is able to cancel a running CTAS query by the schema quota. +SELECT diskquota.enable_hardlimit(); +CREATE SCHEMA hardlimit_s; +SELECT diskquota.set_schema_quota('hardlimit_s', '1 MB'); +SET search_path TO hardlimit_s; +SELECT pg_sleep(5); + +-- heap table +CREATE TABLE t1 AS SELECT generate_series(1, 100000000); +SELECT pg_sleep(5); + +-- toast table +CREATE TABLE toast_table + AS SELECT ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000); +SELECT pg_sleep(5); + +-- ao table +CREATE TABLE ao_table WITH (appendonly=true) AS SELECT generate_series(1, 100000000); +SELECT pg_sleep(5); + +-- aocs table +CREATE TABLE aocs_table WITH (appendonly=true, orientation=column) + AS SELECT i, ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000) AS i; +SELECT pg_sleep(5); + +-- disable hardlimit and do some clean-ups. 
+SELECT diskquota.disable_hardlimit(); +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS toast_table; +DROP TABLE IF EXISTS ao_table; +DROP TABLE IF EXISTS aocs_table; +RESET search_path; +DROP SCHEMA hardlimit_s; diff --git a/tests/regress/sql/test_ctas_tablespace_role.sql b/tests/regress/sql/test_ctas_tablespace_role.sql new file mode 100644 index 00000000000..a9b7c23304c --- /dev/null +++ b/tests/regress/sql/test_ctas_tablespace_role.sql @@ -0,0 +1,45 @@ +-- Test that diskquota is able to cancel a running CTAS query by the tablespace role quota. +SELECT diskquota.enable_hardlimit(); +-- start_ignore +\! mkdir -p /tmp/ctas_rolespc +-- end_ignore + +-- prepare role and tablespace. +DROP TABLESPACE IF EXISTS ctas_rolespc; +CREATE TABLESPACE ctas_rolespc LOCATION '/tmp/ctas_rolespc'; +CREATE ROLE hardlimit_r; +GRANT ALL ON TABLESPACE ctas_rolespc TO hardlimit_r; +SELECT diskquota.set_role_tablespace_quota('hardlimit_r', 'ctas_rolespc', '1 MB'); +SET default_tablespace = ctas_rolespc; +SET ROLE hardlimit_r; + +-- heap table +CREATE TABLE t1 AS SELECT generate_series(1, 100000000); +SELECT pg_sleep(5); + +-- toast table +CREATE TABLE toast_table + AS SELECT ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000); +SELECT pg_sleep(5); + +-- ao table +CREATE TABLE ao_table WITH (appendonly=true) AS SELECT generate_series(1, 100000000); +SELECT pg_sleep(5); + +-- aocs table +CREATE TABLE aocs_table WITH (appendonly=true, orientation=column) + AS SELECT i, ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000) AS i; +SELECT pg_sleep(5); + +-- disable hardlimit and do some clean-ups. +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +DROP TABLE IF EXISTS toast_table; +DROP TABLE IF EXISTS ao_table; +DROP TABLE IF EXISTS aocs_table; +RESET ROLE; +RESET default_tablespace; +DROP TABLESPACE ctas_rolespc; +\! rm -rf /tmp/ctas_rolespc; +DROP ROLE hardlimit_r; +SELECT diskquota.disable_hardlimit(); diff --git a/tests/regress/sql/test_ctas_tablespace_schema.sql b/tests/regress/sql/test_ctas_tablespace_schema.sql new file mode 100644 index 00000000000..a7dc9916c93 --- /dev/null +++ b/tests/regress/sql/test_ctas_tablespace_schema.sql @@ -0,0 +1,45 @@ +-- Test that diskquota is able to cancel a running CTAS query by the tablespace schema quota. +SELECT diskquota.enable_hardlimit(); + +-- start_ignore +\! 
mkdir -p /tmp/ctas_schemaspc -- end_ignore + +-- prepare tablespace and schema +DROP TABLESPACE IF EXISTS ctas_schemaspc; +CREATE TABLESPACE ctas_schemaspc LOCATION '/tmp/ctas_schemaspc'; +CREATE SCHEMA hardlimit_s; +SELECT diskquota.set_schema_tablespace_quota('hardlimit_s', 'ctas_schemaspc', '1 MB'); +SET search_path TO hardlimit_s; +SET default_tablespace = ctas_schemaspc; + +-- heap table +CREATE TABLE t1 AS SELECT generate_series(1, 100000000); +SELECT pg_sleep(5); + +-- toast table +CREATE TABLE toast_table + AS SELECT ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000); +SELECT pg_sleep(5); + +-- ao table +CREATE TABLE ao_table WITH (appendonly=true) AS SELECT generate_series(1, 100000000); +SELECT pg_sleep(5); + +-- aocs table +CREATE TABLE aocs_table WITH (appendonly=true, orientation=column) + AS SELECT i, ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000) AS i; +SELECT pg_sleep(5); + +-- disable hardlimit and do some clean-ups +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +DROP TABLE IF EXISTS toast_table; +DROP TABLE IF EXISTS ao_table; +DROP TABLE IF EXISTS aocs_table; +RESET search_path; +RESET default_tablespace; +DROP SCHEMA hardlimit_s; +DROP TABLESPACE ctas_schemaspc; +\! rm -rf /tmp/ctas_schemaspc; +SELECT diskquota.disable_hardlimit(); From dfbebb6f87a52e5b26418c8c10ea05451e216be4 Mon Sep 17 00:00:00 2001 From: Xing Guo Date: Mon, 13 Dec 2021 16:51:04 +0800 Subject: [PATCH 097/330] Ignore distribution notice in regression tests. (#106) This PR tries to fix a CI build failure by ignoring the distribution notice emitted by CTAS statements. Co-authored-by: Hao Zhang --- tests/init_file | 3 +++ tests/regress/expected/test_fetch_table_stat.out | 1 - 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/init_file b/tests/init_file index 874fc9ec888..46f44f4c788 100644 --- a/tests/init_file +++ b/tests/init_file @@ -3,7 +3,10 @@ -- Individual tests can contain additional patterns specific to the test. -- start_matchignore +# This pattern is extracted from gpdb/src/test/regress/init_file +m/^(?:HINT|NOTICE):\s+.+\'DISTRIBUTED BY\' clause.*/ -- end_matchignore + -- start_matchsubs m/diskquota.c:\d+\)/ s/diskquota.c:\d+\)/diskquota.c:xxx/ diff --git a/tests/regress/expected/test_fetch_table_stat.out b/tests/regress/expected/test_fetch_table_stat.out index 477bc264d06..45f1a7e97ef 100644 --- a/tests/regress/expected/test_fetch_table_stat.out +++ b/tests/regress/expected/test_fetch_table_stat.out @@ -20,7 +20,6 @@ SELECT gp_inject_fault_infinite('diskquota_fetch_table_stat', 'error', dbid) SELECT COUNT(*) FROM (SELECT diskquota.diskquota_fetch_table_stat(1, array[(SELECT oid FROM pg_class WHERE relname='t_error_handling')]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0) AS count; -WARNING: fault triggered, fault name:'diskquota_fetch_table_stat' fault type:'error' count ------- 1 From c10a1f57e0dc3ffb2afc1dd602d907827bc48cf8 Mon Sep 17 00:00:00 2001 From: Xing Guo Date: Wed, 15 Dec 2021 10:59:32 +0800 Subject: [PATCH 098/330] Trying to make concourse happy.
(#108) --- concourse/scripts/test_common.sh | 8 ++++---- concourse/scripts/test_diskquota.sh | 19 +++++++++++++++++++ 2 files changed, 23 insertions(+), 4 deletions(-) diff --git a/concourse/scripts/test_common.sh b/concourse/scripts/test_common.sh index d3cf8f88084..ef509cd84be 100644 --- a/concourse/scripts/test_common.sh +++ b/concourse/scripts/test_common.sh @@ -17,16 +17,16 @@ function test(){ pushd $1 trap "[ -s regression.diffs ] && grep -v GP_IGNORE regression.diffs" EXIT make installcheck - [ -s regression.diffs ] && grep -v GP_IGNORE regression.diffs && exit 1 + [ -s regression.diffs ] && grep -v GP_IGNORE regression.diffs && cat regression.diffs && exit 1 if $2 ; then - ps -ef | grep postgres| grep qddir| cut -d ' ' -f ${CUT_NUMBER} | xargs kill -9 + ## Bring down the QD. + gpstop -may -M immediate export PGPORT=6001 echo "export PGPROT=\$PGPORT" >> /usr/local/greenplum-db-devel/greenplum_path.sh source /usr/local/greenplum-db-devel/greenplum_path.sh - rm /tmp/.s.PGSQL.6000* gpactivatestandby -ad ${TOP_DIR}/gpdb_src/gpAux/gpdemo/datadirs/standby make installcheck - [ -s regression.diffs ] && grep -v GP_IGNORE regression.diffs && exit 1 + [ -s regression.diffs ] && grep -v GP_IGNORE regression.diffs && cat regression.diffs && exit 1 fi popd EOF diff --git a/concourse/scripts/test_diskquota.sh b/concourse/scripts/test_diskquota.sh index f761d58ce19..fec377dc531 100755 --- a/concourse/scripts/test_diskquota.sh +++ b/concourse/scripts/test_diskquota.sh @@ -9,8 +9,27 @@ CUT_NUMBER=6 source "${GPDB_CONCOURSE_DIR}/common.bash" source "${TOP_DIR}/diskquota_src/concourse/scripts/test_common.sh" + +## Currently, the isolation2 testing framework relies on pg_isolation2_regress, so we +## have to build it from source. However, in concourse, gpdb_bin is fetched +## from a remote machine and the $(abs_top_srcdir) variable points to a non-existing +## location; we fix this issue by creating a symbolic link for it. +function create_fake_gpdb_src() { + pushd gpdb_src + ./configure --prefix=/usr/local/greenplum-db-devel \ + --with-perl --with-python --with-libxml \ + --without-zstd \ + --disable-orca --disable-gpcloud --enable-debug-extensions + popd + + FAKE_GPDB_SRC=/tmp/build/"$(grep -rnw '/usr/local/greenplum-db-devel' -e 'abs_top_srcdir = .*' | head -n 1 | awk -F"/" '{print $(NF-1)}')" + mkdir -p ${FAKE_GPDB_SRC} + ln -s ${TOP_DIR}/gpdb_src ${FAKE_GPDB_SRC}/gpdb_src +} + function _main() { time install_gpdb + create_fake_gpdb_src time setup_gpadmin_user time make_cluster From cc5d9a000993794475adeb115b3d8dca9b971906 Mon Sep 17 00:00:00 2001 From: Xing Guo Date: Wed, 15 Dec 2021 15:55:39 +0800 Subject: [PATCH 099/330] Print the diff files when the tests fail. (#109) --- concourse/scripts/test_common.sh | 22 +++++++++++++++++++--- 1 file changed, 19 insertions(+), 3 deletions(-) diff --git a/concourse/scripts/test_common.sh b/concourse/scripts/test_common.sh index ef509cd84be..0a72ffe8518 100644 --- a/concourse/scripts/test_common.sh +++ b/concourse/scripts/test_common.sh @@ -15,9 +15,26 @@ function test(){ gpstop -arf # the dir to run the "make install" command pushd $1 - trap "[ -s regression.diffs ] && grep -v GP_IGNORE regression.diffs" EXIT + + function look4diffs() { + diff_files=\`find ..
-name regression.diffs\` + for diff_file in \${diff_files}; do + if [ -f "\${diff_file}" ]; then + cat <<-FEOF + ====================================================================== + DIFF FILE: \${diff_file} + ====================================================================== + + \$(grep -v GP_IGNORE "\${diff_file}") + FEOF + fi + done + exit 1 + } + + trap look4diffs ERR make installcheck - [ -s regression.diffs ] && grep -v GP_IGNORE regression.diffs && cat regression.diffs && exit 1 + if $2 ; then ## Bring down the QD. gpstop -may -M immediate @@ -26,7 +43,6 @@ function test(){ source /usr/local/greenplum-db-devel/greenplum_path.sh gpactivatestandby -ad ${TOP_DIR}/gpdb_src/gpAux/gpdemo/datadirs/standby make installcheck - [ -s regression.diffs ] && grep -v GP_IGNORE regression.diffs && cat regression.diffs && exit 1 fi popd EOF From 5c165ca736746d10d4f8c5a5a5e0fac78dbd1685 Mon Sep 17 00:00:00 2001 From: Xing Guo Date: Thu, 16 Dec 2021 09:09:12 +0800 Subject: [PATCH 100/330] Fix flaky test cases. NFC. (#110) --- .../expected/test_uncommitted_table_size.out | 30 +++++++------------ .../sql/test_uncommitted_table_size.sql | 10 +++---- 2 files changed, 15 insertions(+), 25 deletions(-) diff --git a/tests/regress/expected/test_uncommitted_table_size.out b/tests/regress/expected/test_uncommitted_table_size.out index 2082992335b..93ee2205a42 100644 --- a/tests/regress/expected/test_uncommitted_table_size.out +++ b/tests/regress/expected/test_uncommitted_table_size.out @@ -110,16 +110,11 @@ SELECT pg_sleep(5); (1 row) -SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'ao'::regclass and segid = -1; - tableid | size | segid ----------+---------+------- - ao | 1263784 | -1 -(1 row) - -SELECT pg_table_size('ao'); - pg_table_size ---------------- - 1263784 +SELECT (SELECT size FROM diskquota.table_size WHERE tableid = 'ao'::regclass and segid = -1)= + (SELECT pg_table_size('ao')); + ?column? +---------- + t (1 row) commit; @@ -157,16 +152,11 @@ SELECT pg_sleep(5); (1 row) -SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'ao'::regclass and segid = -1; - tableid | size | segid ----------+--------+------- - ao | 329576 | -1 -(1 row) - -SELECT pg_table_size('ao'); - pg_table_size ---------------- - 329576 +SELECT (SELECT size FROM diskquota.table_size WHERE tableid = 'ao'::regclass and segid = -1)= + (SELECT pg_table_size('ao')); + ?column? 
+---------- + t (1 row) commit; diff --git a/tests/regress/sql/test_uncommitted_table_size.sql b/tests/regress/sql/test_uncommitted_table_size.sql index 6cebf8afb54..97ea41352d1 100644 --- a/tests/regress/sql/test_uncommitted_table_size.sql +++ b/tests/regress/sql/test_uncommitted_table_size.sql @@ -44,8 +44,8 @@ begin; CREATE TABLE ao (i int) WITH (appendonly=true); INSERT INTO ao SELECT generate_series(1, 100000); SELECT pg_sleep(5); -SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'ao'::regclass and segid = -1; -SELECT pg_table_size('ao'); +SELECT (SELECT size FROM diskquota.table_size WHERE tableid = 'ao'::regclass and segid = -1)= + (SELECT pg_table_size('ao')); commit; -- AO table index @@ -62,8 +62,8 @@ DROP TABLE ao; begin; CREATE TABLE ao WITH(appendonly=true) AS SELECT generate_series(1, 10000); SELECT pg_sleep(5); -SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'ao'::regclass and segid = -1; -SELECT pg_table_size('ao'); +SELECT (SELECT size FROM diskquota.table_size WHERE tableid = 'ao'::regclass and segid = -1)= + (SELECT pg_table_size('ao')); commit; DROP TABLE ao; @@ -93,4 +93,4 @@ SELECT pg_sleep(5); SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'aocs'::regclass and segid = -1; SELECT pg_table_size('aocs'); commit; -DROP TABLE aocs; \ No newline at end of file +DROP TABLE aocs; From 81e15562e1a0c1a9c472054ccdc4397b3a7f819d Mon Sep 17 00:00:00 2001 From: Zhang Hao <1446384557@qq.com> Date: Mon, 20 Dec 2021 15:04:52 +0800 Subject: [PATCH 101/330] Fix bug: table's relation_cache_entry is not removed after vacuum full (#112) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Fix bug The relation_cache_entry of the temporary table created during vacuum full is not removed after vacuum full finishes. This table will be treated as an uncommitted table, although it has been dropped after vacuum full, and its table size still remains in diskquota.table_size, which causes the computed quota usage to be larger than the real usage. Use RelidByRelfilenode() to check whether the table is committed, and remove its relation_cache_entry. Co-authored-by: hzhang2 Co-authored-by: Xing Guo Co-authored-by: Xuebin Su (苏学斌) --- relation_cache.c | 10 ++++++++-- tests/regress/expected/test_vacuum.out | 13 +++++++++++++ tests/regress/sql/test_vacuum.sql | 2 ++ 3 files changed, 23 insertions(+), 2 deletions(-) diff --git a/relation_cache.c b/relation_cache.c index 48bc19d6a52..d26fa187513 100644 --- a/relation_cache.c +++ b/relation_cache.c @@ -289,9 +289,15 @@ remove_committed_relation_from_cache(void) hash_seq_init(&iter, local_relation_cache); while ((local_entry = hash_seq_search(&iter)) != NULL) { - if (SearchSysCacheExists1(RELOID, local_entry->relid)) + /* + * The committed table's oid can be fetched by RelidByRelfilenode(). + * If the table's relfilenode is modified and its relation_cache_entry + * remains in relation_cache, the outdated relation_cache_entry should + * be removed.
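+	 * (VACUUM FULL rewrites the table into a new relfilenode, so an entry
+	 * keyed by the old relfilenode no longer maps to any live relation.)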
+ */ + if (OidIsValid(RelidByRelfilenode(local_entry->rnode.node.spcNode, local_entry->rnode.node.relNode))) { - remove_cache_entry(local_entry->relid, InvalidOid); + remove_cache_entry(InvalidOid, local_entry->rnode.node.relNode); } } hash_destroy(local_relation_cache); diff --git a/tests/regress/expected/test_vacuum.out b/tests/regress/expected/test_vacuum.out index a4c40423ce7..24b985c29e0 100644 --- a/tests/regress/expected/test_vacuum.out +++ b/tests/regress/expected/test_vacuum.out @@ -34,6 +34,19 @@ SELECT pg_sleep(20); (1 row) +SELECT tableid::regclass, size, segid from diskquota.table_size WHERE tableid::regclass::name NOT LIKE '%.%' ORDER BY size, segid DESC; + tableid | size | segid +---------+-------+------- + b | 0 | 2 + b | 0 | 1 + b | 0 | 0 + b | 0 | -1 + a | 32768 | 2 + a | 32768 | 1 + a | 32768 | 0 + a | 98304 | -1 +(8 rows) + -- expect insert succeed INSERT INTO a SELECT generate_series(1,10); INSERT INTO b SELECT generate_series(1,10); diff --git a/tests/regress/sql/test_vacuum.sql b/tests/regress/sql/test_vacuum.sql index ddc444262a9..64bea8a8ea4 100644 --- a/tests/regress/sql/test_vacuum.sql +++ b/tests/regress/sql/test_vacuum.sql @@ -13,6 +13,8 @@ INSERT INTO b SELECT generate_series(1,10); DELETE FROM a WHERE i > 10; VACUUM FULL a; SELECT pg_sleep(20); +SELECT tableid::regclass, size, segid from diskquota.table_size WHERE tableid::regclass::name not like '%.%' ORDER BY size, segid DESC; + -- expect insert succeed INSERT INTO a SELECT generate_series(1,10); INSERT INTO b SELECT generate_series(1,10); From 6ee3cbbf7b0d9ca8a7c762ea3279f905aa86a43c Mon Sep 17 00:00:00 2001 From: Zhang Hao <1446384557@qq.com> Date: Tue, 21 Dec 2021 14:43:41 +0800 Subject: [PATCH 102/330] Fix test case bug (#114) The SQL statement in test_vacuum.sql did not match its expected output. Co-authored-by: hzhang2 --- tests/regress/sql/test_vacuum.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/regress/sql/test_vacuum.sql b/tests/regress/sql/test_vacuum.sql index 64bea8a8ea4..e4926273914 100644 --- a/tests/regress/sql/test_vacuum.sql +++ b/tests/regress/sql/test_vacuum.sql @@ -13,7 +13,7 @@ INSERT INTO b SELECT generate_series(1,10); DELETE FROM a WHERE i > 10; VACUUM FULL a; SELECT pg_sleep(20); -SELECT tableid::regclass, size, segid from diskquota.table_size WHERE tableid::regclass::name not like '%.%' ORDER BY size, segid DESC; +SELECT tableid::regclass, size, segid from diskquota.table_size WHERE tableid::regclass::name NOT LIKE '%.%' ORDER BY size, segid DESC; -- expect insert succeed INSERT INTO a SELECT generate_series(1,10); INSERT INTO b SELECT generate_series(1,10); From 7fe6f019c1ff748be63a3e21bfa8925689585224 Mon Sep 17 00:00:00 2001 From: Zhang Hao <1446384557@qq.com> Date: Tue, 21 Dec 2021 15:07:24 +0800 Subject: [PATCH 103/330] Fix bug: cannot calculate the size of pg_aoblkdir_xxxx before `create index on ao_table` is committed (#113) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We cannot calculate the size of pg_aoblkdir_xxxx before `create index on ao_table` is committed: 1. We lack the ability to parse the name of pg_aoblkdir_xxxx. 2. pg_aoblkdir_xxxx is created by `create index on ao_table`, so it cannot be found by diskquota_get_appendonly_aux_oid_list() before the index creation commits. Solution: 1. Parse names beginning with `pg_aoblkdir`. 2. When blkdirrelid is missing, try to fetch it by traversing relation_cache.
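For illustration, here is a minimal standalone sketch of the naming convention the new parsing relies on (parse_aoblkdir_name is a hypothetical helper; the real logic is diskquota_parse_primary_table_oid() in the diff below, which also handles the pg_toast/pg_aoseg/pg_aovisimap/pg_aocsseg prefixes):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* e.g. "pg_aoblkdir_16385" -> 16385; 0 stands in for InvalidOid */
static unsigned int
parse_aoblkdir_name(const char *relname)
{
	if (strncmp(relname, "pg_aoblkdir", 11) == 0)
		return (unsigned int) atoi(&relname[12]);
	return 0;
}

int
main(void)
{
	printf("%u\n", parse_aoblkdir_name("pg_aoblkdir_16385")); /* prints 16385 */
	return 0;
}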
Co-authored-by: hzhang2 Co-authored-by: Xing Guo Co-authored-by: Xuebin Su (苏学斌) --- diskquota.h | 1 + diskquota_utility.c | 27 ++++++ relation_cache.c | 85 ++++++++++++------- .../expected/test_uncommitted_table_size.out | 15 +++- .../sql/test_uncommitted_table_size.sql | 2 + 5 files changed, 99 insertions(+), 31 deletions(-) diff --git a/diskquota.h b/diskquota.h index f2ba1ac5a33..8fb00281b81 100644 --- a/diskquota.h +++ b/diskquota.h @@ -125,4 +125,5 @@ extern int64 calculate_relation_size_all_forks(RelFileNodeBackend *rnode, char r extern Relation diskquota_relation_open(Oid relid, LOCKMODE mode); extern List* diskquota_get_index_list(Oid relid); extern void diskquota_get_appendonly_aux_oid_list(Oid reloid, Oid *segrelid, Oid *blkdirrelid, Oid *visimaprelid); +extern Oid diskquota_parse_primary_table_oid(Oid namespace, char *relname); #endif diff --git a/diskquota_utility.c b/diskquota_utility.c index b1a19d7214a..0637544e3a1 100644 --- a/diskquota_utility.c +++ b/diskquota_utility.c @@ -29,6 +29,8 @@ #include "catalog/pg_extension.h" #include "catalog/pg_tablespace.h" #include "catalog/pg_type.h" +#include "catalog/pg_namespace.h" +#include "catalog/pg_tablespace.h" #include "catalog/indexing.h" #include "commands/dbcommands.h" #include "commands/extension.h" @@ -1407,3 +1409,28 @@ diskquota_get_appendonly_aux_oid_list(Oid reloid, Oid *segrelid, Oid *blkdirreli systable_endscan(scan); heap_close(aorel, AccessShareLock); } + +Oid +diskquota_parse_primary_table_oid(Oid namespace, char *relname) +{ + switch (namespace) + { + case PG_TOAST_NAMESPACE: + if (strncmp(relname, "pg_toast", 8) == 0) + return atoi(&relname[9]); + break; + case PG_AOSEGMENT_NAMESPACE: + { + if (strncmp(relname, "pg_aoseg", 8) == 0) + return atoi(&relname[9]); + else if (strncmp(relname, "pg_aovisimap", 12) == 0) + return atoi(&relname[13]); + else if (strncmp(relname, "pg_aocsseg", 10) == 0) + return atoi(&relname[11]); + else if (strncmp(relname, "pg_aoblkdir", 11) == 0) + return atoi(&relname[12]); + } + break; + } + return InvalidOid; +} diff --git a/relation_cache.c b/relation_cache.c index d26fa187513..c8bdc407de4 100644 --- a/relation_cache.c +++ b/relation_cache.c @@ -201,6 +201,7 @@ parse_primary_table_oid(Oid relid) { Relation rel; Oid namespace; + Oid parsed_oid; char relname[NAMEDATALEN]; rel = diskquota_relation_open(relid, NoLock); @@ -213,22 +214,10 @@ parse_primary_table_oid(Oid relid) memcpy(relname, rel->rd_rel->relname.data, NAMEDATALEN); relation_close(rel, NoLock); - switch (namespace) + parsed_oid = diskquota_parse_primary_table_oid(namespace, relname); + if (OidIsValid(parsed_oid)) { - case PG_TOAST_NAMESPACE: - if (strncmp(relname, "pg_toast", 8) == 0) - return atoi(&relname[9]); - break; - case PG_AOSEGMENT_NAMESPACE: - { - if (strncmp(relname, "pg_aoseg", 8) == 0) - return atoi(&relname[9]); - else if (strncmp(relname, "pg_aovisimap", 12) == 0) - return atoi(&relname[13]); - else if (strncmp(relname, "pg_aocsseg", 10) == 0) - return atoi(&relname[11]); - } - break; + return parsed_oid; } return relid; } @@ -430,7 +419,14 @@ add_auxrelation_to_relation_entry(Oid relid, DiskQuotaRelationCacheEntry *pentry list_free(index_oids); } -static void +/* + * Returns true iff blkdirrelid is missing. + * pg_aoblkdir_xxxx is created by `create index on ao_table`, which can not be + * fetched by diskquota_get_appendonly_aux_oid_list() before index's creation + * finish. 
By returning true to inform the caller that blkdirrelid is missing, + * then the caller will fetch blkdirrelid by traversing relation_cache. + */ +static bool get_relation_entry_from_pg_class(Oid relid, DiskQuotaRelationCacheEntry* relation_entry) { HeapTuple classTup; @@ -438,11 +434,12 @@ get_relation_entry_from_pg_class(Oid relid, DiskQuotaRelationCacheEntry* relatio Oid segrelid = InvalidOid; Oid blkdirrelid = InvalidOid; Oid visimaprelid = InvalidOid; + bool is_ao = false; classTup = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(relid)); if (!HeapTupleIsValid(classTup) || relation_entry == NULL) { - return; + return false; } classForm = (Form_pg_class) GETSTRUCT(classTup); @@ -465,28 +462,42 @@ get_relation_entry_from_pg_class(Oid relid, DiskQuotaRelationCacheEntry* relatio add_auxrelation_to_relation_entry(classForm->reltoastrelid, relation_entry); } + if (classForm->relstorage == RELSTORAGE_AOROWS || classForm->relstorage == RELSTORAGE_AOCOLS) + { + is_ao = true; + } heap_freetuple(classTup); /* ao table */ - diskquota_get_appendonly_aux_oid_list(relid, &segrelid, &blkdirrelid, &visimaprelid); - if (OidIsValid(segrelid)) - { - add_auxrelation_to_relation_entry(segrelid, relation_entry); - } - if (OidIsValid(blkdirrelid)) - { - add_auxrelation_to_relation_entry(blkdirrelid, relation_entry); - } - if (OidIsValid(visimaprelid)) + if (is_ao) { - add_auxrelation_to_relation_entry(visimaprelid, relation_entry); + diskquota_get_appendonly_aux_oid_list(relid, &segrelid, &blkdirrelid, &visimaprelid); + if (OidIsValid(segrelid)) + { + add_auxrelation_to_relation_entry(segrelid, relation_entry); + } + if (OidIsValid(blkdirrelid)) + { + add_auxrelation_to_relation_entry(blkdirrelid, relation_entry); + } + if (OidIsValid(visimaprelid)) + { + add_auxrelation_to_relation_entry(visimaprelid, relation_entry); + } + + if (!OidIsValid(blkdirrelid)) + { + return true; + } } + return false; } static void get_relation_entry(Oid relid, DiskQuotaRelationCacheEntry* entry) { DiskQuotaRelationCacheEntry* tentry; + bool is_missing_relid; LWLockAcquire(diskquota_locks.relation_cache_lock, LW_SHARED); tentry = hash_search(relation_cache, &relid, HASH_FIND, NULL); @@ -498,7 +509,23 @@ get_relation_entry(Oid relid, DiskQuotaRelationCacheEntry* entry) } LWLockRelease(diskquota_locks.relation_cache_lock); - get_relation_entry_from_pg_class(relid, entry); + is_missing_relid = get_relation_entry_from_pg_class(relid, entry); + + if (is_missing_relid) + { + DiskQuotaRelationCacheEntry *relation_cache_entry; + HASH_SEQ_STATUS iter; + LWLockAcquire(diskquota_locks.relation_cache_lock, LW_SHARED); + hash_seq_init(&iter, relation_cache); + while ((relation_cache_entry = hash_seq_search(&iter)) != NULL) + { + if (relation_cache_entry->primary_table_relid == relid) + { + add_auxrelid_to_relation_entry(entry, relation_cache_entry->relid); + } + } + LWLockRelease(diskquota_locks.relation_cache_lock); + } } static void diff --git a/tests/regress/expected/test_uncommitted_table_size.out b/tests/regress/expected/test_uncommitted_table_size.out index 93ee2205a42..c388137c377 100644 --- a/tests/regress/expected/test_uncommitted_table_size.out +++ b/tests/regress/expected/test_uncommitted_table_size.out @@ -139,13 +139,24 @@ SELECT pg_table_size('ao_idx'); 2490368 (1 row) +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'ao'::regclass and segid = -1; + tableid | size | segid +---------+---------+------- + ao | 1591464 | -1 +(1 row) + +SELECT pg_table_size('ao'); + pg_table_size +--------------- + 
1591464 +(1 row) + commit; DROP TABLE ao; -- AO table CTAS begin; CREATE TABLE ao WITH(appendonly=true) AS SELECT generate_series(1, 10000); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause. Creating a NULL policy entry. SELECT pg_sleep(5); pg_sleep ---------- diff --git a/tests/regress/sql/test_uncommitted_table_size.sql b/tests/regress/sql/test_uncommitted_table_size.sql index 97ea41352d1..4e682ed3311 100644 --- a/tests/regress/sql/test_uncommitted_table_size.sql +++ b/tests/regress/sql/test_uncommitted_table_size.sql @@ -54,6 +54,8 @@ CREATE INDEX ao_idx on ao(i); SELECT pg_sleep(5); SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'ao_idx'::regclass and segid = -1; SELECT pg_table_size('ao_idx'); +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'ao'::regclass and segid = -1; +SELECT pg_table_size('ao'); commit; DROP TABLE ao; From 91ab8da1f419352b2f90e1a82d111c07a49fd518 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Xuebin=20Su=20=28=E8=8B=8F=E5=AD=A6=E6=96=8C=29?= <12034000+xuebinsu@users.noreply.github.com> Date: Wed, 22 Dec 2021 10:57:02 +0800 Subject: [PATCH 104/330] Add UDF to show worker status (#111) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Xuebin Su (苏学斌) <12034000+xuebinsu@users.noreply.github.com> Co-authored-by: Xing Guo Co-authored-by: Hao Zhang --- diskquota--2.0.sql | 37 +++ diskquota.c | 86 +++-- diskquota.h | 19 ++ quotamodel.c | 14 + tests/regress/diskquota_schedule | 1 + tests/regress/expected/init.out | 3 +- tests/regress/expected/prepare.out | 32 +- tests/regress/expected/test_appendonly.out | 16 +- tests/regress/expected/test_column.out | 24 +- tests/regress/expected/test_copy.out | 8 +- tests/regress/expected/test_ctas_role.out | 54 +-- tests/regress/expected/test_ctas_schema.out | 38 +-- .../expected/test_ctas_tablespace_role.out | 39 ++- .../expected/test_ctas_tablespace_schema.out | 32 +- tests/regress/expected/test_delete_quota.out | 16 +- tests/regress/expected/test_drop_table.out | 16 +- tests/regress/expected/test_extension.out | 68 ++-- .../regress/expected/test_fast_disk_check.out | 8 +- tests/regress/expected/test_index.out | 42 +-- .../expected/test_many_active_tables.out | 16 +- tests/regress/expected/test_partition.out | 24 +- .../expected/test_pause_and_resume.out | 8 +- .../regress/expected/test_primary_failure.out | 313 ++++++++---------- .../regress/expected/test_relation_cache.out | 32 +- tests/regress/expected/test_rename.out | 16 +- tests/regress/expected/test_reschema.out | 16 +- tests/regress/expected/test_role.out | 32 +- tests/regress/expected/test_schema.out | 32 +- .../regress/expected/test_tablespace_role.out | 78 ++--- .../expected/test_tablespace_role_perseg.out | 96 +++--- .../expected/test_tablespace_schema.out | 70 ++-- .../test_tablespace_schema_perseg.out | 96 +++--- tests/regress/expected/test_temp_role.out | 16 +- tests/regress/expected/test_toast.out | 8 +- tests/regress/expected/test_truncate.out | 16 +- .../expected/test_uncommitted_table_size.out | 83 ++--- tests/regress/expected/test_update.out | 8 +- tests/regress/expected/test_vacuum.out | 16 +- tests/regress/expected/test_worker_epoch.out 
| 9 + tests/regress/sql/init.sql | 4 +- tests/regress/sql/prepare.sql | 4 +- tests/regress/sql/test_appendonly.sql | 4 +- tests/regress/sql/test_column.sql | 6 +- tests/regress/sql/test_copy.sql | 2 +- tests/regress/sql/test_ctas_role.sql | 18 +- tests/regress/sql/test_ctas_schema.sql | 9 +- .../regress/sql/test_ctas_tablespace_role.sql | 10 +- .../sql/test_ctas_tablespace_schema.sql | 8 +- tests/regress/sql/test_delete_quota.sql | 4 +- tests/regress/sql/test_drop_table.sql | 6 +- tests/regress/sql/test_extension.sql | 16 +- tests/regress/sql/test_fast_disk_check.sql | 2 +- tests/regress/sql/test_index.sql | 10 +- tests/regress/sql/test_many_active_tables.sql | 4 +- tests/regress/sql/test_partition.sql | 6 +- tests/regress/sql/test_pause_and_resume.sql | 2 +- tests/regress/sql/test_primary_failure.sql | 8 +- tests/regress/sql/test_relation_cache.sql | 8 +- tests/regress/sql/test_rename.sql | 4 +- tests/regress/sql/test_reschema.sql | 4 +- tests/regress/sql/test_role.sql | 8 +- tests/regress/sql/test_schema.sql | 10 +- tests/regress/sql/test_tablespace_role.sql | 19 +- .../sql/test_tablespace_role_perseg.sql | 24 +- tests/regress/sql/test_tablespace_schema.sql | 18 +- .../sql/test_tablespace_schema_perseg.sql | 24 +- tests/regress/sql/test_temp_role.sql | 4 +- tests/regress/sql/test_toast.sql | 2 +- tests/regress/sql/test_truncate.sql | 4 +- .../sql/test_uncommitted_table_size.sql | 20 +- tests/regress/sql/test_update.sql | 2 +- tests/regress/sql/test_vacuum.sql | 4 +- tests/regress/sql/test_worker_epoch.sql | 5 + 73 files changed, 944 insertions(+), 877 deletions(-) create mode 100644 tests/regress/expected/test_worker_epoch.out create mode 100644 tests/regress/sql/test_worker_epoch.sql diff --git a/diskquota--2.0.sql b/diskquota--2.0.sql index ef4a1114d70..4dc0934ef0a 100644 --- a/diskquota--2.0.sql +++ b/diskquota--2.0.sql @@ -198,3 +198,40 @@ WITH relation_cache AS ( ) SELECT (a).* FROM relation_cache; $$ LANGUAGE SQL; + +-- Returns the worker epoch for the current database. +-- An epoch marks a new iteration of refreshing quota usage by a bgworker. +-- An epoch is a 32-bit unsigned integer and there is NO invalid value. +-- Therefore, the UDF must throw an error if something unexpected occurs. +CREATE OR REPLACE FUNCTION diskquota.show_worker_epoch() +RETURNS bigint STRICT +AS 'MODULE_PATHNAME', 'show_worker_epoch' +LANGUAGE C; + +-- Checks if the bgworker for the current database works as expected. +-- 1. If it returns successfully in `diskquota.naptime`, the bgworker works as expected. +-- 2. If it does not terminate, there must be some issues with the bgworker. +-- In this case, we must ensure this UDF can be interrupted by the user. 
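+-- Typical usage in the regress tests below (illustrative): a fixed
+--     SELECT pg_sleep(5);
+-- becomes
+--     SELECT diskquota.wait_for_worker_new_epoch();
+-- which returns only after the epoch has advanced twice, guaranteeing that at
+-- least one complete refresh cycle finished after the call was issued.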
+CREATE OR REPLACE FUNCTION diskquota.wait_for_worker_new_epoch() +RETURNS boolean STRICT +AS $$ +DECLARE + current_epoch bigint; + new_epoch bigint; +BEGIN + current_epoch := diskquota.show_worker_epoch(); + LOOP + new_epoch := diskquota.show_worker_epoch(); + IF new_epoch <> current_epoch THEN + current_epoch := new_epoch; + LOOP + new_epoch := diskquota.show_worker_epoch(); + IF new_epoch <> current_epoch THEN + RETURN TRUE; + END IF; + END LOOP; + END IF; + END LOOP; + RETURN FALSE; +END; +$$ LANGUAGE PLpgSQL; diff --git a/diskquota.c b/diskquota.c index efd8809cdf6..3cd7b861961 100644 --- a/diskquota.c +++ b/diskquota.c @@ -35,7 +35,6 @@ #include "miscadmin.h" #include "nodes/makefuncs.h" #include "pgstat.h" -#include "postmaster/bgworker.h" #include "storage/ipc.h" #include "storage/proc.h" #include "tcop/idle_resource_cleaner.h" @@ -66,21 +65,11 @@ static volatile sig_atomic_t got_sigusr1 = false; int diskquota_naptime = 0; int diskquota_max_active_tables = 0; -typedef struct DiskQuotaWorkerEntry DiskQuotaWorkerEntry; - -/* disk quota worker info used by launcher to manage the worker processes. */ -struct DiskQuotaWorkerEntry -{ - Oid dbid; - pid_t pid; /* worker pid */ - BackgroundWorkerHandle *handle; -}; - DiskQuotaLocks diskquota_locks; ExtensionDDLMessage *extension_ddl_message = NULL; /* using hash table to support incremental update the table size entry.*/ -static HTAB *disk_quota_worker_map = NULL; +HTAB *disk_quota_worker_map = NULL; static int num_db = 0; /* @@ -230,7 +219,7 @@ define_guc_variables(void) NULL, &diskquota_naptime, 2, - 1, + 0, INT_MAX, PGC_SIGHUP, 0, @@ -364,7 +353,6 @@ disk_quota_worker_main(Datum main_arg) diskquota_naptime * 1000L); ResetLatch(&MyProc->procLatch); - /* Emergency bailout if postmaster has died */ if (rc & WL_POSTMASTER_DEATH) proc_exit(1); @@ -378,6 +366,7 @@ disk_quota_worker_main(Datum main_arg) /* Do the work */ refresh_disk_quota_model(false); + worker_increase_epoch(MyDatabaseId); } /* clear the out-of-quota blacklist in shared memory */ @@ -403,7 +392,6 @@ static inline bool isAbnormalLoopTime(int diff_sec) void disk_quota_launcher_main(Datum main_arg) { - HASHCTL hash_ctl; time_t loop_begin, loop_end; /* establish signal handlers before unblocking signals. */ @@ -437,17 +425,6 @@ disk_quota_launcher_main(Datum main_arg) */ create_monitor_db_table(); - /* use disk_quota_worker_map to manage diskquota worker processes. */ - memset(&hash_ctl, 0, sizeof(hash_ctl)); - hash_ctl.keysize = sizeof(Oid); - hash_ctl.entrysize = sizeof(DiskQuotaWorkerEntry); - hash_ctl.hash = oid_hash; - - disk_quota_worker_map = hash_create("disk quota worker map", - 1024, - &hash_ctl, - HASH_ELEM | HASH_FUNCTION); - /* * firstly start worker processes for each databases with diskquota * enabled. 
@@ -901,6 +878,7 @@ try_kill_db_worker(Oid dbid) DiskQuotaWorkerEntry *hash_entry; bool found; + LWLockAcquire(diskquota_locks.worker_map_lock, LW_EXCLUSIVE); hash_entry = (DiskQuotaWorkerEntry *) hash_search(disk_quota_worker_map, (void *) &dbid, HASH_REMOVE, &found); @@ -915,6 +893,7 @@ try_kill_db_worker(Oid dbid) pfree(handle); } } + LWLockRelease(diskquota_locks.worker_map_lock); } /* @@ -926,6 +905,7 @@ terminate_all_workers(void) DiskQuotaWorkerEntry *hash_entry; HASH_SEQ_STATUS iter; + LWLockAcquire(diskquota_locks.worker_map_lock, LW_EXCLUSIVE); hash_seq_init(&iter, disk_quota_worker_map); @@ -938,6 +918,7 @@ terminate_all_workers(void) if (hash_entry->handle) TerminateBackgroundWorker(hash_entry->handle); } + LWLockRelease(diskquota_locks.worker_map_lock); } /* @@ -1000,6 +981,8 @@ start_worker_by_dboid(Oid dbid) Assert(status == BGWH_STARTED); + LWLockAcquire(diskquota_locks.worker_map_lock, LW_EXCLUSIVE); + /* put the worker handle into the worker map */ workerentry = (DiskQuotaWorkerEntry *) hash_search(disk_quota_worker_map, (void *) &dbid, @@ -1008,8 +991,11 @@ start_worker_by_dboid(Oid dbid) { workerentry->handle = handle; workerentry->pid = pid; + workerentry->epoch = 0; } + LWLockRelease(diskquota_locks.worker_map_lock); + return true; } @@ -1029,3 +1015,51 @@ is_valid_dbid(Oid dbid) ReleaseSysCache(tuple); return true; } + +bool +worker_increase_epoch(Oid database_oid) +{ + LWLockAcquire(diskquota_locks.worker_map_lock, LW_EXCLUSIVE); + + bool found = false; + DiskQuotaWorkerEntry * workerentry = (DiskQuotaWorkerEntry *) hash_search( + disk_quota_worker_map, (void *) &database_oid, HASH_FIND, &found); + + if (found) + { + ++(workerentry->epoch); + } + LWLockRelease(diskquota_locks.worker_map_lock); + return found; +} + +unsigned int +worker_get_epoch(Oid database_oid) +{ + LWLockAcquire(diskquota_locks.worker_map_lock, LW_SHARED); + + bool found = false; + unsigned int epoch = 0; + DiskQuotaWorkerEntry * workerentry = (DiskQuotaWorkerEntry *) hash_search( + disk_quota_worker_map, (void *) &database_oid, HASH_FIND, &found); + + if (found) + { + epoch = workerentry->epoch; + } + LWLockRelease(diskquota_locks.worker_map_lock); + if (!found) + { + ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("[diskquota] worker not found for database \"%s\"", + get_database_name(MyDatabaseId)))); + } + return epoch; +} + +PG_FUNCTION_INFO_V1(show_worker_epoch); +Datum +show_worker_epoch(PG_FUNCTION_ARGS) +{ + PG_RETURN_UINT32(worker_get_epoch(MyDatabaseId)); +} diff --git a/diskquota.h b/diskquota.h index 8fb00281b81..d3d9c415bf6 100644 --- a/diskquota.h +++ b/diskquota.h @@ -2,6 +2,7 @@ #define DISK_QUOTA_H #include "storage/lwlock.h" +#include "postmaster/bgworker.h" /* max number of monitored database with diskquota enabled */ #define MAX_NUM_MONITORED_DB 10 @@ -38,6 +39,7 @@ struct DiskQuotaLocks LWLock *paused_lock; LWLock *relation_cache_lock; LWLock *hardlimit_lock; + LWLock *worker_map_lock; }; typedef struct DiskQuotaLocks DiskQuotaLocks; #define DiskQuotaLocksItemNumber (sizeof(DiskQuotaLocks) / sizeof(void*)) @@ -96,6 +98,19 @@ extern ExtensionDDLMessage *extension_ddl_message; extern bool *diskquota_paused; extern bool *diskquota_hardlimit; +typedef struct DiskQuotaWorkerEntry DiskQuotaWorkerEntry; + +/* disk quota worker info used by launcher to manage the worker processes. 
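+ * (The hash is keyed by dbid; epoch is advanced by the worker once per
+ * refresh cycle and read back by show_worker_epoch(), both guarded by
+ * worker_map_lock.)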
*/ +struct DiskQuotaWorkerEntry +{ + Oid dbid; + pid_t pid; /* worker pid */ + unsigned int epoch; + BackgroundWorkerHandle *handle; +}; + +extern HTAB *disk_quota_worker_map; + /* drop extension hook */ extern void register_diskquota_object_access_hook(void); @@ -126,4 +141,8 @@ extern Relation diskquota_relation_open(Oid relid, LOCKMODE mode); extern List* diskquota_get_index_list(Oid relid); extern void diskquota_get_appendonly_aux_oid_list(Oid reloid, Oid *segrelid, Oid *blkdirrelid, Oid *visimaprelid); extern Oid diskquota_parse_primary_table_oid(Oid namespace, char *relname); + +extern bool worker_increase_epoch(Oid database_oid); +extern unsigned int worker_get_epoch(Oid database_oid); + #endif diff --git a/quotamodel.c b/quotamodel.c index 59c04103bf2..75d2ac69a8d 100644 --- a/quotamodel.c +++ b/quotamodel.c @@ -478,6 +478,18 @@ disk_quota_shmem_startup(void) if (!found) memset((void *) diskquota_hardlimit, 0, sizeof(bool)); + /* use disk_quota_worker_map to manage diskquota worker processes. */ + memset(&hash_ctl, 0, sizeof(hash_ctl)); + hash_ctl.keysize = sizeof(Oid); + hash_ctl.entrysize = sizeof(DiskQuotaWorkerEntry); + hash_ctl.hash = oid_hash; + + disk_quota_worker_map = ShmemInitHash("disk quota worker map", + MAX_NUM_MONITORED_DB, + MAX_NUM_MONITORED_DB, + &hash_ctl, + HASH_ELEM | HASH_FUNCTION); + LWLockRelease(AddinShmemInitLock); } @@ -502,6 +514,7 @@ init_lwlocks(void) diskquota_locks.paused_lock = LWLockAssign(); diskquota_locks.relation_cache_lock = LWLockAssign(); diskquota_locks.hardlimit_lock = LWLockAssign(); + diskquota_locks.worker_map_lock = LWLockAssign(); } /* @@ -519,6 +532,7 @@ DiskQuotaShmemSize(void) size = add_size(size, hash_estimate_size(diskquota_max_active_tables, sizeof(DiskQuotaRelationCacheEntry))); size = add_size(size, hash_estimate_size(diskquota_max_active_tables, sizeof(DiskQuotaRelidCacheEntry))); size = add_size(size, hash_estimate_size(MAX_NUM_MONITORED_DB, sizeof(Oid))); + size = add_size(size, hash_estimate_size(MAX_NUM_MONITORED_DB, sizeof(DiskQuotaWorkerEntry))); size += sizeof(bool); /* sizeof(*diskquota_paused) */ size += sizeof(bool); /* sizeof(*diskquota_hardlimit) */ return size; diff --git a/tests/regress/diskquota_schedule b/tests/regress/diskquota_schedule index d0ee2c1f45b..7cc4bd64790 100644 --- a/tests/regress/diskquota_schedule +++ b/tests/regress/diskquota_schedule @@ -1,5 +1,6 @@ test: init test: prepare +test: test_worker_epoch test: test_relation_size test: test_relation_cache test: test_uncommitted_table_size diff --git a/tests/regress/expected/init.out b/tests/regress/expected/init.out index 0f149d65ca9..7b09388e986 100644 --- a/tests/regress/expected/init.out +++ b/tests/regress/expected/init.out @@ -4,7 +4,7 @@ \! echo $? 0 -- start_ignore -\! gpconfig -c diskquota.naptime -v 2 > /dev/null +\! gpconfig -c diskquota.naptime -v 0 > /dev/null -- end_ignore \! echo $? 0 @@ -18,4 +18,3 @@ -- end_ignore \! echo $? 0 -\! sleep 10 diff --git a/tests/regress/expected/prepare.out b/tests/regress/expected/prepare.out index f05028dc90c..107a00a2378 100644 --- a/tests/regress/expected/prepare.out +++ b/tests/regress/expected/prepare.out @@ -1,26 +1,14 @@ CREATE EXTENSION diskquota; -- start_ignore \! gpstop -u -20211213:00:02:48:240132 gpstop:laptop:v-[INFO]:-Starting gpstop with args: -u -20211213:00:02:48:240132 gpstop:laptop:v-[INFO]:-Gathering information and validating the environment... 
-20211213:00:02:48:240132 gpstop:laptop:v-[INFO]:-Obtaining Greenplum Master catalog information -20211213:00:02:48:240132 gpstop:laptop:v-[INFO]:-Obtaining Segment details from master... -20211213:00:02:48:240132 gpstop:laptop:v-[INFO]:-Greenplum Version: 'postgres (Greenplum Database) 6.18.2+dev.173.g55557f44f3 build dev' -20211213:00:02:48:240132 gpstop:laptop:v-[INFO]:-Signalling all postmaster processes to reload +20211216:15:13:33:152977 gpstop:laptop:v-[INFO]:-Starting gpstop with args: -u +20211216:15:13:33:152977 gpstop:laptop:v-[INFO]:-Gathering information and validating the environment... +20211216:15:13:33:152977 gpstop:laptop:v-[INFO]:-Obtaining Greenplum Master catalog information +20211216:15:13:33:152977 gpstop:laptop:v-[INFO]:-Obtaining Segment details from master... +20211216:15:13:33:152977 gpstop:laptop:v-[INFO]:-Greenplum Version: 'postgres (Greenplum Database) 6.18.2+dev.173.g55557f44f3 build dev' +20211216:15:13:33:152977 gpstop:laptop:v-[INFO]:-Signalling all postmaster processes to reload -- end_ignore -SELECT pg_sleep(1); - pg_sleep ----------- - -(1 row) - \! cp data/csmall.txt /tmp/csmall.txt -SELECT pg_sleep(15); - pg_sleep ----------- - -(1 row) - -- disable hardlimit feature. SELECT diskquota.disable_hardlimit(); disable_hardlimit @@ -51,10 +39,10 @@ SELECT diskquota.set_schema_quota('badquota', '1 MB'); (1 row) -SELECT pg_sleep(10); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) SELECT size, segid FROM diskquota.table_size diff --git a/tests/regress/expected/test_appendonly.out b/tests/regress/expected/test_appendonly.out index 4cdcc66d589..5775a0de4fe 100644 --- a/tests/regress/expected/test_appendonly.out +++ b/tests/regress/expected/test_appendonly.out @@ -14,10 +14,10 @@ CREATE INDEX index_t2 ON t_aoco(i); -- is identical to the result of pg_table_size(). INSERT INTO t_ao SELECT generate_series(1, 100); INSERT INTO t_aoco SELECT generate_series(1, 100); -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- Query the size of t_ao. @@ -59,10 +59,10 @@ SELECT diskquota.set_schema_quota('s_appendonly', '1 MB'); -- expect success. INSERT INTO t_ao SELECT generate_series(1, 1000); -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- expect fail. diff --git a/tests/regress/expected/test_column.out b/tests/regress/expected/test_column.out index 10143ce2081..79e4450ffdb 100644 --- a/tests/regress/expected/test_column.out +++ b/tests/regress/expected/test_column.out @@ -7,10 +7,10 @@ SELECT diskquota.set_schema_quota('scolumn', '1 MB'); (1 row) SET search_path TO scolumn; -SELECT pg_sleep(20); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) CREATE TABLE a2(i INT); @@ -18,10 +18,10 @@ NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
-- expect fail INSERT INTO a2 SELECT generate_series(1,100000); -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- expect fail @@ -30,10 +30,10 @@ ERROR: schema's disk space quota exceeded with name:scolumn ALTER TABLE a2 ADD COLUMN j VARCHAR(50); UPDATE a2 SET j = 'add value for column j'; ERROR: schema's disk space quota exceeded with name:scolumn -SELECT pg_sleep(20); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- expect insert failed after add column diff --git a/tests/regress/expected/test_copy.out b/tests/regress/expected/test_copy.out index c921bcac042..f8cc758536b 100644 --- a/tests/regress/expected/test_copy.out +++ b/tests/regress/expected/test_copy.out @@ -13,10 +13,10 @@ HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sur COPY c FROM '/tmp/csmall.txt'; -- expect failed INSERT INTO c SELECT generate_series(1,100000); -SELECT pg_sleep(10); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- expect copy fail diff --git a/tests/regress/expected/test_ctas_role.out b/tests/regress/expected/test_ctas_role.out index 0965c2bd316..71ca544db4d 100644 --- a/tests/regress/expected/test_ctas_role.out +++ b/tests/regress/expected/test_ctas_role.out @@ -13,16 +13,17 @@ SELECT diskquota.set_role_quota('hardlimit_r', '1MB'); (1 row) +GRANT USAGE ON SCHEMA diskquota TO hardlimit_r; SET ROLE hardlimit_r; -- heap table CREATE TABLE t1 AS SELECT generate_series(1, 10000000); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. [hardlimit] role's disk space quota exceeded -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- temp table @@ -30,21 +31,21 @@ CREATE TEMP TABLE t2 AS SELECT generate_series(1, 100000000); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. [hardlimit] role's disk space quota exceeded -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- toast table -CREATE TABLE toast_table AS SELECT ARRAY(SELECT * FROM generate_series(1,10000)) FROM generate_series(1, 100000); +CREATE TABLE toast_table AS SELECT ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'array' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
[hardlimit] role's disk space quota exceeded -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- ao table @@ -52,27 +53,25 @@ CREATE TABLE ao_table WITH (appendonly=true) AS SELECT generate_series(1, 100000 NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. [hardlimit] role's disk space quota exceeded -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- aocs table CREATE TABLE aocs_table WITH (appendonly=true, orientation=column) - AS SELECT i, ARRAY(SELECT * FROM generate_series(1,10000)) FROM generate_series(1, 100000) AS i; + AS SELECT i, ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000) AS i; NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. [hardlimit] role's disk space quota exceeded -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- disable hardlimit and do some clean-ups. -SELECT diskquota.disable_hardlimit(); -ERROR: permission denied for schema diskquota DROP TABLE IF EXISTS t1; NOTICE: table "t1" does not exist, skipping DROP TABLE IF EXISTS t2; @@ -84,4 +83,11 @@ NOTICE: table "ao_table" does not exist, skipping DROP TABLE IF EXISTS aocs_table; NOTICE: table "aocs_table" does not exist, skipping RESET ROLE; +REVOKE USAGE ON SCHEMA diskquota FROM hardlimit_r; DROP ROLE hardlimit_r; +SELECT diskquota.disable_hardlimit(); + disable_hardlimit +------------------- + +(1 row) + diff --git a/tests/regress/expected/test_ctas_schema.out b/tests/regress/expected/test_ctas_schema.out index f77e76f45f7..549a70a3156 100644 --- a/tests/regress/expected/test_ctas_schema.out +++ b/tests/regress/expected/test_ctas_schema.out @@ -13,21 +13,15 @@ SELECT diskquota.set_schema_quota('hardlimit_s', '1 MB'); (1 row) SET search_path TO hardlimit_s; -SELECT pg_sleep(5); - pg_sleep ----------- - -(1 row) - -- heap table CREATE TABLE t1 AS SELECT generate_series(1, 100000000); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. [hardlimit] schema's disk space quota exceeded -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- toast table @@ -36,10 +30,10 @@ CREATE TABLE toast_table NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'array' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
[hardlimit] schema's disk space quota exceeded -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- ao table @@ -47,10 +41,10 @@ CREATE TABLE ao_table WITH (appendonly=true) AS SELECT generate_series(1, 100000 NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. [hardlimit] schema's disk space quota exceeded -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- aocs table @@ -59,10 +53,10 @@ CREATE TABLE aocs_table WITH (appendonly=true, orientation=column) NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. [hardlimit] schema's disk space quota exceeded -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- disable hardlimit and do some clean-ups. diff --git a/tests/regress/expected/test_ctas_tablespace_role.out b/tests/regress/expected/test_ctas_tablespace_role.out index c18ca2be188..8b884ce58e4 100644 --- a/tests/regress/expected/test_ctas_tablespace_role.out +++ b/tests/regress/expected/test_ctas_tablespace_role.out @@ -14,6 +14,7 @@ NOTICE: tablespace "ctas_rolespc" does not exist, skipping CREATE TABLESPACE ctas_rolespc LOCATION '/tmp/ctas_rolespc'; CREATE ROLE hardlimit_r; NOTICE: resource queue required -- using default resource queue "pg_default" +GRANT USAGE ON SCHEMA diskquota TO hardlimit_r; GRANT ALL ON TABLESPACE ctas_rolespc TO hardlimit_r; SELECT diskquota.set_role_tablespace_quota('hardlimit_r', 'ctas_rolespc', '1 MB'); set_role_tablespace_quota @@ -28,10 +29,10 @@ CREATE TABLE t1 AS SELECT generate_series(1, 100000000); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. [hardlimit] tablespace-role's disk space quota exceeded -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- toast table @@ -40,10 +41,10 @@ CREATE TABLE toast_table NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'array' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
[hardlimit] tablespace-role's disk space quota exceeded -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- ao table @@ -51,10 +52,10 @@ CREATE TABLE ao_table WITH (appendonly=true) AS SELECT generate_series(1, 100000 NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. [hardlimit] tablespace-role's disk space quota exceeded -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- aocs table @@ -63,22 +64,28 @@ CREATE TABLE aocs_table WITH (appendonly=true, orientation=column) NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. [hardlimit] tablespace-role's disk space quota exceeded -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- disable hardlimit and do some clean-ups. DROP TABLE IF EXISTS t1; +NOTICE: table "t1" does not exist, skipping DROP TABLE IF EXISTS t2; +NOTICE: table "t2" does not exist, skipping DROP TABLE IF EXISTS toast_table; +NOTICE: table "toast_table" does not exist, skipping DROP TABLE IF EXISTS ao_table; +NOTICE: table "ao_table" does not exist, skipping DROP TABLE IF EXISTS aocs_table; +NOTICE: table "aocs_table" does not exist, skipping RESET ROLE; RESET default_tablespace; DROP TABLESPACE ctas_rolespc; \! rm -rf /tmp/ctas_rolespc; +REVOKE USAGE ON SCHEMA diskquota FROM hardlimit_r; DROP ROLE hardlimit_r; SELECT diskquota.disable_hardlimit(); disable_hardlimit diff --git a/tests/regress/expected/test_ctas_tablespace_schema.out b/tests/regress/expected/test_ctas_tablespace_schema.out index edded27111a..77543a00cc6 100644 --- a/tests/regress/expected/test_ctas_tablespace_schema.out +++ b/tests/regress/expected/test_ctas_tablespace_schema.out @@ -26,10 +26,10 @@ CREATE TABLE t1 AS SELECT generate_series(1, 100000000); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. [hardlimit] tablespace-schema's disk space quota exceeded -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- toast table @@ -38,10 +38,10 @@ CREATE TABLE toast_table NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'array' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
[hardlimit] tablespace-schema's disk space quota exceeded -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- ao table @@ -49,10 +49,10 @@ CREATE TABLE ao_table WITH (appendonly=true) AS SELECT generate_series(1, 100000 NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. [hardlimit] tablespace-schema's disk space quota exceeded -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- aocs table @@ -61,10 +61,10 @@ CREATE TABLE aocs_table WITH (appendonly=true, orientation=column) NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. [hardlimit] tablespace-schema's disk space quota exceeded -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- disable hardlimit and do some clean-ups diff --git a/tests/regress/expected/test_delete_quota.out b/tests/regress/expected/test_delete_quota.out index 1a5852d2e50..aca3418e315 100644 --- a/tests/regress/expected/test_delete_quota.out +++ b/tests/regress/expected/test_delete_quota.out @@ -12,10 +12,10 @@ NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. -- expect failed INSERT INTO c SELECT generate_series(1,100000); -SELECT pg_sleep(10); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- expect fail @@ -27,10 +27,10 @@ SELECT diskquota.set_schema_quota('deleteschema', '-1 MB'); (1 row) -SELECT pg_sleep(20); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) INSERT INTO c SELECT generate_series(1,100); diff --git a/tests/regress/expected/test_drop_table.out b/tests/regress/expected/test_drop_table.out index cbd9d6376de..31ac1879184 100644 --- a/tests/regress/expected/test_drop_table.out +++ b/tests/regress/expected/test_drop_table.out @@ -16,20 +16,20 @@ HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. 
Make sur INSERT INTO a SELECT generate_series(1,100); -- expect insert fail INSERT INTO a SELECT generate_series(1,100000); -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- expect insert fail INSERT INTO a2 SELECT generate_series(1,100); ERROR: schema's disk space quota exceeded with name:sdrtbl DROP TABLE a; -SELECT pg_sleep(20); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) INSERT INTO a2 SELECT generate_series(1,100); diff --git a/tests/regress/expected/test_extension.out b/tests/regress/expected/test_extension.out index ff831e27fa1..085019e2147 100644 --- a/tests/regress/expected/test_extension.out +++ b/tests/regress/expected/test_extension.out @@ -37,10 +37,10 @@ SELECT diskquota.set_schema_quota('SX', '1MB'); (1 row) INSERT INTO SX.a values(generate_series(0, 100000)); -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) INSERT INTO SX.a values(generate_series(0, 10)); @@ -88,10 +88,10 @@ SELECT diskquota.set_schema_quota('SX', '1MB'); (1 row) INSERT INTO SX.a values(generate_series(0, 100000)); -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) INSERT INTO SX.a values(generate_series(0, 10)); @@ -113,10 +113,10 @@ SELECT diskquota.set_schema_quota('SX', '1MB'); (1 row) INSERT INTO SX.a values(generate_series(0, 100000)); -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) INSERT INTO SX.a values(generate_series(0, 10)); @@ -138,10 +138,10 @@ SELECT diskquota.set_schema_quota('SX', '1MB'); (1 row) INSERT INTO SX.a values(generate_series(0, 100000)); -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) INSERT INTO SX.a values(generate_series(0, 10)); @@ -163,10 +163,10 @@ SELECT diskquota.set_schema_quota('SX', '1MB'); (1 row) INSERT INTO SX.a values(generate_series(0, 100000)); -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) INSERT INTO SX.a values(generate_series(0, 10)); @@ -188,10 +188,10 @@ SELECT diskquota.set_schema_quota('SX', '1MB'); (1 row) INSERT INTO SX.a values(generate_series(0, 100000)); -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) INSERT INTO SX.a values(generate_series(0, 10)); @@ -213,10 +213,10 @@ SELECT diskquota.set_schema_quota('SX', '1MB'); (1 row) INSERT INTO SX.a values(generate_series(0, 100000)); -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) INSERT INTO SX.a values(generate_series(0, 10)); @@ -238,10 +238,10 @@ SELECT diskquota.set_schema_quota('SX', '1MB'); (1 row) INSERT INTO SX.a values(generate_series(0, 100000)); -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) 
INSERT INTO SX.a values(generate_series(0, 10)); @@ -249,13 +249,13 @@ ERROR: schema's disk space quota exceeded with name:sx DROP TABLE SX.a; \c dbx9 CREATE EXTENSION diskquota; -ERROR: [diskquota] failed to create diskquota extension: too many databases to monitor (diskquota_utility.c:179) +ERROR: [diskquota] failed to create diskquota extension: too many databases to monitor (diskquota_utility.c:290) \! sleep 2 \! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 11 \c dbx10 CREATE EXTENSION diskquota; -ERROR: [diskquota] failed to create diskquota extension: too many databases to monitor (diskquota_utility.c:179) +ERROR: [diskquota] failed to create diskquota extension: too many databases to monitor (diskquota_utility.c:290) \! sleep 2 \! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 11 diff --git a/tests/regress/expected/test_fast_disk_check.out b/tests/regress/expected/test_fast_disk_check.out index 430cf48d0fe..5178157235c 100644 --- a/tests/regress/expected/test_fast_disk_check.out +++ b/tests/regress/expected/test_fast_disk_check.out @@ -5,10 +5,10 @@ CREATE TABLE a(i int); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT INTO a SELECT generate_series(1,200000); -SELECT pg_sleep(10); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) SELECT (pg_database_size(oid)-dbsize)/dbsize < 0.1 FROM pg_database, diskquota.show_fast_database_size_view WHERE datname='contrib_regression'; diff --git a/tests/regress/expected/test_index.out b/tests/regress/expected/test_index.out index 5455346ac82..f6724e915bb 100644 --- a/tests/regress/expected/test_index.out +++ b/tests/regress/expected/test_index.out @@ -17,10 +17,10 @@ SELECT diskquota.set_schema_tablespace_quota('indexschema1', 'indexspc','2 MB'); (1 row) -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) SELECT schema_name,tablespace_name,quota_in_mb,nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name='indexschema1' and tablespace_name='indexspc'; @@ -38,10 +38,10 @@ SELECT size, segid FROM diskquota.table_size , pg_class where tableid=oid and re -- create index for the table, index in default tablespace CREATE INDEX a_index ON test_index_a(i); INSERT INTO test_index_a SELECT generate_series(1,10000); -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- expect insert success @@ -61,10 +61,10 @@ SELECT size, segid FROM diskquota.table_size , pg_class where tableid=oid and (r -- add index to tablespace indexspc ALTER index a_index SET TABLESPACE indexspc; -SELECT pg_sleep(20); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) SELECT schema_name,tablespace_name,quota_in_mb,nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name ='indexschema1' and tablespace_name='indexspc'; @@ -76,8 +76,8 @@ SELECT schema_name,tablespace_name,quota_in_mb,nspsize_tablespace_in_bytes 
FROM SELECT size, segid FROM diskquota.table_size , pg_class where tableid=oid and (relname='test_index_a' or relname='a_index') and segid=-1; size | segid ---------+------- - 1441792 | -1 1212416 | -1 + 1441792 | -1 (2 rows) -- expect insert fail @@ -85,19 +85,19 @@ INSERT INTO test_index_a SELECT generate_series(1,100); ERROR: tablespace:indexspc schema:indexschema1 diskquota exceeded -- index tablespace quota exceeded ALTER table test_index_a SET TABLESPACE pg_default; -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- expect insert success INSERT INTO test_index_a SELECT generate_series(1,100); INSERT INTO test_index_a SELECT generate_series(1,200000); -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- expect insert fail diff --git a/tests/regress/expected/test_many_active_tables.out b/tests/regress/expected/test_many_active_tables.out index be8da723287..8bf8158708e 100644 --- a/tests/regress/expected/test_many_active_tables.out +++ b/tests/regress/expected/test_many_active_tables.out @@ -1003,10 +1003,10 @@ NOTICE: CREATE TABLE will create partition "t1_1_prt_999" for table "t1" INSERT INTO t1 SELECT pk, val FROM generate_series(1, 10000) AS val, generate_series(1, 999) AS pk; -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) SELECT count(*) >= 999 FROM diskquota.table_size WHERE size > 0; @@ -1016,10 +1016,10 @@ SELECT count(*) >= 999 FROM diskquota.table_size WHERE size > 0; (1 row) DROP TABLE t1; -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) SELECT count(*) < 999 FROM diskquota.table_size WHERE size > 0; diff --git a/tests/regress/expected/test_partition.out b/tests/regress/expected/test_partition.out index 6d67954584f..81e67202426 100644 --- a/tests/regress/expected/test_partition.out +++ b/tests/regress/expected/test_partition.out @@ -23,19 +23,19 @@ HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. 
Make sur NOTICE: CREATE TABLE will create partition "measurement_1_prt_feb06" for table "measurement" NOTICE: CREATE TABLE will create partition "measurement_1_prt_mar06" for table "measurement" INSERT INTO measurement SELECT generate_series(1,100), '2006-02-02' ,1,1; -SELECT pg_sleep(20); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) INSERT INTO measurement SELECT 1, '2006-02-02' ,1,1; -- expect insert fail INSERT INTO measurement SELECT generate_series(1,100000), '2006-03-02' ,1,1; -SELECT pg_sleep(10); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- expect insert fail @@ -46,10 +46,10 @@ INSERT INTO measurement SELECT 1, '2006-03-03' ,1,1; ERROR: schema's disk space quota exceeded with name:s8 DELETE FROM measurement WHERE logdate='2006-03-02'; VACUUM FULL measurement; -SELECT pg_sleep(20); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) INSERT INTO measurement SELECT 1, '2006-02-02' ,1,1; diff --git a/tests/regress/expected/test_pause_and_resume.out b/tests/regress/expected/test_pause_and_resume.out index f0cee34e2a6..36edad17637 100644 --- a/tests/regress/expected/test_pause_and_resume.out +++ b/tests/regress/expected/test_pause_and_resume.out @@ -15,10 +15,10 @@ SELECT diskquota.set_schema_quota('s1', '1 MB'); (1 row) -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- expect insert fail diff --git a/tests/regress/expected/test_primary_failure.out b/tests/regress/expected/test_primary_failure.out index 6eda03f54ab..f9b7c779a59 100644 --- a/tests/regress/expected/test_primary_failure.out +++ b/tests/regress/expected/test_primary_failure.out @@ -35,10 +35,10 @@ NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
INSERT INTO a SELECT generate_series(1,100); INSERT INTO a SELECT generate_series(1,100000); -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- expect insert fail @@ -80,175 +80,155 @@ SELECT diskquota.set_schema_quota('ftsr', '200 MB'); -- pull up failed primary -- start_ignore select pg_recoverseg((select datadir from gp_segment_configuration c where c.role='p' and c.content=-1), 'a'); - pg_recoverseg ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - 20190411:07:15:52:032290 gprecoverseg:instance-1:huanzhang-[INFO]:-Starting gprecoverseg with args: -a -d /home/huanzhang/workspace/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 + - 20190411:07:15:52:032290 gprecoverseg:instance-1:huanzhang-[INFO]:-local Greenplum Version: 'postgres (Greenplum Database) 6.0.0-alpha.0+dev.16010.gc767124 build dev-oss' + - 20190411:07:15:52:032290 gprecoverseg:instance-1:huanzhang-[INFO]:-master Greenplum Version: 'PostgreSQL 9.4.20 (Greenplum Database 6.0.0-alpha.0+dev.16010.gc767124 build dev-oss) on x86_64-unknown-linux-gnu, compiled by gcc (GCC) 4.8.5 20150623 (Red Hat 4.8.5-36), 64-bit compiled on Apr 11 2019 02:51:20'+ - 20190411:07:15:52:032290 gprecoverseg:instance-1:huanzhang-[INFO]:-Obtaining Segment details from master + - 20190411:07:15:53:032290 gprecoverseg:instance-1:huanzhang-[INFO]:-Heap checksum setting is consistent between master and the segments that are candidates for recoverseg + - 20190411:07:15:53:032290 gprecoverseg:instance-1:huanzhang-[INFO]:-Greenplum instance recovery parameters + - 20190411:07:15:53:032290 gprecoverseg:instance-1:huanzhang-[INFO]:---------------------------------------------------------- + - 20190411:07:15:53:032290 gprecoverseg:instance-1:huanzhang-[INFO]:-Recovery type = Standard + - 20190411:07:15:53:032290 gprecoverseg:instance-1:huanzhang-[INFO]:---------------------------------------------------------- + - 20190411:07:15:53:032290 gprecoverseg:instance-1:huanzhang-[INFO]:-Recovery 1 of 1 + - 20190411:07:15:53:032290 gprecoverseg:instance-1:huanzhang-[INFO]:---------------------------------------------------------- + - 20190411:07:15:53:032290 gprecoverseg:instance-1:huanzhang-[INFO]:- Synchronization mode = Incremental + - 20190411:07:15:53:032290 gprecoverseg:instance-1:huanzhang-[INFO]:- Failed instance host = instance-1 + - 20190411:07:15:53:032290 gprecoverseg:instance-1:huanzhang-[INFO]:- Failed instance address = instance-1 + - 20190411:07:15:53:032290 gprecoverseg:instance-1:huanzhang-[INFO]:- Failed instance directory = /home/huanzhang/workspace/gpdb/gpAux/gpdemo/datadirs/dbfast1/demoDataDir0 + - 20190411:07:15:53:032290 gprecoverseg:instance-1:huanzhang-[INFO]:- Failed instance port = 25432 + - 20190411:07:15:53:032290 gprecoverseg:instance-1:huanzhang-[INFO]:- Recovery Source instance host = instance-1 + - 20190411:07:15:53:032290 gprecoverseg:instance-1:huanzhang-[INFO]:- Recovery Source instance address = instance-1 + - 20190411:07:15:53:032290 gprecoverseg:instance-1:huanzhang-[INFO]:- Recovery Source instance directory = /home/huanzhang/workspace/gpdb/gpAux/gpdemo/datadirs/dbfast_mirror1/demoDataDir0 + - 20190411:07:15:53:032290 gprecoverseg:instance-1:huanzhang-[INFO]:- Recovery Source instance port
= 25435 + - 20190411:07:15:53:032290 gprecoverseg:instance-1:huanzhang-[INFO]:- Recovery Target = in-place + - 20190411:07:15:53:032290 gprecoverseg:instance-1:huanzhang-[INFO]:---------------------------------------------------------- + - 20190411:07:15:53:032290 gprecoverseg:instance-1:huanzhang-[INFO]:-1 segment(s) to recover + - 20190411:07:15:53:032290 gprecoverseg:instance-1:huanzhang-[INFO]:-Ensuring 1 failed segment(s) are stopped + - 20190411:07:15:54:032290 gprecoverseg:instance-1:huanzhang-[INFO]:-Ensuring that shared memory is cleaned up for stopped segments + - 20190411:07:15:54:032290 gprecoverseg:instance-1:huanzhang-[INFO]:-Updating configuration with new mirrors + - 20190411:07:15:54:032290 gprecoverseg:instance-1:huanzhang-[INFO]:-Updating mirrors + - 20190411:07:15:54:032290 gprecoverseg:instance-1:huanzhang-[INFO]:-Running pg_rewind on required mirrors + - 20190411:07:16:03:032290 gprecoverseg:instance-1:huanzhang-[INFO]:-Starting mirrors + - 20190411:07:16:03:032290 gprecoverseg:instance-1:huanzhang-[INFO]:-era is 3840cc8b7490e1d1_190411071143 + - 20190411:07:16:03:032290 gprecoverseg:instance-1:huanzhang-[INFO]:-Commencing parallel segment instance startup, please wait + - + - 20190411:07:16:06:032290 gprecoverseg:instance-1:huanzhang-[INFO]:-Process results + - 20190411:07:16:06:032290 gprecoverseg:instance-1:huanzhang-[INFO]:-Triggering FTS probe + - 20190411:07:16:06:032290 gprecoverseg:instance-1:huanzhang-[INFO]:-****************************************************************** + - 20190411:07:16:06:032290 gprecoverseg:instance-1:huanzhang-[INFO]:-Updating segments for streaming is completed + - 20190411:07:16:06:032290 gprecoverseg:instance-1:huanzhang-[INFO]:-For segments updated successfully, streaming will continue in the background + - 20190411:07:16:06:032290 gprecoverseg:instance-1:huanzhang-[INFO]:-Use gpstate -s to check the streaming progress + - 20190411:07:16:06:032290 gprecoverseg:instance-1:huanzhang-[INFO]:-****************************************************************** + - -(1 row) - -select pg_sleep(10); - pg_sleep ----------- + pg_recoverseg +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + 20211216:16:28:48:371791 gprecoverseg:laptop:v-[INFO]:-Starting gprecoverseg with args: -a -d /home/v/x/gh/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 + + 20211216:16:28:48:371791 gprecoverseg:laptop:v-[INFO]:-local Greenplum Version: 'postgres (Greenplum Database) 6.18.2+dev.173.g55557f44f3 build dev' + + 20211216:16:28:48:371791 gprecoverseg:laptop:v-[INFO]:-master Greenplum Version: 'PostgreSQL 9.4.26 (Greenplum Database 6.18.2+dev.173.g55557f44f3 build dev) on x86_64-unknown-linux-gnu, compiled by clang version 13.0.0, 64-bit compiled on Dec 16 2021 09:16:34 (with assert checking)'+ + 20211216:16:28:48:371791 gprecoverseg:laptop:v-[INFO]:-Obtaining Segment details from master + + 20211216:16:28:48:371791 gprecoverseg:laptop:v-[INFO]:-Heap checksum setting is consistent between master and the segments that are candidates for recoverseg + + 20211216:16:28:48:371791 gprecoverseg:laptop:v-[INFO]:-Greenplum instance recovery parameters + + 20211216:16:28:48:371791 gprecoverseg:laptop:v-[INFO]:---------------------------------------------------------- + + 20211216:16:28:48:371791 gprecoverseg:laptop:v-[INFO]:-Recovery type = Standard + +
20211216:16:28:48:371791 gprecoverseg:laptop:v-[INFO]:---------------------------------------------------------- + + 20211216:16:28:48:371791 gprecoverseg:laptop:v-[INFO]:-Recovery 1 of 1 + + 20211216:16:28:48:371791 gprecoverseg:laptop:v-[INFO]:---------------------------------------------------------- + + 20211216:16:28:48:371791 gprecoverseg:laptop:v-[INFO]:- Synchronization mode = Incremental + + 20211216:16:28:48:371791 gprecoverseg:laptop:v-[INFO]:- Failed instance host = laptop + + 20211216:16:28:48:371791 gprecoverseg:laptop:v-[INFO]:- Failed instance address = laptop + + 20211216:16:28:48:371791 gprecoverseg:laptop:v-[INFO]:- Failed instance directory = /home/v/x/gh/gpdb/gpAux/gpdemo/datadirs/dbfast1/demoDataDir0 + + 20211216:16:28:48:371791 gprecoverseg:laptop:v-[INFO]:- Failed instance port = 6002 + + 20211216:16:28:48:371791 gprecoverseg:laptop:v-[INFO]:- Recovery Source instance host = laptop + + 20211216:16:28:48:371791 gprecoverseg:laptop:v-[INFO]:- Recovery Source instance address = laptop + + 20211216:16:28:48:371791 gprecoverseg:laptop:v-[INFO]:- Recovery Source instance directory = /home/v/x/gh/gpdb/gpAux/gpdemo/datadirs/dbfast_mirror1/demoDataDir0 + + 20211216:16:28:48:371791 gprecoverseg:laptop:v-[INFO]:- Recovery Source instance port = 6005 + + 20211216:16:28:48:371791 gprecoverseg:laptop:v-[INFO]:- Recovery Target = in-place + + 20211216:16:28:48:371791 gprecoverseg:laptop:v-[INFO]:---------------------------------------------------------- + + 20211216:16:28:48:371791 gprecoverseg:laptop:v-[INFO]:-Starting to create new pg_hba.conf on primary segments + + 20211216:16:28:49:371791 gprecoverseg:laptop:v-[INFO]:-Successfully modified pg_hba.conf on primary segments to allow replication connections + + 20211216:16:28:49:371791 gprecoverseg:laptop:v-[INFO]:-1 segment(s) to recover + + 20211216:16:28:49:371791 gprecoverseg:laptop:v-[INFO]:-Ensuring 1 failed segment(s) are stopped + + 20211216:16:28:49:371791 gprecoverseg:laptop:v-[INFO]:-Ensuring that shared memory is cleaned up for stopped segments + + 20211216:16:28:50:371791 gprecoverseg:laptop:v-[INFO]:-Updating configuration with new mirrors + + 20211216:16:28:50:371791 gprecoverseg:laptop:v-[INFO]:-Updating mirrors + + 20211216:16:28:50:371791 gprecoverseg:laptop:v-[INFO]:-Running pg_rewind on failed segments + + laptop (dbid 2): 0/689186 kB (0%) copied + + laptop (dbid 2): syncing target data directory + + laptop (dbid 2): syncing target data directory + + laptop (dbid 2): Done!
+ + 20211216:16:28:55:371791 gprecoverseg:laptop:v-[INFO]:-Starting mirrors + + 20211216:16:28:55:371791 gprecoverseg:laptop:v-[INFO]:-era is 85b8357bd546c506_211216162717 + + 20211216:16:28:55:371791 gprecoverseg:laptop:v-[INFO]:-Commencing parallel segment instance startup, please wait + + 20211216:16:28:56:371791 gprecoverseg:laptop:v-[INFO]:-Process results + + 20211216:16:28:56:371791 gprecoverseg:laptop:v-[INFO]:-Triggering FTS probe + + 20211216:16:28:56:371791 gprecoverseg:laptop:v-[INFO]:-******************************** + + 20211216:16:28:56:371791 gprecoverseg:laptop:v-[INFO]:-Segments successfully recovered + + 20211216:16:28:56:371791 gprecoverseg:laptop:v-[INFO]:-******************************** + + 20211216:16:28:56:371791 gprecoverseg:laptop:v-[INFO]:-Recovered mirror segments need to sync WAL with primary segments + + 20211216:16:28:56:371791 gprecoverseg:laptop:v-[INFO]:-Use 'gpstate -e' to check progress of WAL sync remaining bytes + (1 row) select pg_recoverseg((select datadir from gp_segment_configuration c where c.role='p' and c.content=-1), 'ar'); - pg_recoverseg ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - 20190411:07:16:16:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-Starting gprecoverseg with args: -ar -d /home/huanzhang/workspace/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 + - 20190411:07:16:16:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-local Greenplum Version: 'postgres (Greenplum Database) 6.0.0-alpha.0+dev.16010.gc767124 build dev-oss' + - 20190411:07:16:16:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-master Greenplum Version: 'PostgreSQL 9.4.20 (Greenplum Database 6.0.0-alpha.0+dev.16010.gc767124 build dev-oss) on x86_64-unknown-linux-gnu, compiled by gcc (GCC) 4.8.5 20150623 (Red Hat 4.8.5-36), 64-bit compiled on Apr 11 2019 02:51:20'+ - 20190411:07:16:16:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-Obtaining Segment details from master + - 20190411:07:16:16:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-Greenplum instance recovery parameters + - 20190411:07:16:16:032561 gprecoverseg:instance-1:huanzhang-[INFO]:---------------------------------------------------------- + - 20190411:07:16:16:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-Recovery type = Rebalance + - 20190411:07:16:16:032561 gprecoverseg:instance-1:huanzhang-[INFO]:---------------------------------------------------------- + - 20190411:07:16:16:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-Unbalanced segment 1 of 2 + - 20190411:07:16:16:032561 gprecoverseg:instance-1:huanzhang-[INFO]:---------------------------------------------------------- + - 20190411:07:16:16:032561 gprecoverseg:instance-1:huanzhang-[INFO]:- Unbalanced instance host = instance-1 + - 20190411:07:16:16:032561 gprecoverseg:instance-1:huanzhang-[INFO]:- Unbalanced instance address = instance-1 + - 20190411:07:16:16:032561 gprecoverseg:instance-1:huanzhang-[INFO]:- Unbalanced instance directory = /home/huanzhang/workspace/gpdb/gpAux/gpdemo/datadirs/dbfast_mirror1/demoDataDir0 + - 20190411:07:16:16:032561 gprecoverseg:instance-1:huanzhang-[INFO]:- Unbalanced instance port = 25435 + - 20190411:07:16:16:032561 gprecoverseg:instance-1:huanzhang-[INFO]:- Balanced role = Mirror + - 20190411:07:16:16:032561 gprecoverseg:instance-1:huanzhang-[INFO]:- Current role =
Primary + - 20190411:07:16:16:032561 gprecoverseg:instance-1:huanzhang-[INFO]:---------------------------------------------------------- + - 20190411:07:16:16:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-Unbalanced segment 2 of 2 + - 20190411:07:16:16:032561 gprecoverseg:instance-1:huanzhang-[INFO]:---------------------------------------------------------- + - 20190411:07:16:16:032561 gprecoverseg:instance-1:huanzhang-[INFO]:- Unbalanced instance host = instance-1 + - 20190411:07:16:16:032561 gprecoverseg:instance-1:huanzhang-[INFO]:- Unbalanced instance address = instance-1 + - 20190411:07:16:16:032561 gprecoverseg:instance-1:huanzhang-[INFO]:- Unbalanced instance directory = /home/huanzhang/workspace/gpdb/gpAux/gpdemo/datadirs/dbfast1/demoDataDir0 + - 20190411:07:16:16:032561 gprecoverseg:instance-1:huanzhang-[INFO]:- Unbalanced instance port = 25432 + - 20190411:07:16:16:032561 gprecoverseg:instance-1:huanzhang-[INFO]:- Balanced role = Primary + - 20190411:07:16:16:032561 gprecoverseg:instance-1:huanzhang-[INFO]:- Current role = Mirror + - 20190411:07:16:16:032561 gprecoverseg:instance-1:huanzhang-[INFO]:---------------------------------------------------------- + - 20190411:07:16:16:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-Getting unbalanced segments + - 20190411:07:16:16:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-Stopping unbalanced primary segments + - 20190411:07:16:17:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-Triggering segment reconfiguration + - 20190411:07:16:24:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-Starting segment synchronization + - 20190411:07:16:24:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-=============================START ANOTHER RECOVER========================================= + - 20190411:07:16:24:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-local Greenplum Version: 'postgres (Greenplum Database) 6.0.0-alpha.0+dev.16010.gc767124 build dev-oss' + - 20190411:07:16:24:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-master Greenplum Version: 'PostgreSQL 9.4.20 (Greenplum Database 6.0.0-alpha.0+dev.16010.gc767124 build dev-oss) on x86_64-unknown-linux-gnu, compiled by gcc (GCC) 4.8.5 20150623 (Red Hat 4.8.5-36), 64-bit compiled on Apr 11 2019 02:51:20'+ - 20190411:07:16:24:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-Obtaining Segment details from master + - 20190411:07:16:24:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-Heap checksum setting is consistent between master and the segments that are candidates for recoverseg + - 20190411:07:16:24:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-Greenplum instance recovery parameters + - 20190411:07:16:24:032561 gprecoverseg:instance-1:huanzhang-[INFO]:---------------------------------------------------------- + - 20190411:07:16:24:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-Recovery type = Standard + - 20190411:07:16:24:032561 gprecoverseg:instance-1:huanzhang-[INFO]:---------------------------------------------------------- + - 20190411:07:16:24:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-Recovery 1 of 1 + - 20190411:07:16:24:032561 gprecoverseg:instance-1:huanzhang-[INFO]:---------------------------------------------------------- + - 20190411:07:16:24:032561 gprecoverseg:instance-1:huanzhang-[INFO]:- Synchronization mode = Incremental + - 20190411:07:16:24:032561 gprecoverseg:instance-1:huanzhang-[INFO]:- Failed instance host = instance-1 + - 20190411:07:16:24:032561 gprecoverseg:instance-1:huanzhang-[INFO]:- Failed instance address = instance-1 + - 20190411:07:16:24:032561
gprecoverseg:instance-1:huanzhang-[INFO]:- Failed instance directory = /home/huanzhang/workspace/gpdb/gpAux/gpdemo/datadirs/dbfast_mirror1/demoDataDir0 + - 20190411:07:16:24:032561 gprecoverseg:instance-1:huanzhang-[INFO]:- Failed instance port = 25435 + - 20190411:07:16:24:032561 gprecoverseg:instance-1:huanzhang-[INFO]:- Recovery Source instance host = instance-1 + - 20190411:07:16:24:032561 gprecoverseg:instance-1:huanzhang-[INFO]:- Recovery Source instance address = instance-1 + - 20190411:07:16:24:032561 gprecoverseg:instance-1:huanzhang-[INFO]:- Recovery Source instance directory = /home/huanzhang/workspace/gpdb/gpAux/gpdemo/datadirs/dbfast1/demoDataDir0 + - 20190411:07:16:24:032561 gprecoverseg:instance-1:huanzhang-[INFO]:- Recovery Source instance port = 25432 + - 20190411:07:16:24:032561 gprecoverseg:instance-1:huanzhang-[INFO]:- Recovery Target = in-place + - 20190411:07:16:24:032561 gprecoverseg:instance-1:huanzhang-[INFO]:---------------------------------------------------------- + - 20190411:07:16:24:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-1 segment(s) to recover + - 20190411:07:16:24:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-Ensuring 1 failed segment(s) are stopped + - 20190411:07:16:25:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-Ensuring that shared memory is cleaned up for stopped segments + - 20190411:07:16:26:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-Updating configuration with new mirrors + - 20190411:07:16:26:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-Updating mirrors + - 20190411:07:16:26:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-Running pg_rewind on required mirrors + - 20190411:07:16:27:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-Starting mirrors + - 20190411:07:16:27:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-era is 3840cc8b7490e1d1_190411071143 + - 20190411:07:16:27:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-Commencing parallel segment instance startup, please wait + - + - 20190411:07:16:28:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-Process results + - 20190411:07:16:28:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-Triggering FTS probe + - 20190411:07:16:28:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-****************************************************************** + - 20190411:07:16:28:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-Updating segments for streaming is completed + - 20190411:07:16:28:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-For segments updated successfully, streaming will continue in the background + - 20190411:07:16:28:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-Use gpstate -s to check the streaming progress + - 20190411:07:16:28:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-****************************************************************** + - 20190411:07:16:28:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-==============================END ANOTHER RECOVER========================================== + - 20190411:07:16:28:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-****************************************************************** + - 20190411:07:16:28:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-The rebalance operation has completed successfully + - 20190411:07:16:28:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-There is a resynchronization running in the background to bring all + - 20190411:07:16:28:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-segments in sync + - 20190411:07:16:28:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-Use gpstate 
-e to check the resynchronization progress + - 20190411:07:16:28:032561 gprecoverseg:instance-1:huanzhang-[INFO]:-****************************************************************** + - -(1 row) - -select pg_sleep(15); - pg_sleep ----------- + pg_recoverseg +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + 20211216:16:28:56:373757 gprecoverseg:laptop:v-[INFO]:-Starting gprecoverseg with args: -ar -d /home/v/x/gh/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 + + 20211216:16:28:56:373757 gprecoverseg:laptop:v-[INFO]:-local Greenplum Version: 'postgres (Greenplum Database) 6.18.2+dev.173.g55557f44f3 build dev' + + 20211216:16:28:56:373757 gprecoverseg:laptop:v-[INFO]:-master Greenplum Version: 'PostgreSQL 9.4.26 (Greenplum Database 6.18.2+dev.173.g55557f44f3 build dev) on x86_64-unknown-linux-gnu, compiled by clang version 13.0.0, 64-bit compiled on Dec 16 2021 09:16:34 (with assert checking)'+ + 20211216:16:28:56:373757 gprecoverseg:laptop:v-[INFO]:-Obtaining Segment details from master + + 20211216:16:28:56:373757 gprecoverseg:laptop:v-[INFO]:-Greenplum instance recovery parameters + + 20211216:16:28:56:373757 gprecoverseg:laptop:v-[INFO]:---------------------------------------------------------- + + 20211216:16:28:56:373757 gprecoverseg:laptop:v-[INFO]:-Recovery type = Rebalance + + 20211216:16:28:56:373757 gprecoverseg:laptop:v-[INFO]:---------------------------------------------------------- + + 20211216:16:28:56:373757 gprecoverseg:laptop:v-[INFO]:-Unbalanced segment 1 of 2 + + 20211216:16:28:56:373757 gprecoverseg:laptop:v-[INFO]:---------------------------------------------------------- + + 20211216:16:28:56:373757 gprecoverseg:laptop:v-[INFO]:- Unbalanced instance host = laptop + + 20211216:16:28:56:373757 gprecoverseg:laptop:v-[INFO]:- Unbalanced instance address = laptop + + 20211216:16:28:56:373757 gprecoverseg:laptop:v-[INFO]:- Unbalanced instance directory = /home/v/x/gh/gpdb/gpAux/gpdemo/datadirs/dbfast_mirror1/demoDataDir0 + + 20211216:16:28:56:373757 gprecoverseg:laptop:v-[INFO]:- Unbalanced instance port = 6005 + + 20211216:16:28:56:373757 gprecoverseg:laptop:v-[INFO]:- Balanced role = Mirror + + 20211216:16:28:56:373757 gprecoverseg:laptop:v-[INFO]:- Current role = Primary + + 20211216:16:28:56:373757 gprecoverseg:laptop:v-[INFO]:---------------------------------------------------------- + + 20211216:16:28:56:373757 gprecoverseg:laptop:v-[INFO]:-Unbalanced segment 2 of 2 + + 20211216:16:28:56:373757 gprecoverseg:laptop:v-[INFO]:---------------------------------------------------------- + + 20211216:16:28:56:373757 gprecoverseg:laptop:v-[INFO]:- Unbalanced instance host = laptop + + 20211216:16:28:56:373757 gprecoverseg:laptop:v-[INFO]:- Unbalanced instance address = laptop + + 20211216:16:28:56:373757 gprecoverseg:laptop:v-[INFO]:- Unbalanced instance directory = /home/v/x/gh/gpdb/gpAux/gpdemo/datadirs/dbfast1/demoDataDir0 + + 20211216:16:28:56:373757 gprecoverseg:laptop:v-[INFO]:- Unbalanced instance port = 6002 + + 20211216:16:28:56:373757 gprecoverseg:laptop:v-[INFO]:- Balanced role = Primary + + 20211216:16:28:56:373757 gprecoverseg:laptop:v-[INFO]:- Current role = Mirror + + 20211216:16:28:56:373757 gprecoverseg:laptop:v-[INFO]:---------------------------------------------------------- + + 20211216:16:28:56:373757
gprecoverseg:laptop:v-[INFO]:-Determining primary and mirror segment pairs to rebalance + + 20211216:16:28:56:373757 gprecoverseg:laptop:v-[INFO]:-Stopping unbalanced primary segments + + 20211216:16:28:57:373757 gprecoverseg:laptop:v-[INFO]:-Triggering segment reconfiguration + + 20211216:16:29:04:373757 gprecoverseg:laptop:v-[INFO]:-Starting segment synchronization + + 20211216:16:29:04:373757 gprecoverseg:laptop:v-[INFO]:-=============================START ANOTHER RECOVER========================================= + + 20211216:16:29:04:373757 gprecoverseg:laptop:v-[INFO]:-local Greenplum Version: 'postgres (Greenplum Database) 6.18.2+dev.173.g55557f44f3 build dev' + + 20211216:16:29:04:373757 gprecoverseg:laptop:v-[INFO]:-master Greenplum Version: 'PostgreSQL 9.4.26 (Greenplum Database 6.18.2+dev.173.g55557f44f3 build dev) on x86_64-unknown-linux-gnu, compiled by clang version 13.0.0, 64-bit compiled on Dec 16 2021 09:16:34 (with assert checking)'+ + 20211216:16:29:04:373757 gprecoverseg:laptop:v-[INFO]:-Obtaining Segment details from master + + 20211216:16:29:04:373757 gprecoverseg:laptop:v-[INFO]:-Heap checksum setting is consistent between master and the segments that are candidates for recoverseg + + 20211216:16:29:04:373757 gprecoverseg:laptop:v-[INFO]:-Greenplum instance recovery parameters + + 20211216:16:29:04:373757 gprecoverseg:laptop:v-[INFO]:---------------------------------------------------------- + + 20211216:16:29:04:373757 gprecoverseg:laptop:v-[INFO]:-Recovery type = Standard + + 20211216:16:29:04:373757 gprecoverseg:laptop:v-[INFO]:---------------------------------------------------------- + + 20211216:16:29:04:373757 gprecoverseg:laptop:v-[INFO]:-Recovery 1 of 1 + + 20211216:16:29:04:373757 gprecoverseg:laptop:v-[INFO]:---------------------------------------------------------- + + 20211216:16:29:04:373757 gprecoverseg:laptop:v-[INFO]:- Synchronization mode = Incremental + + 20211216:16:29:04:373757 gprecoverseg:laptop:v-[INFO]:- Failed instance host = laptop + + 20211216:16:29:04:373757 gprecoverseg:laptop:v-[INFO]:- Failed instance address = laptop + + 20211216:16:29:04:373757 gprecoverseg:laptop:v-[INFO]:- Failed instance directory = /home/v/x/gh/gpdb/gpAux/gpdemo/datadirs/dbfast_mirror1/demoDataDir0 + + 20211216:16:29:04:373757 gprecoverseg:laptop:v-[INFO]:- Failed instance port = 6005 + + 20211216:16:29:04:373757 gprecoverseg:laptop:v-[INFO]:- Recovery Source instance host = laptop + + 20211216:16:29:04:373757 gprecoverseg:laptop:v-[INFO]:- Recovery Source instance address = laptop + + 20211216:16:29:04:373757 gprecoverseg:laptop:v-[INFO]:- Recovery Source instance directory = /home/v/x/gh/gpdb/gpAux/gpdemo/datadirs/dbfast1/demoDataDir0 + + 20211216:16:29:04:373757 gprecoverseg:laptop:v-[INFO]:- Recovery Source instance port = 6002 + + 20211216:16:29:04:373757 gprecoverseg:laptop:v-[INFO]:- Recovery Target = in-place + + 20211216:16:29:04:373757 gprecoverseg:laptop:v-[INFO]:---------------------------------------------------------- + + 20211216:16:29:04:373757 gprecoverseg:laptop:v-[INFO]:-Starting to create new pg_hba.conf on primary segments + + 20211216:16:29:05:373757 gprecoverseg:laptop:v-[INFO]:-Successfully modified pg_hba.conf on primary segments to allow replication connections + + 20211216:16:29:05:373757 gprecoverseg:laptop:v-[INFO]:-1 segment(s) to recover + + 20211216:16:29:05:373757 gprecoverseg:laptop:v-[INFO]:-Ensuring 1 failed segment(s) are stopped + + 20211216:16:29:05:373757 gprecoverseg:laptop:v-[INFO]:-Ensuring that shared memory is cleaned up for stopped
segments + + 20211216:16:29:06:373757 gprecoverseg:laptop:v-[INFO]:-Updating configuration with new mirrors + + 20211216:16:29:06:373757 gprecoverseg:laptop:v-[INFO]:-Updating mirrors + + 20211216:16:29:06:373757 gprecoverseg:laptop:v-[INFO]:-Running pg_rewind on failed segments + + laptop (dbid 5): no rewind required + + 20211216:16:29:07:373757 gprecoverseg:laptop:v-[INFO]:-Starting mirrors + + 20211216:16:29:07:373757 gprecoverseg:laptop:v-[INFO]:-era is 85b8357bd546c506_211216162717 + + 20211216:16:29:07:373757 gprecoverseg:laptop:v-[INFO]:-Commencing parallel segment instance startup, please wait + + 20211216:16:29:08:373757 gprecoverseg:laptop:v-[INFO]:-Process results + + 20211216:16:29:08:373757 gprecoverseg:laptop:v-[INFO]:-Triggering FTS probe + + 20211216:16:29:08:373757 gprecoverseg:laptop:v-[INFO]:-******************************** + + 20211216:16:29:08:373757 gprecoverseg:laptop:v-[INFO]:-Segments successfully recovered + + 20211216:16:29:08:373757 gprecoverseg:laptop:v-[INFO]:-******************************** + + 20211216:16:29:08:373757 gprecoverseg:laptop:v-[INFO]:-Recovered mirror segments need to sync WAL with primary segments + + 20211216:16:29:08:373757 gprecoverseg:laptop:v-[INFO]:-Use 'gpstate -e' to check progress of WAL sync remaining bytes + + 20211216:16:29:08:373757 gprecoverseg:laptop:v-[INFO]:-==============================END ANOTHER RECOVER========================================== + + 20211216:16:29:08:373757 gprecoverseg:laptop:v-[INFO]:-****************************************************************** + + 20211216:16:29:08:373757 gprecoverseg:laptop:v-[INFO]:-The rebalance operation has completed successfully + + 20211216:16:29:08:373757 gprecoverseg:laptop:v-[INFO]:-****************************************************************** + (1 row) select pg_recoverseg((select datadir from gp_segment_configuration c where c.role='p' and c.content=-1), 'a'); - pg_recoverseg ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - 20190411:07:16:43:000456 gprecoverseg:instance-1:huanzhang-[INFO]:-Starting gprecoverseg with args: -a -d /home/huanzhang/workspace/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 + - 20190411:07:16:43:000456 gprecoverseg:instance-1:huanzhang-[INFO]:-local Greenplum Version: 'postgres (Greenplum Database) 6.0.0-alpha.0+dev.16010.gc767124 build dev-oss' + - 20190411:07:16:43:000456 gprecoverseg:instance-1:huanzhang-[INFO]:-master Greenplum Version: 'PostgreSQL 9.4.20 (Greenplum Database 6.0.0-alpha.0+dev.16010.gc767124 build dev-oss) on x86_64-unknown-linux-gnu, compiled by gcc (GCC) 4.8.5 20150623 (Red Hat 4.8.5-36), 64-bit compiled on Apr 11 2019 02:51:20'+ - 20190411:07:16:43:000456 gprecoverseg:instance-1:huanzhang-[INFO]:-Obtaining Segment details from master + - 20190411:07:16:43:000456 gprecoverseg:instance-1:huanzhang-[INFO]:-No segments to recover + - -(1 row) - -select pg_sleep(10); - pg_sleep ----------- + pg_recoverseg +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + 20211216:16:29:08:375579 gprecoverseg:laptop:v-[INFO]:-Starting gprecoverseg with args: -a -d
/home/v/x/gh/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 + + 20211216:16:29:08:375579 gprecoverseg:laptop:v-[INFO]:-local Greenplum Version: 'postgres (Greenplum Database) 6.18.2+dev.173.g55557f44f3 build dev' + + 20211216:16:29:08:375579 gprecoverseg:laptop:v-[INFO]:-master Greenplum Version: 'PostgreSQL 9.4.26 (Greenplum Database 6.18.2+dev.173.g55557f44f3 build dev) on x86_64-unknown-linux-gnu, compiled by clang version 13.0.0, 64-bit compiled on Dec 16 2021 09:16:34 (with assert checking)'+ + 20211216:16:29:08:375579 gprecoverseg:laptop:v-[INFO]:-Obtaining Segment details from master + + 20211216:16:29:08:375579 gprecoverseg:laptop:v-[INFO]:-No segments to recover + (1 row) select pg_recoverseg((select datadir from gp_segment_configuration c where c.role='p' and c.content=-1), 'ar'); - pg_recoverseg ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - 20190411:07:16:53:000492 gprecoverseg:instance-1:huanzhang-[INFO]:-Starting gprecoverseg with args: -ar -d /home/huanzhang/workspace/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 + - 20190411:07:16:53:000492 gprecoverseg:instance-1:huanzhang-[INFO]:-local Greenplum Version: 'postgres (Greenplum Database) 6.0.0-alpha.0+dev.16010.gc767124 build dev-oss' + - 20190411:07:16:53:000492 gprecoverseg:instance-1:huanzhang-[INFO]:-master Greenplum Version: 'PostgreSQL 9.4.20 (Greenplum Database 6.0.0-alpha.0+dev.16010.gc767124 build dev-oss) on x86_64-unknown-linux-gnu, compiled by gcc (GCC) 4.8.5 20150623 (Red Hat 4.8.5-36), 64-bit compiled on Apr 11 2019 02:51:20'+ - 20190411:07:16:53:000492 gprecoverseg:instance-1:huanzhang-[INFO]:-Obtaining Segment details from master + - 20190411:07:16:54:000492 gprecoverseg:instance-1:huanzhang-[INFO]:-No segments are running in their non-preferred role and need to be rebalanced + - -(1 row) - -select pg_sleep(10); - pg_sleep ----------- + pg_recoverseg +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + 20211216:16:29:08:375616 gprecoverseg:laptop:v-[INFO]:-Starting gprecoverseg with args: -ar -d /home/v/x/gh/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 + + 20211216:16:29:09:375616 gprecoverseg:laptop:v-[INFO]:-local Greenplum Version: 'postgres (Greenplum Database) 6.18.2+dev.173.g55557f44f3 build dev' + + 20211216:16:29:09:375616 gprecoverseg:laptop:v-[INFO]:-master Greenplum Version: 'PostgreSQL 9.4.26 (Greenplum Database 6.18.2+dev.173.g55557f44f3 build dev) on x86_64-unknown-linux-gnu, compiled by clang version 13.0.0, 64-bit compiled on Dec 16 2021 09:16:34 (with assert checking)'+ + 20211216:16:29:09:375616 gprecoverseg:laptop:v-[INFO]:-Obtaining Segment details from master + + 20211216:16:29:09:375616 gprecoverseg:laptop:v-[INFO]:-No segments are running in their non-preferred role and need to be rebalanced + (1 row) @@ -260,14 +240,13 @@ select content, preferred_role, role, status, mode from gp_segment_configuration 0 | m | m | u | s (2 rows) --- no sleep, it will take effect immediately -SELECT pg_sleep(10); - pg_sleep ----------- - +-- end_ignore +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row)
--- end_ignore SELECT quota_in_mb, nspsize_in_bytes from diskquota.show_fast_schema_quota_view where schema_name='ftsr'; quota_in_mb | nspsize_in_bytes -------------+------------------ diff --git a/tests/regress/expected/test_relation_cache.out b/tests/regress/expected/test_relation_cache.out index dbdba6482c6..862cf512aa8 100644 --- a/tests/regress/expected/test_relation_cache.out +++ b/tests/regress/expected/test_relation_cache.out @@ -23,10 +23,10 @@ select count(*) from diskquota.show_relation_cache_all_seg(); (1 row) commit; -select pg_sleep(5); - pg_sleep ----------- - +select diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) select count(*) from diskquota.show_relation_cache_all_seg(); @@ -55,10 +55,10 @@ select diskquota.check_relation_cache(); (1 row) commit; -select pg_sleep(5); - pg_sleep ----------- - +select diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) select count(*) from diskquota.show_relation_cache_all_seg(); @@ -87,10 +87,10 @@ select diskquota.check_relation_cache(); (1 row) commit; -select pg_sleep(5); - pg_sleep ----------- - +select diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) select count(*) from diskquota.show_relation_cache_all_seg(); @@ -119,10 +119,10 @@ select diskquota.check_relation_cache(); (1 row) commit; -select pg_sleep(5); - pg_sleep ----------- - +select diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) select count(*) from diskquota.show_relation_cache_all_seg(); diff --git a/tests/regress/expected/test_rename.out b/tests/regress/expected/test_rename.out index 687017ee0d8..53e69a03873 100644 --- a/tests/regress/expected/test_rename.out +++ b/tests/regress/expected/test_rename.out @@ -12,10 +12,10 @@ NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. -- expect insert fail INSERT INTO a SELECT generate_series(1,100000); -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- expect insert fail @@ -51,10 +51,10 @@ HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sur ALTER TABLE a OWNER TO srerole; -- expect insert fail INSERT INTO a SELECT generate_series(1,100000); -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- expect insert fail diff --git a/tests/regress/expected/test_reschema.out b/tests/regress/expected/test_reschema.out index 5bc6db3a480..3fe38be06d5 100644 --- a/tests/regress/expected/test_reschema.out +++ b/tests/regress/expected/test_reschema.out @@ -12,10 +12,10 @@ NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
-- expect insert fail INSERT INTO a SELECT generate_series(1,100000); -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- expect insert fail when exceed quota limit @@ -28,10 +28,10 @@ SELECT diskquota.set_schema_quota('srE', '1 GB'); (1 row) -SELECT pg_sleep(20); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- expect insert succeed diff --git a/tests/regress/expected/test_role.out b/tests/regress/expected/test_role.out index 9b21e01e451..69f89ed33ff 100644 --- a/tests/regress/expected/test_role.out +++ b/tests/regress/expected/test_role.out @@ -22,10 +22,10 @@ SELECT diskquota.set_role_quota('u1', '1 MB'); INSERT INTO b SELECT generate_series(1,100); -- expect insert fail INSERT INTO b SELECT generate_series(1,100000); -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- expect insert fail @@ -41,10 +41,10 @@ SELECT diskquota.set_role_quota('u1', '-1 MB'); (1 row) -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- expect insert success @@ -56,10 +56,10 @@ SELECT diskquota.set_role_quota('u1', '1 MB'); (1 row) -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- expect insert fail @@ -72,10 +72,10 @@ SELECT role_name, quota_in_mb, rolsize_in_bytes FROM diskquota.show_fast_role_qu (1 row) ALTER TABLE b OWNER TO u2; -SELECT pg_sleep(20); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- expect insert succeed diff --git a/tests/regress/expected/test_schema.out b/tests/regress/expected/test_schema.out index 0d6c8919f3c..a19384c4e8e 100644 --- a/tests/regress/expected/test_schema.out +++ b/tests/regress/expected/test_schema.out @@ -5,24 +5,18 @@ CREATE TABLE a(i int); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
INSERT INTO a SELECT generate_series(1,100); --- expect insert fail +-- expect insert success INSERT INTO a SELECT generate_series(1,100000); -SELECT pg_sleep(5); - pg_sleep ----------- - -(1 row) - SELECT diskquota.set_schema_quota('s1', '1 MB'); set_schema_quota ------------------ (1 row) -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- expect insert fail @@ -37,10 +31,10 @@ ERROR: schema's disk space quota exceeded with name:s1 -- Test alter table set schema CREATE SCHEMA s2; ALTER TABLE s1.a SET SCHEMA s2; -SELECT pg_sleep(20); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- expect insert succeed @@ -51,10 +45,10 @@ ALTER TABLE s2.a SET SCHEMA badquota; -- expect failed INSERT INTO badquota.a SELECT generate_series(0, 100); ERROR: schema's disk space quota exceeded with name:badquota -SELECT pg_sleep(10); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) SELECT schema_name, quota_in_mb FROM diskquota.show_fast_schema_quota_view WHERE schema_name = 's1'; diff --git a/tests/regress/expected/test_tablespace_role.out b/tests/regress/expected/test_tablespace_role.out index f14e1600738..23d25941587 100644 --- a/tests/regress/expected/test_tablespace_role.out +++ b/tests/regress/expected/test_tablespace_role.out @@ -25,31 +25,25 @@ ALTER TABLE b2 OWNER TO rolespcu1; INSERT INTO b SELECT generate_series(1,100); -- expect insert success INSERT INTO b SELECT generate_series(1,100000); -SELECT pg_sleep(5); - pg_sleep ----------- - -(1 row) - SELECT diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '1 MB'); set_role_tablespace_quota --------------------------- (1 row) -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- expect insert success INSERT INTO b SELECT generate_series(1,100); ALTER TABLE b OWNER TO rolespcu1; -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- expect insert fail @@ -67,10 +61,10 @@ SELECT role_name, tablespace_name, quota_in_mb, rolsize_tablespace_in_bytes FROM -- Test alter owner ALTER TABLE b OWNER TO rolespcu2; -SELECT pg_sleep(20); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- expect insert succeed @@ -78,10 +72,10 @@ INSERT INTO b SELECT generate_series(1,100); -- expect insert succeed INSERT INTO b2 SELECT generate_series(1,100); ALTER TABLE b OWNER TO rolespcu1; -SELECT pg_sleep(20); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- expect insert fail @@ -95,20 +89,20 @@ DROP TABLESPACE IF EXISTS rolespc2; NOTICE: tablespace "rolespc2" does not exist, skipping CREATE TABLESPACE rolespc2 LOCATION '/tmp/rolespc2'; ALTER TABLE b SET TABLESPACE rolespc2; -SELECT pg_sleep(20); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- expect insert succeed INSERT INTO b SELECT generate_series(1,100); -- alter table b back to tablespace rolespc ALTER TABLE b SET 
TABLESPACE rolespc; -SELECT pg_sleep(20); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- expect insert fail @@ -121,20 +115,20 @@ SELECT diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '10 MB'); (1 row) -SELECT pg_sleep(20); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- expect insert success INSERT INTO b SELECT generate_series(1,100); -- expect insert success INSERT INTO b SELECT generate_series(1,1000000); -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- expect insert fail @@ -147,10 +141,10 @@ SELECT diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '-1 MB'); (1 row) -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- expect insert success diff --git a/tests/regress/expected/test_tablespace_role_perseg.out b/tests/regress/expected/test_tablespace_role_perseg.out index 8c960eeb38e..0dffd077ccd 100644 --- a/tests/regress/expected/test_tablespace_role_perseg.out +++ b/tests/regress/expected/test_tablespace_role_perseg.out @@ -28,10 +28,10 @@ SELECT diskquota.set_role_tablespace_quota('rolespc_persegu1', 'rolespc_perseg', INSERT INTO b SELECT generate_series(1,100); -- expect insert success INSERT INTO b SELECT generate_series(1,100000); -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- expect insert fail @@ -44,10 +44,10 @@ SELECT diskquota.set_role_tablespace_quota('rolespc_persegu1', 'rolespc_perseg', (1 row) -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- expect insert success @@ -65,10 +65,10 @@ SELECT diskquota.set_per_segment_quota('rolespc_perseg', '0.1'); (1 row) -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) ---- expect insert fail by tablespace schema perseg quota @@ -76,19 +76,19 @@ INSERT INTO b SELECT generate_series(1,100); ERROR: tablespace:rolespc_perseg role:rolespc_persegu1 diskquota exceeded per segment quota -- Test alter owner ALTER TABLE b OWNER TO rolespc_persegu2; -SELECT pg_sleep(20); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- expect insert succeed INSERT INTO b SELECT generate_series(1,100); ALTER TABLE b OWNER TO rolespc_persegu1; -SELECT pg_sleep(20); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- expect insert fail @@ -102,20 +102,20 @@ DROP TABLESPACE IF EXISTS rolespc_perseg2; NOTICE: tablespace "rolespc_perseg2" does not exist, skipping CREATE TABLESPACE rolespc_perseg2 LOCATION '/tmp/rolespc_perseg2'; ALTER TABLE b SET TABLESPACE rolespc_perseg2; -SELECT pg_sleep(20); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- expect insert succeed INSERT INTO b SELECT generate_series(1,100); -- alter 
table b back to tablespace rolespc_perseg ALTER TABLE b SET TABLESPACE rolespc_perseg; -SELECT pg_sleep(20); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- expect insert fail @@ -128,10 +128,10 @@ SELECT diskquota.set_per_segment_quota('rolespc_perseg', 3.1); (1 row) -SELECT pg_sleep(20); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) SELECT role_name, tablespace_name, quota_in_mb, rolsize_tablespace_in_bytes FROM diskquota.show_fast_role_tablespace_quota_view WHERE role_name = 'rolespc_persegu1' and tablespace_name = 'rolespc_perseg'; @@ -148,10 +148,10 @@ SELECT diskquota.set_per_segment_quota('rolespc_perseg', 0.11); (1 row) -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- expect insert fail @@ -164,10 +164,10 @@ SELECT diskquota.set_per_segment_quota('rolespc_perseg', -1); (1 row) -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- expect insert success @@ -178,10 +178,10 @@ SELECT diskquota.set_per_segment_quota('rolespc_perseg', 0.11); (1 row) -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- expect insert fail @@ -194,10 +194,10 @@ SELECT diskquota.set_role_tablespace_quota('rolespc_persegu1', 'rolespc_perseg', (1 row) -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- expect insert success diff --git a/tests/regress/expected/test_tablespace_schema.out b/tests/regress/expected/test_tablespace_schema.out index f4f4ef4e000..6282d84a62b 100644 --- a/tests/regress/expected/test_tablespace_schema.out +++ b/tests/regress/expected/test_tablespace_schema.out @@ -13,22 +13,16 @@ HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. 
Make sur INSERT INTO a SELECT generate_series(1,100); -- expect insert fail INSERT INTO a SELECT generate_series(1,100000); -SELECT pg_sleep(5); - pg_sleep ----------- - -(1 row) - SELECT diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc','1 MB'); set_schema_tablespace_quota ----------------------------- (1 row) -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- expect insert fail @@ -43,10 +37,10 @@ ERROR: tablespace:schemaspc schema:spcs1 diskquota exceeded -- Test alter table set schema CREATE SCHEMA spcs2; ALTER TABLE spcs1.a SET SCHEMA spcs2; -SELECT pg_sleep(20); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- expect insert succeed @@ -54,10 +48,10 @@ INSERT INTO a2 SELECT generate_series(1,200); -- expect insert succeed INSERT INTO spcs2.a SELECT generate_series(1,200); ALTER TABLE spcs2.a SET SCHEMA spcs1; -SELECT pg_sleep(10); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- expect insert fail @@ -77,19 +71,19 @@ DROP TABLESPACE IF EXISTS schemaspc2; NOTICE: tablespace "schemaspc2" does not exist, skipping CREATE TABLESPACE schemaspc2 LOCATION '/tmp/schemaspc2'; ALTER TABLE a SET TABLESPACE schemaspc2; -SELECT pg_sleep(20); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- expect insert succeed INSERT INTO a SELECT generate_series(1,200); ALTER TABLE a SET TABLESPACE schemaspc; -SELECT pg_sleep(20); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- expect insert fail @@ -102,20 +96,20 @@ SELECT diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc', '10 MB'); (1 row) -SELECT pg_sleep(20); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- expect insert success INSERT INTO a SELECT generate_series(1,100); -- expect insert success INSERT INTO a SELECT generate_series(1,1000000); -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- expect insert fail @@ -128,10 +122,10 @@ SELECT diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc', '-1 MB'); (1 row) -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- expect insert success diff --git a/tests/regress/expected/test_tablespace_schema_perseg.out b/tests/regress/expected/test_tablespace_schema_perseg.out index d5506c67807..a91218e6ffe 100644 --- a/tests/regress/expected/test_tablespace_schema_perseg.out +++ b/tests/regress/expected/test_tablespace_schema_perseg.out @@ -20,10 +20,10 @@ HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. 
Make sur INSERT INTO a SELECT generate_series(1,100); -- expect insert success INSERT INTO a SELECT generate_series(1,100000); -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- expect insert fail by tablespace schema diskquota @@ -36,10 +36,10 @@ SELECT diskquota.set_schema_tablespace_quota('spcs1_perseg', 'schemaspc_perseg', (1 row) -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- expect insert success @@ -56,10 +56,10 @@ SELECT diskquota.set_per_segment_quota('schemaspc_perseg', 0.1); (1 row) -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) ---- expect insert fail by tablespace schema perseg quota @@ -68,19 +68,19 @@ ERROR: tablespace:schemaspc_perseg schema:spcs1_perseg diskquota exceeded per s -- Test alter table set schema CREATE SCHEMA spcs2_perseg; ALTER TABLE spcs1_perseg.a SET SCHEMA spcs2_perseg; -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- expect insert succeed INSERT INTO spcs2_perseg.a SELECT generate_series(1,200); ALTER TABLE spcs2_perseg.a SET SCHEMA spcs1_perseg; -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- expect insert fail @@ -100,19 +100,19 @@ DROP TABLESPACE IF EXISTS schemaspc_perseg2; NOTICE: tablespace "schemaspc_perseg2" does not exist, skipping CREATE TABLESPACE schemaspc_perseg2 LOCATION '/tmp/schemaspc_perseg2'; ALTER TABLE a SET TABLESPACE schemaspc_perseg2; -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- expect insert succeed INSERT INTO a SELECT generate_series(1,200); ALTER TABLE a SET TABLESPACE schemaspc_perseg; -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- expect insert fail @@ -125,10 +125,10 @@ SELECT diskquota.set_per_segment_quota('schemaspc_perseg', 3.1); (1 row) -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- expect insert success @@ -139,10 +139,10 @@ SELECT diskquota.set_per_segment_quota('schemaspc_perseg', 0.123); (1 row) -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) ---- expect insert fail @@ -155,10 +155,10 @@ SELECT diskquota.set_per_segment_quota('schemaspc_perseg', -1); (1 row) -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- expect insert success @@ -169,10 +169,10 @@ SELECT diskquota.set_per_segment_quota('schemaspc_perseg', 0.123); (1 row) -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) ---- expect insert fail @@ -191,10 +191,10 @@ SELECT 
diskquota.set_schema_tablespace_quota('spcs1_perseg', 'schemaspc_perseg', (1 row) -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- expect insert success diff --git a/tests/regress/expected/test_temp_role.out b/tests/regress/expected/test_temp_role.out index 511df87bc32..0bf1813f8a6 100644 --- a/tests/regress/expected/test_temp_role.out +++ b/tests/regress/expected/test_temp_role.out @@ -19,20 +19,20 @@ HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sur ALTER TABLE ta OWNER TO u3temp; -- expected failed: fill temp table INSERT INTO ta SELECT generate_series(1,100000); -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- expected failed: INSERT INTO a SELECT generate_series(1,100); ERROR: role's disk space quota exceeded with name:u3temp DROP TABLE ta; -SELECT pg_sleep(20); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) INSERT INTO a SELECT generate_series(1,100); diff --git a/tests/regress/expected/test_toast.out b/tests/regress/expected/test_toast.out index 681bad5729f..7b0e7fbf8ab 100644 --- a/tests/regress/expected/test_toast.out +++ b/tests/regress/expected/test_toast.out @@ -15,10 +15,10 @@ SELECT (SELECT string_agg(chr(floor(random() * 26)::int + 65), '') FROM generate_series(1,10000)) FROM generate_series(1,10000); -SELECT pg_sleep(20); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- expect insert toast fail diff --git a/tests/regress/expected/test_truncate.out b/tests/regress/expected/test_truncate.out index 61328e7c771..a863b0bf238 100644 --- a/tests/regress/expected/test_truncate.out +++ b/tests/regress/expected/test_truncate.out @@ -14,10 +14,10 @@ CREATE TABLE b (i int); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT INTO a SELECT generate_series(1,100000); -SELECT pg_sleep(20); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- expect insert fail @@ -26,10 +26,10 @@ ERROR: schema's disk space quota exceeded with name:s7 INSERT INTO b SELECT generate_series(1,30); ERROR: schema's disk space quota exceeded with name:s7 TRUNCATE TABLE a; -SELECT pg_sleep(20); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- expect insert succeed diff --git a/tests/regress/expected/test_uncommitted_table_size.out b/tests/regress/expected/test_uncommitted_table_size.out index c388137c377..4fa4993f623 100644 --- a/tests/regress/expected/test_uncommitted_table_size.out +++ b/tests/regress/expected/test_uncommitted_table_size.out @@ -4,10 +4,10 @@ CREATE TEMP TABLE t1(i int); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. 
Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT INTO t1 SELECT generate_series(1, 100000); -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 't1'::regclass and segid = -1; @@ -30,10 +30,10 @@ CREATE TABLE t2(i int); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT INTO t2 SELECT generate_series(1, 100000); -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 't2'::regclass and segid = -1; @@ -52,10 +52,10 @@ commit; -- heap table index begin; CREATE INDEX idx2 on t2(i); -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'idx2'::regclass and segid = -1; @@ -78,10 +78,10 @@ CREATE TABLE t3(t text); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 't' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT INTO t3 SELECT repeat('a', 10000) FROM generate_series(1, 1000); -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 't3'::regclass and segid = -1; @@ -104,10 +104,10 @@ CREATE TABLE ao (i int) WITH (appendonly=true); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT INTO ao SELECT generate_series(1, 100000); -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) SELECT (SELECT size FROM diskquota.table_size WHERE tableid = 'ao'::regclass and segid = -1)= @@ -121,10 +121,10 @@ commit; -- AO table index begin; CREATE INDEX ao_idx on ao(i); -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'ao_idx'::regclass and segid = -1; @@ -156,11 +156,12 @@ DROP TABLE ao; -- AO table CTAS begin; CREATE TABLE ao WITH(appendonly=true) AS SELECT generate_series(1, 10000); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause. Creating a NULL policy entry. 
-SELECT pg_sleep(5); - pg_sleep ----------- - +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) SELECT (SELECT size FROM diskquota.table_size WHERE tableid = 'ao'::regclass and segid = -1)= @@ -178,10 +179,10 @@ CREATE TABLE aocs (i int, t text) WITH (appendonly=true, orientation=column); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT INTO aocs SELECT i, repeat('a', 1000) FROM generate_series(1, 10000) AS i; -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'aocs'::regclass and segid = -1; @@ -200,10 +201,10 @@ commit; -- AOCS table index begin; CREATE INDEX aocs_idx on aocs(i); -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'aocs_idx'::regclass and segid = -1; @@ -225,10 +226,10 @@ begin; CREATE TABLE aocs WITH(appendonly=true, orientation=column) AS SELECT i, array(select * from generate_series(1,1000)) FROM generate_series(1, 100) AS i; NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'aocs'::regclass and segid = -1; diff --git a/tests/regress/expected/test_update.out b/tests/regress/expected/test_update.out index 728739fc6d1..1dedcd1b130 100644 --- a/tests/regress/expected/test_update.out +++ b/tests/regress/expected/test_update.out @@ -11,10 +11,10 @@ CREATE TABLE a(i int); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT INTO a SELECT generate_series(1,100000); -SELECT pg_sleep(10); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- expect update fail. 
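Note on the synchronization primitive used throughout these hunks: diskquota.wait_for_worker_new_epoch() blocks until the diskquota background worker has finished at least one complete refresh cycle that began after the call, so every assertion that follows it reads fully up-to-date table_size and quota data. This makes the tests deterministic, unlike the fixed pg_sleep(5)/pg_sleep(20) waits it replaces. A minimal sketch of the intended semantics in PL/pgSQL, assuming a hypothetical diskquota.worker_epoch() helper that exposes the worker's per-cycle epoch counter (the real UDF is implemented in C inside the extension):

CREATE OR REPLACE FUNCTION diskquota.wait_for_worker_new_epoch()
RETURNS boolean AS $$
DECLARE
    start_epoch bigint;
BEGIN
    start_epoch := diskquota.worker_epoch();  -- hypothetical helper: current epoch
    -- Wait for the epoch to advance twice: the first bump may belong to a
    -- refresh cycle that was already in flight when we were called; the
    -- second guarantees a full cycle ran entirely after this call.
    WHILE diskquota.worker_epoch() < start_epoch + 2 LOOP
        PERFORM pg_sleep(0.01);
    END LOOP;
    RETURN true;
END;
$$ LANGUAGE plpgsql;

Combined with the diskquota.naptime = 0 setting that init.sql applies later in this patch, the worker begins a new refresh cycle immediately, so the wait returns almost at once while still guaranteeing a consistent snapshot.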
diff --git a/tests/regress/expected/test_vacuum.out b/tests/regress/expected/test_vacuum.out index 24b985c29e0..db179c654c6 100644 --- a/tests/regress/expected/test_vacuum.out +++ b/tests/regress/expected/test_vacuum.out @@ -14,10 +14,10 @@ CREATE TABLE b (i int); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT INTO a SELECT generate_series(1,100000); -SELECT pg_sleep(10); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -- expect insert fail @@ -28,10 +28,10 @@ INSERT INTO b SELECT generate_series(1,10); ERROR: schema's disk space quota exceeded with name:s6 DELETE FROM a WHERE i > 10; VACUUM FULL a; -SELECT pg_sleep(20); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) SELECT tableid::regclass, size, segid from diskquota.table_size WHERE tableid::regclass::name NOT LIKE '%.%' ORDER BY size, segid DESC; diff --git a/tests/regress/expected/test_worker_epoch.out b/tests/regress/expected/test_worker_epoch.out new file mode 100644 index 00000000000..0d1875f1212 --- /dev/null +++ b/tests/regress/expected/test_worker_epoch.out @@ -0,0 +1,9 @@ +-- Test if the UDF returns successfully. +-- NOTE: This test should be the first one since the UDF is supposed +-- to be used in all other tests. +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + diff --git a/tests/regress/sql/init.sql b/tests/regress/sql/init.sql index 921be5295db..ff9a16a0e9c 100644 --- a/tests/regress/sql/init.sql +++ b/tests/regress/sql/init.sql @@ -7,7 +7,7 @@ CREATE DATABASE diskquota; -- end_ignore \! echo $? -- start_ignore -\! gpconfig -c diskquota.naptime -v 2 > /dev/null +\! gpconfig -c diskquota.naptime -v 0 > /dev/null -- end_ignore \! echo $? -- start_ignore @@ -19,5 +19,3 @@ CREATE DATABASE diskquota; \! gpstop -raf > /dev/null -- end_ignore \! echo $? - -\! sleep 10 diff --git a/tests/regress/sql/prepare.sql b/tests/regress/sql/prepare.sql index b1bf89846a6..1a9dec104b0 100644 --- a/tests/regress/sql/prepare.sql +++ b/tests/regress/sql/prepare.sql @@ -2,9 +2,7 @@ CREATE EXTENSION diskquota; -- start_ignore \! gpstop -u -- end_ignore -SELECT pg_sleep(1); \! cp data/csmall.txt /tmp/csmall.txt -SELECT pg_sleep(15); -- disable hardlimit feature. 
SELECT diskquota.disable_hardlimit(); @@ -18,7 +16,7 @@ ALTER TABLE badquota.t1 OWNER TO testbody; INSERT INTO badquota.t1 SELECT generate_series(0, 100000); SELECT diskquota.init_table_size_table(); SELECT diskquota.set_schema_quota('badquota', '1 MB'); -SELECT pg_sleep(10); +SELECT diskquota.wait_for_worker_new_epoch(); SELECT size, segid FROM diskquota.table_size WHERE tableid IN (SELECT oid FROM pg_class WHERE relname='t1') ORDER BY segid DESC; diff --git a/tests/regress/sql/test_appendonly.sql b/tests/regress/sql/test_appendonly.sql index 020fbd89997..2d42f8df959 100644 --- a/tests/regress/sql/test_appendonly.sql +++ b/tests/regress/sql/test_appendonly.sql @@ -13,7 +13,7 @@ CREATE INDEX index_t2 ON t_aoco(i); INSERT INTO t_ao SELECT generate_series(1, 100); INSERT INTO t_aoco SELECT generate_series(1, 100); -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); -- Query the size of t_ao. SELECT tableid::regclass, size @@ -34,7 +34,7 @@ SELECT diskquota.set_schema_quota('s_appendonly', '1 MB'); -- expect success. INSERT INTO t_ao SELECT generate_series(1, 1000); -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); -- expect fail. INSERT INTO t_ao SELECT generate_series(1, 10); diff --git a/tests/regress/sql/test_column.sql b/tests/regress/sql/test_column.sql index d71e6c0df80..b8698244f52 100644 --- a/tests/regress/sql/test_column.sql +++ b/tests/regress/sql/test_column.sql @@ -2,17 +2,17 @@ CREATE SCHEMA scolumn; SELECT diskquota.set_schema_quota('scolumn', '1 MB'); SET search_path TO scolumn; -SELECT pg_sleep(20); +SELECT diskquota.wait_for_worker_new_epoch(); CREATE TABLE a2(i INT); -- expect fail INSERT INTO a2 SELECT generate_series(1,100000); -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); -- expect fail INSERT INTO a2 SELECT generate_series(1,10); ALTER TABLE a2 ADD COLUMN j VARCHAR(50); UPDATE a2 SET j = 'add value for column j'; -SELECT pg_sleep(20); +SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert failed after add column INSERT INTO a2 SELECT generate_series(1,10); diff --git a/tests/regress/sql/test_copy.sql b/tests/regress/sql/test_copy.sql index a3e2a6c300b..abac9fb7c81 100644 --- a/tests/regress/sql/test_copy.sql +++ b/tests/regress/sql/test_copy.sql @@ -7,7 +7,7 @@ CREATE TABLE c (i int); COPY c FROM '/tmp/csmall.txt'; -- expect failed INSERT INTO c SELECT generate_series(1,100000); -SELECT pg_sleep(10); +SELECT diskquota.wait_for_worker_new_epoch(); -- expect copy fail COPY c FROM '/tmp/csmall.txt'; diff --git a/tests/regress/sql/test_ctas_role.sql b/tests/regress/sql/test_ctas_role.sql index 93e0e97f3ea..436fd2b97eb 100644 --- a/tests/regress/sql/test_ctas_role.sql +++ b/tests/regress/sql/test_ctas_role.sql @@ -2,35 +2,37 @@ SELECT diskquota.enable_hardlimit(); CREATE ROLE hardlimit_r; SELECT diskquota.set_role_quota('hardlimit_r', '1MB'); +GRANT USAGE ON SCHEMA diskquota TO hardlimit_r; SET ROLE hardlimit_r; -- heap table CREATE TABLE t1 AS SELECT generate_series(1, 10000000); -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); -- temp table CREATE TEMP TABLE t2 AS SELECT generate_series(1, 100000000); -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); -- toast table -CREATE TABLE toast_table AS SELECT ARRAY(SELECT * FROM generate_series(1,10000)) FROM generate_series(1, 100000); -SELECT pg_sleep(5); +CREATE TABLE toast_table AS SELECT ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000); +SELECT diskquota.wait_for_worker_new_epoch(); -- ao table CREATE 
TABLE ao_table WITH (appendonly=true) AS SELECT generate_series(1, 100000000); -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); -- aocs table CREATE TABLE aocs_table WITH (appendonly=true, orientation=column) - AS SELECT i, ARRAY(SELECT * FROM generate_series(1,10000)) FROM generate_series(1, 100000) AS i; -SELECT pg_sleep(5); + AS SELECT i, ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000) AS i; +SELECT diskquota.wait_for_worker_new_epoch(); -- disable hardlimit and do some clean-ups. -SELECT diskquota.disable_hardlimit(); DROP TABLE IF EXISTS t1; DROP TABLE IF EXISTS t2; DROP TABLE IF EXISTS toast_table; DROP TABLE IF EXISTS ao_table; DROP TABLE IF EXISTS aocs_table; RESET ROLE; +REVOKE USAGE ON SCHEMA diskquota FROM hardlimit_r; DROP ROLE hardlimit_r; +SELECT diskquota.disable_hardlimit(); diff --git a/tests/regress/sql/test_ctas_schema.sql b/tests/regress/sql/test_ctas_schema.sql index b8b72b58554..e21375b8690 100644 --- a/tests/regress/sql/test_ctas_schema.sql +++ b/tests/regress/sql/test_ctas_schema.sql @@ -3,25 +3,24 @@ SELECT diskquota.enable_hardlimit(); CREATE SCHEMA hardlimit_s; SELECT diskquota.set_schema_quota('hardlimit_s', '1 MB'); SET search_path TO hardlimit_s; -SELECT pg_sleep(5); -- heap table CREATE TABLE t1 AS SELECT generate_series(1, 100000000); -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); -- toast table CREATE TABLE toast_table AS SELECT ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000); -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); -- ao table CREATE TABLE ao_table WITH (appendonly=true) AS SELECT generate_series(1, 100000000); -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); -- aocs table CREATE TABLE aocs_table WITH (appendonly=true, orientation=column) AS SELECT i, ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000) AS i; -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); -- disable hardlimit and do some clean-ups. SELECT diskquota.disable_hardlimit(); diff --git a/tests/regress/sql/test_ctas_tablespace_role.sql b/tests/regress/sql/test_ctas_tablespace_role.sql index a9b7c23304c..ccb38ce5107 100644 --- a/tests/regress/sql/test_ctas_tablespace_role.sql +++ b/tests/regress/sql/test_ctas_tablespace_role.sql @@ -8,6 +8,7 @@ SELECT diskquota.enable_hardlimit(); DROP TABLESPACE IF EXISTS ctas_rolespc; CREATE TABLESPACE ctas_rolespc LOCATION '/tmp/ctas_rolespc'; CREATE ROLE hardlimit_r; +GRANT USAGE ON SCHEMA diskquota TO hardlimit_r; GRANT ALL ON TABLESPACE ctas_rolespc TO hardlimit_r; SELECT diskquota.set_role_tablespace_quota('hardlimit_r', 'ctas_rolespc', '1 MB'); SET default_tablespace = ctas_rolespc; @@ -15,21 +16,21 @@ SET ROLE hardlimit_r; -- heap table CREATE TABLE t1 AS SELECT generate_series(1, 100000000); -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); -- toast table CREATE TABLE toast_table AS SELECT ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000); -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); -- ao table CREATE TABLE ao_table WITH (appendonly=true) AS SELECT generate_series(1, 100000000); -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); -- aocs table CREATE TABLE aocs_table WITH (appendonly=true, orientation=column) AS SELECT i, ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000) AS i; -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); -- disable hardlimit and do some clean-ups. 
DROP TABLE IF EXISTS t1; @@ -41,5 +42,6 @@ RESET ROLE; RESET default_tablespace; DROP TABLESPACE ctas_rolespc; \! rm -rf /tmp/ctas_rolespc; +REVOKE USAGE ON SCHEMA diskquota FROM hardlimit_r; DROP ROLE hardlimit_r; SELECT diskquota.disable_hardlimit(); diff --git a/tests/regress/sql/test_ctas_tablespace_schema.sql b/tests/regress/sql/test_ctas_tablespace_schema.sql index a7dc9916c93..baebf79e74d 100644 --- a/tests/regress/sql/test_ctas_tablespace_schema.sql +++ b/tests/regress/sql/test_ctas_tablespace_schema.sql @@ -15,21 +15,21 @@ SET default_tablespace = ctas_schemaspc; -- heap table CREATE TABLE t1 AS SELECT generate_series(1, 100000000); -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); -- toast table CREATE TABLE toast_table AS SELECT ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000); -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); -- ao table CREATE TABLE ao_table WITH (appendonly=true) AS SELECT generate_series(1, 100000000); -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); -- aocs table CREATE TABLE aocs_table WITH (appendonly=true, orientation=column) AS SELECT i, ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000) AS i; -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); -- disable hardlimit and do some clean-ups DROP TABLE IF EXISTS t1; diff --git a/tests/regress/sql/test_delete_quota.sql b/tests/regress/sql/test_delete_quota.sql index dbcbe113b45..b4c6572c940 100644 --- a/tests/regress/sql/test_delete_quota.sql +++ b/tests/regress/sql/test_delete_quota.sql @@ -6,11 +6,11 @@ SET search_path TO deleteschema; CREATE TABLE c (i INT); -- expect failed INSERT INTO c SELECT generate_series(1,100000); -SELECT pg_sleep(10); +SELECT diskquota.wait_for_worker_new_epoch(); -- expect fail INSERT INTO c SELECT generate_series(1,100); SELECT diskquota.set_schema_quota('deleteschema', '-1 MB'); -SELECT pg_sleep(20); +SELECT diskquota.wait_for_worker_new_epoch(); INSERT INTO c SELECT generate_series(1,100); diff --git a/tests/regress/sql/test_drop_table.sql b/tests/regress/sql/test_drop_table.sql index 80176d58825..f5e22b61f3f 100644 --- a/tests/regress/sql/test_drop_table.sql +++ b/tests/regress/sql/test_drop_table.sql @@ -7,15 +7,13 @@ CREATE TABLE a2(i INT); INSERT INTO a SELECT generate_series(1,100); -- expect insert fail INSERT INTO a SELECT generate_series(1,100000); -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail INSERT INTO a2 SELECT generate_series(1,100); DROP TABLE a; -SELECT pg_sleep(20); +SELECT diskquota.wait_for_worker_new_epoch(); INSERT INTO a2 SELECT generate_series(1,100); DROP TABLE a2; RESET search_path; DROP SCHEMA sdrtbl; - - diff --git a/tests/regress/sql/test_extension.sql b/tests/regress/sql/test_extension.sql index 8d612d66358..0f243493138 100644 --- a/tests/regress/sql/test_extension.sql +++ b/tests/regress/sql/test_extension.sql @@ -26,7 +26,7 @@ CREATE SCHEMA SX; CREATE TABLE SX.a(i int); SELECT diskquota.set_schema_quota('SX', '1MB'); INSERT INTO SX.a values(generate_series(0, 100000)); -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); INSERT INTO SX.a values(generate_series(0, 10)); DROP TABLE SX.a; @@ -50,7 +50,7 @@ CREATE SCHEMA SX; CREATE TABLE SX.a(i int); SELECT diskquota.set_schema_quota('SX', '1MB'); INSERT INTO SX.a values(generate_series(0, 100000)); -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); INSERT INTO SX.a values(generate_series(0, 10)); DROP TABLE SX.a; 
@@ -62,7 +62,7 @@ CREATE SCHEMA SX; CREATE TABLE SX.a(i int); SELECT diskquota.set_schema_quota('SX', '1MB'); INSERT INTO SX.a values(generate_series(0, 100000)); -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); INSERT INTO SX.a values(generate_series(0, 10)); DROP TABLE SX.a; @@ -74,7 +74,7 @@ CREATE SCHEMA SX; CREATE TABLE SX.a(i int); SELECT diskquota.set_schema_quota('SX', '1MB'); INSERT INTO SX.a values(generate_series(0, 100000)); -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); INSERT INTO SX.a values(generate_series(0, 10)); DROP TABLE SX.a; @@ -86,7 +86,7 @@ CREATE SCHEMA SX; CREATE TABLE SX.a(i int); SELECT diskquota.set_schema_quota('SX', '1MB'); INSERT INTO SX.a values(generate_series(0, 100000)); -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); INSERT INTO SX.a values(generate_series(0, 10)); DROP TABLE SX.a; @@ -98,7 +98,7 @@ CREATE SCHEMA SX; CREATE TABLE SX.a(i int); SELECT diskquota.set_schema_quota('SX', '1MB'); INSERT INTO SX.a values(generate_series(0, 100000)); -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); INSERT INTO SX.a values(generate_series(0, 10)); DROP TABLE SX.a; @@ -110,7 +110,7 @@ CREATE SCHEMA SX; CREATE TABLE SX.a(i int); SELECT diskquota.set_schema_quota('SX', '1MB'); INSERT INTO SX.a values(generate_series(0, 100000)); -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); INSERT INTO SX.a values(generate_series(0, 10)); DROP TABLE SX.a; @@ -122,7 +122,7 @@ CREATE SCHEMA SX; CREATE TABLE SX.a(i int); SELECT diskquota.set_schema_quota('SX', '1MB'); INSERT INTO SX.a values(generate_series(0, 100000)); -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); INSERT INTO SX.a values(generate_series(0, 10)); DROP TABLE SX.a; diff --git a/tests/regress/sql/test_fast_disk_check.sql b/tests/regress/sql/test_fast_disk_check.sql index 1e973ada00f..4b92a5bcb77 100644 --- a/tests/regress/sql/test_fast_disk_check.sql +++ b/tests/regress/sql/test_fast_disk_check.sql @@ -4,7 +4,7 @@ SET search_path to s1; CREATE TABLE a(i int); INSERT INTO a SELECT generate_series(1,200000); -SELECT pg_sleep(10); +SELECT diskquota.wait_for_worker_new_epoch(); SELECT (pg_database_size(oid)-dbsize)/dbsize < 0.1 FROM pg_database, diskquota.show_fast_database_size_view WHERE datname='contrib_regression'; RESET search_path; DROP TABLE s1.a; diff --git a/tests/regress/sql/test_index.sql b/tests/regress/sql/test_index.sql index 4dca18d5991..e9201a73bd4 100644 --- a/tests/regress/sql/test_index.sql +++ b/tests/regress/sql/test_index.sql @@ -10,20 +10,20 @@ SET search_path TO indexschema1; CREATE TABLE test_index_a(i int) TABLESPACE indexspc; INSERT INTO test_index_a SELECT generate_series(1,20000); SELECT diskquota.set_schema_tablespace_quota('indexschema1', 'indexspc','2 MB'); -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); SELECT schema_name,tablespace_name,quota_in_mb,nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name='indexschema1' and tablespace_name='indexspc'; SELECT size, segid FROM diskquota.table_size , pg_class where tableid=oid and relname='test_index_a' and segid=-1; -- create index for the table, index in default tablespace CREATE INDEX a_index ON test_index_a(i); INSERT INTO test_index_a SELECT generate_series(1,10000); -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert success INSERT INTO test_index_a SELECT generate_series(1,100); SELECT 
schema_name,tablespace_name,quota_in_mb,nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name ='indexschema1' and tablespace_name='indexspc'; SELECT size, segid FROM diskquota.table_size , pg_class where tableid=oid and (relname='test_index_a' or relname='a_index') and segid=-1; -- add index to tablespace indexspc ALTER index a_index SET TABLESPACE indexspc; -SELECT pg_sleep(20); +SELECT diskquota.wait_for_worker_new_epoch(); SELECT schema_name,tablespace_name,quota_in_mb,nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name ='indexschema1' and tablespace_name='indexspc'; SELECT size, segid FROM diskquota.table_size , pg_class where tableid=oid and (relname='test_index_a' or relname='a_index') and segid=-1; -- expect insert fail @@ -31,11 +31,11 @@ INSERT INTO test_index_a SELECT generate_series(1,100); -- index tablespace quota exceeded ALTER table test_index_a SET TABLESPACE pg_default; -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert success INSERT INTO test_index_a SELECT generate_series(1,100); INSERT INTO test_index_a SELECT generate_series(1,200000); -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail INSERT INTO test_index_a SELECT generate_series(1,100); RESET search_path; diff --git a/tests/regress/sql/test_many_active_tables.sql b/tests/regress/sql/test_many_active_tables.sql index 36e7d4f5dc2..4c617cf6222 100644 --- a/tests/regress/sql/test_many_active_tables.sql +++ b/tests/regress/sql/test_many_active_tables.sql @@ -6,12 +6,12 @@ INSERT INTO t1 SELECT pk, val FROM generate_series(1, 10000) AS val, generate_series(1, 999) AS pk; -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); SELECT count(*) >= 999 FROM diskquota.table_size WHERE size > 0; DROP TABLE t1; -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); SELECT count(*) < 999 FROM diskquota.table_size WHERE size > 0; diff --git a/tests/regress/sql/test_partition.sql b/tests/regress/sql/test_partition.sql index aaff1b049c5..095b36f1e43 100644 --- a/tests/regress/sql/test_partition.sql +++ b/tests/regress/sql/test_partition.sql @@ -15,18 +15,18 @@ CREATE TABLE measurement ( ); INSERT INTO measurement SELECT generate_series(1,100), '2006-02-02' ,1,1; -SELECT pg_sleep(20); +SELECT diskquota.wait_for_worker_new_epoch(); INSERT INTO measurement SELECT 1, '2006-02-02' ,1,1; -- expect insert fail INSERT INTO measurement SELECT generate_series(1,100000), '2006-03-02' ,1,1; -SELECT pg_sleep(10); +SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail INSERT INTO measurement SELECT 1, '2006-02-02' ,1,1; -- expect insert fail INSERT INTO measurement SELECT 1, '2006-03-03' ,1,1; DELETE FROM measurement WHERE logdate='2006-03-02'; VACUUM FULL measurement; -SELECT pg_sleep(20); +SELECT diskquota.wait_for_worker_new_epoch(); INSERT INTO measurement SELECT 1, '2006-02-02' ,1,1; INSERT INTO measurement SELECT 1, '2006-03-03' ,1,1; diff --git a/tests/regress/sql/test_pause_and_resume.sql b/tests/regress/sql/test_pause_and_resume.sql index 437146e71cb..0f82f51a127 100644 --- a/tests/regress/sql/test_pause_and_resume.sql +++ b/tests/regress/sql/test_pause_and_resume.sql @@ -9,7 +9,7 @@ CREATE TABLE b(i int); INSERT INTO a SELECT generate_series(1,100000); SELECT diskquota.set_schema_quota('s1', '1 MB'); -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail INSERT INTO a SELECT generate_series(1,100); 
-- expect insert fail diff --git a/tests/regress/sql/test_primary_failure.sql b/tests/regress/sql/test_primary_failure.sql index ddc0f1574fc..ffe44466a56 100644 --- a/tests/regress/sql/test_primary_failure.sql +++ b/tests/regress/sql/test_primary_failure.sql @@ -30,7 +30,7 @@ $$ language plpythonu; CREATE TABLE a(i int); INSERT INTO a SELECT generate_series(1,100); INSERT INTO a SELECT generate_series(1,100000); -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail INSERT INTO a SELECT generate_series(1,100); @@ -52,18 +52,14 @@ SELECT diskquota.set_schema_quota('ftsr', '200 MB'); -- pull up failed primary -- start_ignore select pg_recoverseg((select datadir from gp_segment_configuration c where c.role='p' and c.content=-1), 'a'); -select pg_sleep(10); select pg_recoverseg((select datadir from gp_segment_configuration c where c.role='p' and c.content=-1), 'ar'); -select pg_sleep(15); select pg_recoverseg((select datadir from gp_segment_configuration c where c.role='p' and c.content=-1), 'a'); -select pg_sleep(10); select pg_recoverseg((select datadir from gp_segment_configuration c where c.role='p' and c.content=-1), 'ar'); -select pg_sleep(10); -- check GPDB status select content, preferred_role, role, status, mode from gp_segment_configuration where content = 0; -SELECT pg_sleep(10); -- end_ignore +SELECT diskquota.wait_for_worker_new_epoch(); SELECT quota_in_mb, nspsize_in_bytes from diskquota.show_fast_schema_quota_view where schema_name='ftsr'; INSERT INTO a SELECT generate_series(1,100); diff --git a/tests/regress/sql/test_relation_cache.sql b/tests/regress/sql/test_relation_cache.sql index c5371e0df50..3661ed3edfa 100644 --- a/tests/regress/sql/test_relation_cache.sql +++ b/tests/regress/sql/test_relation_cache.sql @@ -19,7 +19,7 @@ insert into t select generate_series(1, 100000); select count(*) from diskquota.show_relation_cache_all_seg(); commit; -select pg_sleep(5); +select diskquota.wait_for_worker_new_epoch(); select count(*) from diskquota.show_relation_cache_all_seg(); drop table t; @@ -33,7 +33,7 @@ select count(*) from diskquota.show_relation_cache_all_seg(); select diskquota.check_relation_cache(); commit; -select pg_sleep(5); +select diskquota.wait_for_worker_new_epoch(); select count(*) from diskquota.show_relation_cache_all_seg(); drop table t; @@ -47,7 +47,7 @@ select count(*) from diskquota.show_relation_cache_all_seg(); select diskquota.check_relation_cache(); commit; -select pg_sleep(5); +select diskquota.wait_for_worker_new_epoch(); select count(*) from diskquota.show_relation_cache_all_seg(); drop table t; @@ -61,7 +61,7 @@ select count(*) from diskquota.show_relation_cache_all_seg(); select diskquota.check_relation_cache(); commit; -select pg_sleep(5); +select diskquota.wait_for_worker_new_epoch(); select count(*) from diskquota.show_relation_cache_all_seg(); drop table t; diff --git a/tests/regress/sql/test_rename.sql b/tests/regress/sql/test_rename.sql index 1411ecf5f53..b6b4390f8ec 100644 --- a/tests/regress/sql/test_rename.sql +++ b/tests/regress/sql/test_rename.sql @@ -5,7 +5,7 @@ set search_path to srs1; CREATE TABLE a(i int); -- expect insert fail INSERT INTO a SELECT generate_series(1,100000); -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail INSERT INTO a SELECT generate_series(1,10); ALTER SCHEMA srs1 RENAME TO srs2; @@ -32,7 +32,7 @@ ALTER TABLE a OWNER TO srerole; -- expect insert fail INSERT INTO a SELECT generate_series(1,100000); -SELECT pg_sleep(5); +SELECT 
diskquota.wait_for_worker_new_epoch(); -- expect insert fail INSERT INTO a SELECT generate_series(1,10); ALTER ROLE srerole RENAME TO srerole2; diff --git a/tests/regress/sql/test_reschema.sql b/tests/regress/sql/test_reschema.sql index 723bcb15f91..3814c8fbac0 100644 --- a/tests/regress/sql/test_reschema.sql +++ b/tests/regress/sql/test_reschema.sql @@ -5,12 +5,12 @@ SET search_path TO srE; CREATE TABLE a(i int); -- expect insert fail INSERT INTO a SELECT generate_series(1,100000); -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail when exceed quota limit INSERT INTO a SELECT generate_series(1,1000); -- set schema quota larger SELECT diskquota.set_schema_quota('srE', '1 GB'); -SELECT pg_sleep(20); +SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert succeed INSERT INTO a SELECT generate_series(1,1000); diff --git a/tests/regress/sql/test_role.sql b/tests/regress/sql/test_role.sql index 3847b7d86c7..d78604a6e5e 100644 --- a/tests/regress/sql/test_role.sql +++ b/tests/regress/sql/test_role.sql @@ -15,24 +15,24 @@ SELECT diskquota.set_role_quota('u1', '1 MB'); INSERT INTO b SELECT generate_series(1,100); -- expect insert fail INSERT INTO b SELECT generate_series(1,100000); -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail INSERT INTO b SELECT generate_series(1,100); -- expect insert fail INSERT INTO b2 SELECT generate_series(1,100); -- Delete role quota SELECT diskquota.set_role_quota('u1', '-1 MB'); -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert success INSERT INTO b SELECT generate_series(1,100); -- Reset role quota SELECT diskquota.set_role_quota('u1', '1 MB'); -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail INSERT INTO b SELECT generate_series(1,100); SELECT role_name, quota_in_mb, rolsize_in_bytes FROM diskquota.show_fast_role_quota_view WHERE role_name='u1'; ALTER TABLE b OWNER TO u2; -SELECT pg_sleep(20); +SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert succeed INSERT INTO b SELECT generate_series(1,100); -- expect insert succeed diff --git a/tests/regress/sql/test_schema.sql b/tests/regress/sql/test_schema.sql index b0aff40ba6b..932199db470 100644 --- a/tests/regress/sql/test_schema.sql +++ b/tests/regress/sql/test_schema.sql @@ -4,11 +4,11 @@ SET search_path TO s1; CREATE TABLE a(i int); INSERT INTO a SELECT generate_series(1,100); --- expect insert fail +-- expect insert success INSERT INTO a SELECT generate_series(1,100000); -SELECT pg_sleep(5); + SELECT diskquota.set_schema_quota('s1', '1 MB'); -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail INSERT INTO a SELECT generate_series(1,100); CREATE TABLE a2(i int); @@ -18,7 +18,7 @@ INSERT INTO a2 SELECT generate_series(1,100); -- Test alter table set schema CREATE SCHEMA s2; ALTER TABLE s1.a SET SCHEMA s2; -SELECT pg_sleep(20); +SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert succeed INSERT INTO a2 SELECT generate_series(1,200); -- expect insert succeed @@ -28,7 +28,7 @@ ALTER TABLE s2.a SET SCHEMA badquota; -- expect failed INSERT INTO badquota.a SELECT generate_series(0, 100); -SELECT pg_sleep(10); +SELECT diskquota.wait_for_worker_new_epoch(); SELECT schema_name, quota_in_mb FROM diskquota.show_fast_schema_quota_view WHERE schema_name = 's1'; RESET search_path; diff --git a/tests/regress/sql/test_tablespace_role.sql b/tests/regress/sql/test_tablespace_role.sql index 87b195f738e..eaf116bf1b8 
100644 --- a/tests/regress/sql/test_tablespace_role.sql +++ b/tests/regress/sql/test_tablespace_role.sql @@ -18,13 +18,12 @@ ALTER TABLE b2 OWNER TO rolespcu1; INSERT INTO b SELECT generate_series(1,100); -- expect insert success INSERT INTO b SELECT generate_series(1,100000); -SELECT pg_sleep(5); SELECT diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '1 MB'); -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert success INSERT INTO b SELECT generate_series(1,100); ALTER TABLE b OWNER TO rolespcu1; -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail INSERT INTO b SELECT generate_series(1,100); -- expect insert fail @@ -35,13 +34,13 @@ SELECT role_name, tablespace_name, quota_in_mb, rolsize_tablespace_in_bytes FROM -- Test alter owner ALTER TABLE b OWNER TO rolespcu2; -SELECT pg_sleep(20); +SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert succeed INSERT INTO b SELECT generate_series(1,100); -- expect insert succeed INSERT INTO b2 SELECT generate_series(1,100); ALTER TABLE b OWNER TO rolespcu1; -SELECT pg_sleep(20); +SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail INSERT INTO b SELECT generate_series(1,100); @@ -52,29 +51,29 @@ INSERT INTO b SELECT generate_series(1,100); DROP TABLESPACE IF EXISTS rolespc2; CREATE TABLESPACE rolespc2 LOCATION '/tmp/rolespc2'; ALTER TABLE b SET TABLESPACE rolespc2; -SELECT pg_sleep(20); +SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert succeed INSERT INTO b SELECT generate_series(1,100); -- alter table b back to tablespace rolespc ALTER TABLE b SET TABLESPACE rolespc; -SELECT pg_sleep(20); +SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail INSERT INTO b SELECT generate_series(1,100); -- Test update quota config SELECT diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '10 MB'); -SELECT pg_sleep(20); +SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert success INSERT INTO b SELECT generate_series(1,100); -- expect insert success INSERT INTO b SELECT generate_series(1,1000000); -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail INSERT INTO b SELECT generate_series(1,100); -- Test delete quota config SELECT diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '-1 MB'); -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert success INSERT INTO b SELECT generate_series(1,100); diff --git a/tests/regress/sql/test_tablespace_role_perseg.sql b/tests/regress/sql/test_tablespace_role_perseg.sql index 0920aaeb39d..a6159872a54 100644 --- a/tests/regress/sql/test_tablespace_role_perseg.sql +++ b/tests/regress/sql/test_tablespace_role_perseg.sql @@ -19,12 +19,12 @@ SELECT diskquota.set_role_tablespace_quota('rolespc_persegu1', 'rolespc_perseg', INSERT INTO b SELECT generate_series(1,100); -- expect insert success INSERT INTO b SELECT generate_series(1,100000); -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail INSERT INTO b SELECT generate_series(1,100); -- change tablespace role quota SELECT diskquota.set_role_tablespace_quota('rolespc_persegu1', 'rolespc_perseg', '10 MB'); -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert success INSERT INTO b SELECT generate_series(1,100); @@ -32,16 +32,16 @@ INSERT INTO b SELECT generate_series(1,100); SELECT role_name, tablespace_name, quota_in_mb, rolsize_tablespace_in_bytes FROM 
diskquota.show_fast_role_tablespace_quota_view WHERE role_name = 'rolespc_persegu1' and tablespace_name = 'rolespc_perseg'; SELECT diskquota.set_per_segment_quota('rolespc_perseg', '0.1'); -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); ---- expect insert fail by tablespace schema perseg quota INSERT INTO b SELECT generate_series(1,100); -- Test alter owner ALTER TABLE b OWNER TO rolespc_persegu2; -SELECT pg_sleep(20); +SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert succeed INSERT INTO b SELECT generate_series(1,100); ALTER TABLE b OWNER TO rolespc_persegu1; -SELECT pg_sleep(20); +SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail INSERT INTO b SELECT generate_series(1,100); @@ -52,40 +52,40 @@ INSERT INTO b SELECT generate_series(1,100); DROP TABLESPACE IF EXISTS rolespc_perseg2; CREATE TABLESPACE rolespc_perseg2 LOCATION '/tmp/rolespc_perseg2'; ALTER TABLE b SET TABLESPACE rolespc_perseg2; -SELECT pg_sleep(20); +SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert succeed INSERT INTO b SELECT generate_series(1,100); -- alter table b back to tablespace rolespc_perseg ALTER TABLE b SET TABLESPACE rolespc_perseg; -SELECT pg_sleep(20); +SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail INSERT INTO b SELECT generate_series(1,100); -- Test update per segment ratio SELECT diskquota.set_per_segment_quota('rolespc_perseg', 3.1); -SELECT pg_sleep(20); +SELECT diskquota.wait_for_worker_new_epoch(); SELECT role_name, tablespace_name, quota_in_mb, rolsize_tablespace_in_bytes FROM diskquota.show_fast_role_tablespace_quota_view WHERE role_name = 'rolespc_persegu1' and tablespace_name = 'rolespc_perseg'; -- expect insert success INSERT INTO b SELECT generate_series(1,100); SELECT diskquota.set_per_segment_quota('rolespc_perseg', 0.11); -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail INSERT INTO b SELECT generate_series(1,100); -- Test delete per segment ratio SELECT diskquota.set_per_segment_quota('rolespc_perseg', -1); -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert success INSERT INTO b SELECT generate_series(1,100); SELECT diskquota.set_per_segment_quota('rolespc_perseg', 0.11); -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail INSERT INTO b SELECT generate_series(1,100); -- Test delete quota config SELECT diskquota.set_role_tablespace_quota('rolespc_persegu1', 'rolespc_perseg', '-1 MB'); -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert success INSERT INTO b SELECT generate_series(1,100); diff --git a/tests/regress/sql/test_tablespace_schema.sql b/tests/regress/sql/test_tablespace_schema.sql index db584007f18..b57f2180a9c 100644 --- a/tests/regress/sql/test_tablespace_schema.sql +++ b/tests/regress/sql/test_tablespace_schema.sql @@ -11,9 +11,9 @@ CREATE TABLE a(i int) TABLESPACE schemaspc; INSERT INTO a SELECT generate_series(1,100); -- expect insert fail INSERT INTO a SELECT generate_series(1,100000); -SELECT pg_sleep(5); + SELECT diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc','1 MB'); -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail INSERT INTO a SELECT generate_series(1,100); CREATE TABLE a2(i int) TABLESPACE schemaspc; @@ -23,13 +23,13 @@ INSERT INTO a2 SELECT generate_series(1,100); -- Test alter table set schema CREATE SCHEMA spcs2; ALTER TABLE spcs1.a SET SCHEMA spcs2; -SELECT pg_sleep(20); +SELECT 
diskquota.wait_for_worker_new_epoch(); -- expect insert succeed INSERT INTO a2 SELECT generate_series(1,200); -- expect insert succeed INSERT INTO spcs2.a SELECT generate_series(1,200); ALTER TABLE spcs2.a SET SCHEMA spcs1; -SELECT pg_sleep(10); +SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail INSERT INTO a SELECT generate_series(1,200); SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name = 'spcs1' and tablespace_name ='schemaspc'; @@ -41,28 +41,28 @@ SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FR DROP TABLESPACE IF EXISTS schemaspc2; CREATE TABLESPACE schemaspc2 LOCATION '/tmp/schemaspc2'; ALTER TABLE a SET TABLESPACE schemaspc2; -SELECT pg_sleep(20); +SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert succeed INSERT INTO a SELECT generate_series(1,200); ALTER TABLE a SET TABLESPACE schemaspc; -SELECT pg_sleep(20); +SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail INSERT INTO a SELECT generate_series(1,200); -- Test update quota config SELECT diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc', '10 MB'); -SELECT pg_sleep(20); +SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert success INSERT INTO a SELECT generate_series(1,100); -- expect insert success INSERT INTO a SELECT generate_series(1,1000000); -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail INSERT INTO a SELECT generate_series(1,100); -- Test delete quota config SELECT diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc', '-1 MB'); -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert success INSERT INTO a SELECT generate_series(1,100); diff --git a/tests/regress/sql/test_tablespace_schema_perseg.sql b/tests/regress/sql/test_tablespace_schema_perseg.sql index b95c7067bb4..36cdeca9f67 100644 --- a/tests/regress/sql/test_tablespace_schema_perseg.sql +++ b/tests/regress/sql/test_tablespace_schema_perseg.sql @@ -13,29 +13,29 @@ CREATE TABLE a(i int) TABLESPACE schemaspc_perseg; INSERT INTO a SELECT generate_series(1,100); -- expect insert success INSERT INTO a SELECT generate_series(1,100000); -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail by tablespace schema diskquota INSERT INTO a SELECT generate_series(1,100); -- change tablespace schema quota SELECT diskquota.set_schema_tablespace_quota('spcs1_perseg', 'schemaspc_perseg', '10 MB'); -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert success INSERT INTO a SELECT generate_series(1,100); SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name = 'spcs1_perseg' and tablespace_name ='schemaspc_perseg'; SELECT diskquota.set_per_segment_quota('schemaspc_perseg', 0.1); -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); ---- expect insert fail by tablespace schema perseg quota INSERT INTO a SELECT generate_series(1,100); -- Test alter table set schema CREATE SCHEMA spcs2_perseg; ALTER TABLE spcs1_perseg.a SET SCHEMA spcs2_perseg; -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert succeed INSERT INTO spcs2_perseg.a SELECT generate_series(1,200); ALTER TABLE spcs2_perseg.a SET SCHEMA spcs1_perseg; -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail INSERT INTO a 
SELECT generate_series(1,200); SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name = 'spcs1_perseg' and tablespace_name ='schemaspc_perseg'; @@ -47,38 +47,38 @@ SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FR DROP TABLESPACE IF EXISTS schemaspc_perseg2; CREATE TABLESPACE schemaspc_perseg2 LOCATION '/tmp/schemaspc_perseg2'; ALTER TABLE a SET TABLESPACE schemaspc_perseg2; -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert succeed INSERT INTO a SELECT generate_series(1,200); ALTER TABLE a SET TABLESPACE schemaspc_perseg; -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail INSERT INTO a SELECT generate_series(1,200); -- Test update per segment ratio SELECT diskquota.set_per_segment_quota('schemaspc_perseg', 3.1); -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert success INSERT INTO a SELECT generate_series(1,100); SELECT diskquota.set_per_segment_quota('schemaspc_perseg', 0.123); -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); ---- expect insert fail INSERT INTO a SELECT generate_series(1,100); -- Test delete per segment ratio SELECT diskquota.set_per_segment_quota('schemaspc_perseg', -1); -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert success INSERT INTO a SELECT generate_series(1,100); SELECT diskquota.set_per_segment_quota('schemaspc_perseg', 0.123); -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); ---- expect insert fail INSERT INTO a SELECT generate_series(1,100); -- Test delete tablespace schema quota SELECT diskquota.set_per_segment_quota('schemaspc_perseg', 2); SELECT diskquota.set_schema_tablespace_quota('spcs1_perseg', 'schemaspc_perseg','-1 MB'); -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert success INSERT INTO a SELECT generate_series(1,100); SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name = 'spcs1_perseg' and tablespace_name ='schemaspc_perseg'; diff --git a/tests/regress/sql/test_temp_role.sql b/tests/regress/sql/test_temp_role.sql index f53ec7ed125..b863098f500 100644 --- a/tests/regress/sql/test_temp_role.sql +++ b/tests/regress/sql/test_temp_role.sql @@ -11,11 +11,11 @@ ALTER TABLE ta OWNER TO u3temp; -- expected failed: fill temp table INSERT INTO ta SELECT generate_series(1,100000); -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); -- expected failed: INSERT INTO a SELECT generate_series(1,100); DROP TABLE ta; -SELECT pg_sleep(20); +SELECT diskquota.wait_for_worker_new_epoch(); INSERT INTO a SELECT generate_series(1,100); DROP TABLE a; diff --git a/tests/regress/sql/test_toast.sql b/tests/regress/sql/test_toast.sql index 89a62013060..98ab45e5926 100644 --- a/tests/regress/sql/test_toast.sql +++ b/tests/regress/sql/test_toast.sql @@ -9,7 +9,7 @@ SELECT (SELECT FROM generate_series(1,10000)) FROM generate_series(1,10000); -SELECT pg_sleep(20); +SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert toast fail INSERT INTO a5 SELECT (SELECT diff --git a/tests/regress/sql/test_truncate.sql b/tests/regress/sql/test_truncate.sql index 79b2a0f5ad1..057d6aeb155 100644 --- a/tests/regress/sql/test_truncate.sql +++ b/tests/regress/sql/test_truncate.sql @@ -5,12 +5,12 @@ SET search_path TO s7; CREATE TABLE a (i 
int); CREATE TABLE b (i int); INSERT INTO a SELECT generate_series(1,100000); -SELECT pg_sleep(20); +SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail INSERT INTO a SELECT generate_series(1,30); INSERT INTO b SELECT generate_series(1,30); TRUNCATE TABLE a; -SELECT pg_sleep(20); +SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert succeed INSERT INTO a SELECT generate_series(1,30); INSERT INTO b SELECT generate_series(1,30); diff --git a/tests/regress/sql/test_uncommitted_table_size.sql b/tests/regress/sql/test_uncommitted_table_size.sql index 4e682ed3311..0c3804de928 100644 --- a/tests/regress/sql/test_uncommitted_table_size.sql +++ b/tests/regress/sql/test_uncommitted_table_size.sql @@ -2,7 +2,7 @@ begin; CREATE TEMP TABLE t1(i int); INSERT INTO t1 SELECT generate_series(1, 100000); -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 't1'::regclass and segid = -1; SELECT pg_table_size('t1'); commit; @@ -13,7 +13,7 @@ DROP table t1; begin; CREATE TABLE t2(i int); INSERT INTO t2 SELECT generate_series(1, 100000); -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 't2'::regclass and segid = -1; SELECT pg_table_size('t2'); commit; @@ -21,7 +21,7 @@ commit; -- heap table index begin; CREATE INDEX idx2 on t2(i); -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'idx2'::regclass and segid = -1; SELECT pg_table_size('idx2'); commit; @@ -32,7 +32,7 @@ DROP table t2; begin; CREATE TABLE t3(t text); INSERT INTO t3 SELECT repeat('a', 10000) FROM generate_series(1, 1000); -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 't3'::regclass and segid = -1; SELECT pg_table_size('t3'); commit; @@ -43,7 +43,7 @@ DROP table t3; begin; CREATE TABLE ao (i int) WITH (appendonly=true); INSERT INTO ao SELECT generate_series(1, 100000); -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); SELECT (SELECT size FROM diskquota.table_size WHERE tableid = 'ao'::regclass and segid = -1)= (SELECT pg_table_size('ao')); commit; @@ -51,7 +51,7 @@ commit; -- AO table index begin; CREATE INDEX ao_idx on ao(i); -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'ao_idx'::regclass and segid = -1; SELECT pg_table_size('ao_idx'); SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'ao'::regclass and segid = -1; @@ -63,7 +63,7 @@ DROP TABLE ao; -- AO table CTAS begin; CREATE TABLE ao WITH(appendonly=true) AS SELECT generate_series(1, 10000); -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); SELECT (SELECT size FROM diskquota.table_size WHERE tableid = 'ao'::regclass and segid = -1)= (SELECT pg_table_size('ao')); commit; @@ -73,7 +73,7 @@ DROP TABLE ao; begin; CREATE TABLE aocs (i int, t text) WITH (appendonly=true, orientation=column); INSERT INTO aocs SELECT i, repeat('a', 1000) FROM generate_series(1, 10000) AS i; -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'aocs'::regclass and segid = -1; SELECT pg_table_size('aocs'); commit; @@ -81,7 +81,7 @@ commit; -- AOCS 
table index begin; CREATE INDEX aocs_idx on aocs(i); -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'aocs_idx'::regclass and segid = -1; SELECT pg_table_size('aocs_idx'); commit; @@ -91,7 +91,7 @@ DROP TABLE aocs; -- AOCS table CTAS begin; CREATE TABLE aocs WITH(appendonly=true, orientation=column) AS SELECT i, array(select * from generate_series(1,1000)) FROM generate_series(1, 100) AS i; -SELECT pg_sleep(5); +SELECT diskquota.wait_for_worker_new_epoch(); SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'aocs'::regclass and segid = -1; SELECT pg_table_size('aocs'); commit; diff --git a/tests/regress/sql/test_update.sql b/tests/regress/sql/test_update.sql index c33da4bb954..5745f262151 100644 --- a/tests/regress/sql/test_update.sql +++ b/tests/regress/sql/test_update.sql @@ -4,7 +4,7 @@ SELECT diskquota.set_schema_quota('s4', '1 MB'); SET search_path TO s4; CREATE TABLE a(i int); INSERT INTO a SELECT generate_series(1,100000); -SELECT pg_sleep(10); +SELECT diskquota.wait_for_worker_new_epoch(); -- expect update fail. UPDATE a SET i = 100; DROP TABLE a; diff --git a/tests/regress/sql/test_vacuum.sql b/tests/regress/sql/test_vacuum.sql index e4926273914..f2c4cb093fa 100644 --- a/tests/regress/sql/test_vacuum.sql +++ b/tests/regress/sql/test_vacuum.sql @@ -5,14 +5,14 @@ SET search_path TO s6; CREATE TABLE a (i int); CREATE TABLE b (i int); INSERT INTO a SELECT generate_series(1,100000); -SELECT pg_sleep(10); +SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail INSERT INTO a SELECT generate_series(1,10); -- expect insert fail INSERT INTO b SELECT generate_series(1,10); DELETE FROM a WHERE i > 10; VACUUM FULL a; -SELECT pg_sleep(20); +SELECT diskquota.wait_for_worker_new_epoch(); SELECT tableid::regclass, size, segid from diskquota.table_size WHERE tableid::regclass::name NOT LIKE '%.%' ORDER BY size, segid DESC; -- expect insert succeed diff --git a/tests/regress/sql/test_worker_epoch.sql b/tests/regress/sql/test_worker_epoch.sql new file mode 100644 index 00000000000..493b43954fb --- /dev/null +++ b/tests/regress/sql/test_worker_epoch.sql @@ -0,0 +1,5 @@ +-- Test if the UDF returns successfully. +-- NOTE: This test should be the first one since the UDF is supposed +-- to be used in all other tests. + +SELECT diskquota.wait_for_worker_new_epoch(); From ae85ddfb1586fc916165687160c45a8d3a155c44 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Xuebin=20Su=20=28=E8=8B=8F=E5=AD=A6=E6=96=8C=29?= <12034000+xuebinsu@users.noreply.github.com> Date: Wed, 19 Jan 2022 12:03:34 +0800 Subject: [PATCH 105/330] Wait until deleted tuples are dead before VACUUM (#118) Consider a user session that does a DELETE followed by a VACUUM FULL to reclaim the disk space. If, at the same time, the bgworker loads config by doing a SELECT, and the SELECT begins before the DELETE ends but ends after the VACUUM FULL begins: bgw: ---------[ SELECT ]-----------> usr: ---[ DELETE ]-[ VACUUM FULL ]--> then the tuples deleted will be marked as RECENTLY_DEAD instead of DEAD. As a result, the deleted tuples cannot be removed by VACUUM FULL. The fix lets the user session wait for the bgworker to finish the current SELECT before starting VACUUM FULL.
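As an illustration, the pattern the updated tests below follow (a sketch using the same table names as test_vacuum.sql) is to force a worker epoch boundary between the DELETE and the VACUUM FULL:

    DELETE FROM a WHERE i > 10;
    -- wait until any in-flight bgworker SELECT has finished, so the
    -- deleted tuples are seen as DEAD rather than RECENTLY_DEAD
    SELECT diskquota.wait_for_worker_new_epoch();
    VACUUM FULL a;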
--- tests/regress/expected/test_partition.out | 6 ++++++ tests/regress/expected/test_vacuum.out | 6 ++++++ tests/regress/sql/test_partition.sql | 1 + tests/regress/sql/test_vacuum.sql | 1 + 4 files changed, 14 insertions(+) diff --git a/tests/regress/expected/test_partition.out b/tests/regress/expected/test_partition.out index 81e67202426..b095ff05642 100644 --- a/tests/regress/expected/test_partition.out +++ b/tests/regress/expected/test_partition.out @@ -45,6 +45,12 @@ ERROR: schema's disk space quota exceeded with name:s8 INSERT INTO measurement SELECT 1, '2006-03-03' ,1,1; ERROR: schema's disk space quota exceeded with name:s8 DELETE FROM measurement WHERE logdate='2006-03-02'; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + VACUUM FULL measurement; SELECT diskquota.wait_for_worker_new_epoch(); wait_for_worker_new_epoch diff --git a/tests/regress/expected/test_vacuum.out b/tests/regress/expected/test_vacuum.out index db179c654c6..d8319d2b5bb 100644 --- a/tests/regress/expected/test_vacuum.out +++ b/tests/regress/expected/test_vacuum.out @@ -27,6 +27,12 @@ ERROR: schema's disk space quota exceeded with name:s6 INSERT INTO b SELECT generate_series(1,10); ERROR: schema's disk space quota exceeded with name:s6 DELETE FROM a WHERE i > 10; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + VACUUM FULL a; SELECT diskquota.wait_for_worker_new_epoch(); wait_for_worker_new_epoch diff --git a/tests/regress/sql/test_partition.sql b/tests/regress/sql/test_partition.sql index 095b36f1e43..2409f59854f 100644 --- a/tests/regress/sql/test_partition.sql +++ b/tests/regress/sql/test_partition.sql @@ -25,6 +25,7 @@ INSERT INTO measurement SELECT 1, '2006-02-02' ,1,1; -- expect insert fail INSERT INTO measurement SELECT 1, '2006-03-03' ,1,1; DELETE FROM measurement WHERE logdate='2006-03-02'; +SELECT diskquota.wait_for_worker_new_epoch(); VACUUM FULL measurement; SELECT diskquota.wait_for_worker_new_epoch(); INSERT INTO measurement SELECT 1, '2006-02-02' ,1,1; diff --git a/tests/regress/sql/test_vacuum.sql b/tests/regress/sql/test_vacuum.sql index f2c4cb093fa..2ddc35ed29b 100644 --- a/tests/regress/sql/test_vacuum.sql +++ b/tests/regress/sql/test_vacuum.sql @@ -11,6 +11,7 @@ INSERT INTO a SELECT generate_series(1,10); -- expect insert fail INSERT INTO b SELECT generate_series(1,10); DELETE FROM a WHERE i > 10; +SELECT diskquota.wait_for_worker_new_epoch(); VACUUM FULL a; SELECT diskquota.wait_for_worker_new_epoch(); SELECT tableid::regclass, size, segid from diskquota.table_size WHERE tableid::regclass::name NOT LIKE '%.%' ORDER BY size, segid DESC; From d04e5ef1bebe11534bed763786e49ea4bb4d686c Mon Sep 17 00:00:00 2001 From: Xing Guo Date: Wed, 19 Jan 2022 20:58:13 +0800 Subject: [PATCH 106/330] Make altered relation's oid active when performing 'VACUUM FULL'. (#116) When doing VACUUM FULL, the table size may not be updated if the table's oid is pulled before its relfilenode is swapped. This fix keeps the table's oid in shared memory if the table is being altered, i.e., is locked in ACCESS EXCLUSIVE mode.
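Condensed from the isolation test added in this patch (a sketch; the fault-injection steps that freeze the bgworker are omitted), the invariant being protected is that diskquota.table_size converges to pg_table_size() even when the bgworker runs concurrently with the relfilenode swap:

    CREATE TABLE dummy_t1(i int);
    INSERT INTO dummy_t1 SELECT generate_series(1, 1000);
    DELETE FROM dummy_t1;
    VACUUM FULL dummy_t1;  -- swaps in a new relfilenode
    SELECT diskquota.wait_for_worker_new_epoch();
    -- both should now report the reclaimed size
    SELECT pg_table_size('dummy_t1');
    SELECT size FROM diskquota.table_size
     WHERE tableid = 'dummy_t1'::regclass AND segid = -1;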
Co-authored-by: Xuebin Su --- diskquota.h | 1 + gp_activetable.c | 159 ++++++++++++++++++-- gp_activetable.h | 1 + quotamodel.c | 2 + tests/isolation2/expected/test_truncate.out | 79 ++++++++++ tests/isolation2/expected/test_vacuum.out | 92 +++++++++++ tests/isolation2/isolation2_schedule | 2 + tests/isolation2/sql/test_truncate.sql | 29 ++++ tests/isolation2/sql/test_vacuum.sql | 41 +++++ 9 files changed, 396 insertions(+), 10 deletions(-) create mode 100644 tests/isolation2/expected/test_truncate.out create mode 100644 tests/isolation2/expected/test_vacuum.out create mode 100644 tests/isolation2/sql/test_truncate.sql create mode 100644 tests/isolation2/sql/test_vacuum.sql diff --git a/diskquota.h b/diskquota.h index d3d9c415bf6..bc54a69dd5e 100644 --- a/diskquota.h +++ b/diskquota.h @@ -40,6 +40,7 @@ struct DiskQuotaLocks LWLock *relation_cache_lock; LWLock *hardlimit_lock; LWLock *worker_map_lock; + LWLock *altered_reloid_cache_lock; }; typedef struct DiskQuotaLocks DiskQuotaLocks; #define DiskQuotaLocksItemNumber (sizeof(DiskQuotaLocks) / sizeof(void*)) diff --git a/gp_activetable.c b/gp_activetable.c index 573624e0ac8..00cb45a6dba 100644 --- a/gp_activetable.c +++ b/gp_activetable.c @@ -17,6 +17,7 @@ #include "access/htup_details.h" #include "access/xact.h" +#include "catalog/catalog.h" #include "catalog/indexing.h" #include "catalog/pg_class.h" #include "catalog/pg_namespace.h" @@ -58,6 +59,7 @@ typedef struct DiskQuotaSetOFCache HTAB *active_tables_map = NULL; HTAB *monitoring_dbid_cache = NULL; +HTAB *altered_reloid_cache = NULL; /* active table hooks which detect the disk file size change. */ static file_create_hook_type prev_file_create_hook = NULL; @@ -80,6 +82,7 @@ static StringInfoData convert_map_to_string(HTAB *active_list); static void load_table_size(HTAB *local_table_stats_map); static void report_active_table_helper(const RelFileNodeBackend *relFileNode); static void report_relation_cache_helper(Oid relid); +static void report_altered_reloid(Oid reloid); void init_active_table_hook(void); void init_shm_worker_active_tables(void); @@ -95,16 +98,24 @@ init_shm_worker_active_tables(void) HASHCTL ctl; memset(&ctl, 0, sizeof(ctl)); - ctl.keysize = sizeof(DiskQuotaActiveTableFileEntry); ctl.entrysize = sizeof(DiskQuotaActiveTableFileEntry); ctl.hash = tag_hash; - active_tables_map = ShmemInitHash("active_tables", diskquota_max_active_tables, diskquota_max_active_tables, &ctl, HASH_ELEM | HASH_FUNCTION); + + memset(&ctl, 0, sizeof(ctl)); + ctl.keysize = sizeof(Oid); + ctl.entrysize = sizeof(Oid); + ctl.hash = tag_hash; + altered_reloid_cache = ShmemInitHash("altered_reloid_cache", + diskquota_max_active_tables, + diskquota_max_active_tables, + &ctl, + HASH_ELEM | HASH_FUNCTION); } /* @@ -138,6 +149,7 @@ active_table_hook_smgrcreate(RelFileNodeBackend rnode) if (prev_file_create_hook) (*prev_file_create_hook) (rnode); + SIMPLE_FAULT_INJECTOR("diskquota_after_smgrcreate"); report_active_table_helper(&rnode); } @@ -168,7 +180,7 @@ active_table_hook_smgrtruncate(RelFileNodeBackend rnode) report_active_table_helper(&rnode); } -static void +static void active_table_hook_smgrunlink(RelFileNodeBackend rnode) { if (prev_file_unlink_hook) @@ -194,12 +206,33 @@ object_access_hook_QuotaStmt(ObjectAccessType access, Oid classId, Oid objectId, return; } - if (access != OAT_POST_CREATE) + switch (access) { - return; + case OAT_POST_CREATE: + report_relation_cache_helper(objectId); + break; + case OAT_POST_ALTER: + SIMPLE_FAULT_INJECTOR("object_access_post_alter"); + 
report_altered_reloid(objectId); + break; + default: + break; } +} - report_relation_cache_helper(objectId); +static void +report_altered_reloid(Oid reloid) +{ + /* + * We don't collect altered relations' reloid on mirrors + * and QD. + */ + if (IsRoleMirror() || IS_QUERY_DISPATCHER()) + return; + + LWLockAcquire(diskquota_locks.altered_reloid_cache_lock, LW_EXCLUSIVE); + hash_search(altered_reloid_cache, &reloid, HASH_ENTER, NULL); + LWLockRelease(diskquota_locks.altered_reloid_cache_lock); } static void @@ -564,6 +597,34 @@ get_active_tables_stats(ArrayType *array) return local_table; } +/* + * SetLocktagRelationOid + * Set up a locktag for a relation, given only relation OID + */ +static inline void +SetLocktagRelationOid(LOCKTAG *tag, Oid relid) +{ + Oid dbid; + + if (IsSharedRelation(relid)) + dbid = InvalidOid; + else + dbid = MyDatabaseId; + + SET_LOCKTAG_RELATION(*tag, dbid, relid); +} + +static bool +is_relation_being_altered(Oid relid) +{ + LOCKTAG locktag; + SetLocktagRelationOid(&locktag, relid); + VirtualTransactionId *vxid_list = GetLockConflicts(&locktag, AccessShareLock); + bool being_altered = VirtualTransactionIdIsValid(*vxid_list); /* if vxid_list is empty */ + pfree(vxid_list); + return being_altered; +} + /* * Get local active table with table oid and table size info. * This function first copies active table map from shared memory @@ -577,9 +638,11 @@ get_active_tables_oid(void) HASHCTL ctl; HTAB *local_active_table_file_map = NULL; HTAB *local_active_table_stats_map = NULL; + HTAB *local_altered_reloid_cache = NULL; HASH_SEQ_STATUS iter; DiskQuotaActiveTableFileEntry *active_table_file_entry; DiskQuotaActiveTableEntry *active_table_entry; + Oid *altered_reloid_entry; Oid relOid; @@ -588,12 +651,21 @@ get_active_tables_oid(void) ctl.entrysize = sizeof(DiskQuotaActiveTableFileEntry); ctl.hcxt = CurrentMemoryContext; ctl.hash = tag_hash; - local_active_table_file_map = hash_create("local active table map with relfilenode info", 1024, &ctl, HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); + memset(&ctl, 0, sizeof(ctl)); + ctl.keysize = sizeof(Oid); + ctl.entrysize = sizeof(Oid); + ctl.hcxt = CurrentMemoryContext; + ctl.hash = tag_hash; + local_altered_reloid_cache = hash_create("local_altered_reloid_cache", + 1024, + &ctl, + HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); + /* Move active table from shared memory to local active table map */ LWLockAcquire(diskquota_locks.active_table_lock, LW_EXCLUSIVE); @@ -616,6 +688,7 @@ get_active_tables_oid(void) *entry = *active_table_file_entry; hash_search(active_tables_map, active_table_file_entry, HASH_REMOVE, NULL); } + // TODO: hash_seq_term(&iter); LWLockRelease(diskquota_locks.active_table_lock); memset(&ctl, 0, sizeof(ctl)); @@ -648,7 +721,7 @@ get_active_tables_oid(void) rnode.relNode = active_table_file_entry->relfilenode; rnode.spcNode = active_table_file_entry->tablespaceoid; relOid = get_relid_by_relfilenode(rnode); - + if (relOid != InvalidOid) { prelid = get_primary_table_oid(relOid); @@ -660,10 +733,76 @@ get_active_tables_oid(void) active_table_entry->tablesize = 0; active_table_entry->segid = -1; } - hash_search(local_active_table_file_map, active_table_file_entry, HASH_REMOVE, NULL); + if (!is_relation_being_altered(relOid)) + hash_search(local_active_table_file_map, active_table_file_entry, HASH_REMOVE, NULL); } } + // TODO: hash_seq_term(&iter); + + /* Adding the remaining relfilenodes back to the map in the shared memory */ + LWLockAcquire(diskquota_locks.active_table_lock, LW_EXCLUSIVE); + hash_seq_init(&iter, 
local_active_table_file_map); + while ((active_table_file_entry = (DiskQuotaActiveTableFileEntry *) hash_seq_search(&iter)) != NULL) + { + /* TODO: handle possible ERROR here so that the bgworker will not go down. */ + hash_search(active_tables_map, active_table_file_entry, HASH_ENTER, NULL); + } + /* TODO: hash_seq_term(&iter); */ + LWLockRelease(diskquota_locks.active_table_lock); + + + LWLockAcquire(diskquota_locks.altered_reloid_cache_lock, LW_SHARED); + hash_seq_init(&iter, altered_reloid_cache); + while ((altered_reloid_entry = (Oid *) hash_seq_search(&iter)) != NULL) + { + bool found; + Oid altered_oid = *altered_reloid_entry; + if (OidIsValid(*altered_reloid_entry)) + { + active_table_entry = hash_search(local_active_table_stats_map, + &altered_oid, + HASH_ENTER, &found); + if (!found && active_table_entry) + { + active_table_entry->reloid = altered_oid; + /* We don't care segid and tablesize here. */ + active_table_entry->tablesize = 0; + active_table_entry->segid = -1; + } + } + hash_search(local_altered_reloid_cache, + &altered_oid, HASH_ENTER, NULL); + } + LWLockRelease(diskquota_locks.altered_reloid_cache_lock); + + hash_seq_init(&iter, local_altered_reloid_cache); + while ((altered_reloid_entry = (Oid *) hash_seq_search(&iter)) != NULL) + { + if (OidIsValid(*altered_reloid_entry) && + !is_relation_being_altered(*altered_reloid_entry)) + { + hash_search(local_altered_reloid_cache, + altered_reloid_entry, HASH_REMOVE, NULL); + } + } + + LWLockAcquire(diskquota_locks.altered_reloid_cache_lock, LW_EXCLUSIVE); + hash_seq_init(&iter, altered_reloid_cache); + while ((altered_reloid_entry = (Oid *) hash_seq_search(&iter)) != NULL) + { + bool found; + Oid altered_reloid = *altered_reloid_entry; + hash_search(local_altered_reloid_cache, &altered_reloid, + HASH_FIND, &found); + if (!found) + { + hash_search(altered_reloid_cache, &altered_reloid, + HASH_REMOVE, NULL); + } + } + LWLockRelease(diskquota_locks.altered_reloid_cache_lock); + /* * If cannot convert relfilenode to relOid, put them back to shared memory * and wait for the next check. @@ -684,6 +823,7 @@ get_active_tables_oid(void) LWLockRelease(diskquota_locks.active_table_lock); } hash_destroy(local_active_table_file_map); + hash_destroy(local_altered_reloid_cache); return local_active_table_stats_map; } @@ -804,7 +944,6 @@ convert_map_to_string(HTAB *local_active_table_oid_maps) return buffer; } - /* * Get active table size from all the segments based on * active table oid list. 
diff --git a/gp_activetable.h b/gp_activetable.h index 09bdebf250e..5b194f26c76 100644 --- a/gp_activetable.h +++ b/gp_activetable.h @@ -32,6 +32,7 @@ extern void init_lock_active_tables(void); extern HTAB *active_tables_map; extern HTAB *monitoring_dbid_cache; +extern HTAB *altered_reloid_cache; #define atooid(x) ((Oid) strtoul((x), NULL, 10)) diff --git a/quotamodel.c b/quotamodel.c index 75d2ac69a8d..d741a1d7454 100644 --- a/quotamodel.c +++ b/quotamodel.c @@ -515,6 +515,7 @@ init_lwlocks(void) diskquota_locks.relation_cache_lock = LWLockAssign(); diskquota_locks.hardlimit_lock = LWLockAssign(); diskquota_locks.worker_map_lock = LWLockAssign(); + diskquota_locks.altered_reloid_cache_lock = LWLockAssign(); } /* @@ -533,6 +534,7 @@ DiskQuotaShmemSize(void) size = add_size(size, hash_estimate_size(diskquota_max_active_tables, sizeof(DiskQuotaRelidCacheEntry))); size = add_size(size, hash_estimate_size(MAX_NUM_MONITORED_DB, sizeof(Oid))); size = add_size(size, hash_estimate_size(MAX_NUM_MONITORED_DB, sizeof(DiskQuotaWorkerEntry))); + size = add_size(size, hash_estimate_size(diskquota_max_active_tables, sizeof(Oid))); size += sizeof(bool); /* sizeof(*diskquota_paused) */ size += sizeof(bool); /* sizeof(*diskquota_hardlimit) */ return size; diff --git a/tests/isolation2/expected/test_truncate.out b/tests/isolation2/expected/test_truncate.out new file mode 100644 index 00000000000..dd195548485 --- /dev/null +++ b/tests/isolation2/expected/test_truncate.out @@ -0,0 +1,79 @@ +-- Test various race conditions for TRUNCATE. + +-- Case 1: Pulling active table before swapping relfilenode +CREATE TABLE dummy_t1(i int); +CREATE +INSERT INTO dummy_t1 SELECT generate_series(1, 1000); +INSERT 1000 +-- Wait for the diskquota bgworker refreshing the size of 'dummy_t1'. +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +-- Shows that the result of pg_table_size() and diskquota.table_size are identical. +SELECT pg_table_size('dummy_t1'); + pg_table_size +--------------- + 98304 +(1 row) +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid='dummy_t1'::regclass ORDER BY segid; + tableid | size | segid +----------+-------+------- + dummy_t1 | 98304 | -1 + dummy_t1 | 32768 | 0 + dummy_t1 | 32768 | 1 + dummy_t1 | 32768 | 2 +(4 rows) + +SELECT gp_inject_fault_infinite('diskquota_after_smgrcreate', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content<>-1; + gp_inject_fault_infinite +-------------------------- + Success: + Success: + Success: +(3 rows) +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +1&: TRUNCATE dummy_t1; +-- Wait for the diskquota bgworker 'consumes' the newly created relfilenode from shmem. +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +SELECT gp_inject_fault_infinite('diskquota_after_smgrcreate', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content<>-1; + gp_inject_fault_infinite +-------------------------- + Success: + Success: + Success: +(3 rows) +1<: <... completed> +TRUNCATE + +-- Wait for the diskquota bgworker refreshing the size of 'dummy_t1'. +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +-- Shows that the result of pg_table_size() and diskquota.table_size are identical. 
+SELECT pg_table_size('dummy_t1'); + pg_table_size +--------------- + 0 +(1 row) +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid='dummy_t1'::regclass ORDER BY segid; + tableid | size | segid +----------+------+------- + dummy_t1 | 0 | -1 + dummy_t1 | 0 | 0 + dummy_t1 | 0 | 1 + dummy_t1 | 0 | 2 +(4 rows) +DROP TABLE dummy_t1; +DROP diff --git a/tests/isolation2/expected/test_vacuum.out b/tests/isolation2/expected/test_vacuum.out new file mode 100644 index 00000000000..978aaf2c446 --- /dev/null +++ b/tests/isolation2/expected/test_vacuum.out @@ -0,0 +1,92 @@ +-- This file tests various race conditions when performing 'VACUUM FULL'. + +-- 1. When the gpdb is performing 'VACUUM FULL' on some relation, it can be summarized +-- as the following 3 steps: +-- s1) create a new temporary relation (smgrcreate hook will be triggered, newly +-- created relfilenode will be put into shmem). +-- s2) insert data into the newly created relation from the old relation (smgrextend +-- hook will be triggered, newly created relfilenode will be put into shmem). +-- s3) change the old relation's relfilenode to the newly created one. +-- Consider the following situation: +-- If the diskquota bgworker pulls active oids before the 'VACUUM FULL' operation finishing, +-- the newly created relfilenode is translated to the newly created temporary relation's oid, +-- the old relation's size cannot be updated. We resolve it by making altered relations' oids +-- constantly active so that the diskquota bgworker keeps updating the altered relation size +-- during 'VACUUM FULL'. +CREATE TABLE dummy_t1(i int); +CREATE +INSERT INTO dummy_t1 SELECT generate_series(1, 1000); +INSERT 1000 +DELETE FROM dummy_t1; +DELETE 1000 +-- Wait for the diskquota bgworker refreshing the size of 'dummy_t1'. +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +-- Shows that the result of pg_table_size() and diskquota.table_size are identical. +SELECT pg_table_size('dummy_t1'); + pg_table_size +--------------- + 98304 +(1 row) +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid='dummy_t1'::regclass ORDER BY segid; + tableid | size | segid +----------+-------+------- + dummy_t1 | 98304 | -1 + dummy_t1 | 32768 | 0 + dummy_t1 | 32768 | 1 + dummy_t1 | 32768 | 2 +(4 rows) +SELECT gp_inject_fault_infinite('object_access_post_alter', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content<>-1; + gp_inject_fault_infinite +-------------------------- + Success: + Success: + Success: +(3 rows) +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +1&: VACUUM FULL dummy_t1; +-- Wait for the diskquota bgworker 'consumes' the newly created relfilenode from shmem. +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +SELECT gp_inject_fault_infinite('object_access_post_alter', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content<>-1; + gp_inject_fault_infinite +-------------------------- + Success: + Success: + Success: +(3 rows) +1<: <... completed> +VACUUM + +-- Wait for the diskquota bgworker refreshing the size of 'dummy_t1'. +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +-- Shows that the result of pg_table_size() and diskquota.table_size are identical. 
+SELECT pg_table_size('dummy_t1'); + pg_table_size +--------------- + 0 +(1 row) +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid='dummy_t1'::regclass ORDER BY segid; + tableid | size | segid +----------+------+------- + dummy_t1 | 0 | -1 + dummy_t1 | 0 | 0 + dummy_t1 | 0 | 1 + dummy_t1 | 0 | 2 +(4 rows) +DROP TABLE dummy_t1; +DROP diff --git a/tests/isolation2/isolation2_schedule b/tests/isolation2/isolation2_schedule index 85ec69a9961..c300d301330 100644 --- a/tests/isolation2/isolation2_schedule +++ b/tests/isolation2/isolation2_schedule @@ -2,4 +2,6 @@ test: init test: prepare test: test_relation_size test: test_blackmap +test: test_vacuum +test: test_truncate test: cleanup diff --git a/tests/isolation2/sql/test_truncate.sql b/tests/isolation2/sql/test_truncate.sql new file mode 100644 index 00000000000..957d554e8f6 --- /dev/null +++ b/tests/isolation2/sql/test_truncate.sql @@ -0,0 +1,29 @@ +-- Test various race conditions for TRUNCATE. + +-- Case 1: Pulling active table before swapping relfilenode +CREATE TABLE dummy_t1(i int); +INSERT INTO dummy_t1 SELECT generate_series(1, 1000); +-- Wait for the diskquota bgworker refreshing the size of 'dummy_t1'. +SELECT diskquota.wait_for_worker_new_epoch(); +-- Shows that the result of pg_table_size() and diskquota.table_size are identical. +SELECT pg_table_size('dummy_t1'); +SELECT tableid::regclass, size, segid FROM diskquota.table_size + WHERE tableid='dummy_t1'::regclass ORDER BY segid; + +SELECT gp_inject_fault_infinite('diskquota_after_smgrcreate', 'suspend', dbid) + FROM gp_segment_configuration WHERE role='p' AND content<>-1; +SELECT diskquota.wait_for_worker_new_epoch(); +1&: TRUNCATE dummy_t1; +-- Wait for the diskquota bgworker 'consumes' the newly created relfilenode from shmem. +SELECT diskquota.wait_for_worker_new_epoch(); +SELECT gp_inject_fault_infinite('diskquota_after_smgrcreate', 'reset', dbid) + FROM gp_segment_configuration WHERE role='p' AND content<>-1; +1<: + +-- Wait for the diskquota bgworker refreshing the size of 'dummy_t1'. +SELECT diskquota.wait_for_worker_new_epoch(); +-- Shows that the result of pg_table_size() and diskquota.table_size are identical. +SELECT pg_table_size('dummy_t1'); +SELECT tableid::regclass, size, segid FROM diskquota.table_size + WHERE tableid='dummy_t1'::regclass ORDER BY segid; +DROP TABLE dummy_t1; diff --git a/tests/isolation2/sql/test_vacuum.sql b/tests/isolation2/sql/test_vacuum.sql new file mode 100644 index 00000000000..c724e50a8d8 --- /dev/null +++ b/tests/isolation2/sql/test_vacuum.sql @@ -0,0 +1,41 @@ +-- This file tests various race conditions when performing 'VACUUM FULL'. + +-- 1. When the gpdb is performing 'VACUUM FULL' on some relation, it can be summarized +-- as the following 3 steps: +-- s1) create a new temporary relation (smgrcreate hook will be triggered, newly +-- created relfilenode will be put into shmem). +-- s2) insert data into the newly created relation from the old relation (smgrextend +-- hook will be triggered, newly created relfilenode will be put into shmem). +-- s3) change the old relation's relfilenode to the newly created one. +-- Consider the following situation: +-- If the diskquota bgworker pulls active oids before the 'VACUUM FULL' operation finishing, +-- the newly created relfilenode is translated to the newly created temporary relation's oid, +-- the old relation's size cannot be updated. 
We resolve it by making altered relations' oids +-- constantly active so that the diskquota bgworker keeps updating the altered relation size +-- during 'VACUUM FULL'. +CREATE TABLE dummy_t1(i int); +INSERT INTO dummy_t1 SELECT generate_series(1, 1000); +DELETE FROM dummy_t1; +-- Wait for the diskquota bgworker refreshing the size of 'dummy_t1'. +SELECT diskquota.wait_for_worker_new_epoch(); +-- Shows that the result of pg_table_size() and diskquota.table_size are identical. +SELECT pg_table_size('dummy_t1'); +SELECT tableid::regclass, size, segid FROM diskquota.table_size + WHERE tableid='dummy_t1'::regclass ORDER BY segid; +SELECT gp_inject_fault_infinite('object_access_post_alter', 'suspend', dbid) + FROM gp_segment_configuration WHERE role='p' AND content<>-1; +SELECT diskquota.wait_for_worker_new_epoch(); +1&: VACUUM FULL dummy_t1; +-- Wait for the diskquota bgworker 'consumes' the newly created relfilenode from shmem. +SELECT diskquota.wait_for_worker_new_epoch(); +SELECT gp_inject_fault_infinite('object_access_post_alter', 'reset', dbid) + FROM gp_segment_configuration WHERE role='p' AND content<>-1; +1<: + +-- Wait for the diskquota bgworker refreshing the size of 'dummy_t1'. +SELECT diskquota.wait_for_worker_new_epoch(); +-- Shows that the result of pg_table_size() and diskquota.table_size are identical. +SELECT pg_table_size('dummy_t1'); +SELECT tableid::regclass, size, segid FROM diskquota.table_size + WHERE tableid='dummy_t1'::regclass ORDER BY segid; +DROP TABLE dummy_t1; From 5ebc81d50068ee7ff8291393d90d158997729ebf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Xuebin=20Su=20=28=E8=8B=8F=E5=AD=A6=E6=96=8C=29?= <12034000+xuebinsu@users.noreply.github.com> Date: Thu, 20 Jan 2022 14:05:19 +0800 Subject: [PATCH 107/330] Refactor pause() to skip refreshing quota (#119) Currently, diskquota.pause() only takes effect on quota checking. Bgworkers still go through the loop to refresh quota even if diskquota is paused. This wastes computation resources and can cause flaky issues. To avoid those issues, this fix makes bgworkers skip refreshing quota entirely when the user pauses diskquota. Table sizes are still updated correctly after resume.
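For example (a sketch following the updated test_pause_and_resume.sql in this patch; note the worker still advances its epoch while paused, so wait_for_worker_new_epoch() stays usable):

    SELECT diskquota.pause();
    SELECT diskquota.wait_for_worker_new_epoch();
    INSERT INTO a SELECT generate_series(1,100000); -- succeeds while paused
    SELECT diskquota.resume();
    SELECT diskquota.wait_for_worker_new_epoch();
    -- the recorded size of 'a' now includes the rows inserted while paused
    SELECT tableid::regclass, size, segid FROM diskquota.table_size
     WHERE tableid = 'a'::regclass AND segid = -1;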
--- diskquota.c | 12 +++++- diskquota.h | 1 + quotamodel.c | 7 +--- tests/regress/diskquota_schedule | 2 +- .../expected/test_pause_and_resume.out | 42 +++++++++++++------ tests/regress/sql/test_pause_and_resume.sql | 21 +++++----- 6 files changed, 54 insertions(+), 31 deletions(-) diff --git a/diskquota.c b/diskquota.c index 3cd7b861961..29534b9a3d6 100644 --- a/diskquota.c +++ b/diskquota.c @@ -79,6 +79,15 @@ static int num_db = 0; */ bool *diskquota_paused = NULL; +bool +diskquota_is_paused() +{ + LWLockAcquire(diskquota_locks.paused_lock, LW_SHARED); + bool paused = *diskquota_paused; + LWLockRelease(diskquota_locks.paused_lock); + return paused; +} + /* functions of disk quota*/ void _PG_init(void); void _PG_fini(void); @@ -365,7 +374,8 @@ disk_quota_worker_main(Datum main_arg) } /* Do the work */ - refresh_disk_quota_model(false); + if (!diskquota_is_paused()) + refresh_disk_quota_model(false); worker_increase_epoch(MyDatabaseId); } diff --git a/diskquota.h b/diskquota.h index bc54a69dd5e..ee5ca02e9e6 100644 --- a/diskquota.h +++ b/diskquota.h @@ -145,5 +145,6 @@ extern Oid diskquota_parse_primary_table_oid(Oid namespace, char *relname); extern bool worker_increase_epoch(Oid database_oid); extern unsigned int worker_get_epoch(Oid database_oid); +extern bool diskquota_is_paused(); #endif diff --git a/quotamodel.c b/quotamodel.c index d741a1d7454..ddc1645a9a1 100644 --- a/quotamodel.c +++ b/quotamodel.c @@ -1599,17 +1599,12 @@ check_blackmap_by_reloid(Oid reloid) bool quota_check_common(Oid reloid, RelFileNode *relfilenode) { - bool paused; bool enable_hardlimit; if (!IsTransactionState()) return true; - LWLockAcquire(diskquota_locks.paused_lock, LW_SHARED); - paused = *diskquota_paused; - LWLockRelease(diskquota_locks.paused_lock); - - if (paused) + if (diskquota_is_paused()) return true; if (OidIsValid(reloid)) diff --git a/tests/regress/diskquota_schedule b/tests/regress/diskquota_schedule index 7cc4bd64790..bfca21a78c0 100644 --- a/tests/regress/diskquota_schedule +++ b/tests/regress/diskquota_schedule @@ -4,6 +4,7 @@ test: test_worker_epoch test: test_relation_size test: test_relation_cache test: test_uncommitted_table_size +test: test_pause_and_resume # disable this tese due to GPDB behavior change # test: test_table_size test: test_fast_disk_check @@ -16,7 +17,6 @@ test: test_vacuum test: test_primary_failure test: test_extension test: test_manytable -test: test_pause_and_resume test: test_many_active_tables test: test_fetch_table_stat test: test_appendonly diff --git a/tests/regress/expected/test_pause_and_resume.out b/tests/regress/expected/test_pause_and_resume.out index 36edad17637..a2ecfd94c26 100644 --- a/tests/regress/expected/test_pause_and_resume.out +++ b/tests/regress/expected/test_pause_and_resume.out @@ -4,9 +4,6 @@ SET search_path TO s1; CREATE TABLE a(i int); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. -CREATE TABLE b(i int); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
-- expect insert succeed INSERT INTO a SELECT generate_series(1,100000); SELECT diskquota.set_schema_quota('s1', '1 MB'); @@ -24,9 +21,6 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail INSERT INTO a SELECT generate_series(1,100); ERROR: schema's disk space quota exceeded with name:s1 --- expect insert fail -INSERT INTO b SELECT generate_series(1,100); -ERROR: schema's disk space quota exceeded with name:s1 -- pause extension SELECT diskquota.pause(); pause @@ -34,10 +28,21 @@ SELECT diskquota.pause(); (1 row) +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size +WHERE tableid = 'a'::regclass AND segid = -1; + tableid | size | segid +---------+---------+------- + a | 3932160 | -1 +(1 row) + -- expect insert succeed -INSERT INTO a SELECT generate_series(1,100); --- expect insert succeed -INSERT INTO b SELECT generate_series(1,100); +INSERT INTO a SELECT generate_series(1,100000); -- resume extension SELECT diskquota.resume(); resume @@ -45,12 +50,23 @@ SELECT diskquota.resume(); (1 row) +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + -- expect insert fail INSERT INTO a SELECT generate_series(1,100); ERROR: schema's disk space quota exceeded with name:s1 --- expect insert fail -INSERT INTO b SELECT generate_series(1,100); -ERROR: schema's disk space quota exceeded with name:s1 +-- table size should be updated after resume +SELECT tableid::regclass, size, segid FROM diskquota.table_size +WHERE tableid = 'a'::regclass AND segid = -1; + tableid | size | segid +---------+---------+------- + a | 7569408 | -1 +(1 row) + RESET search_path; -DROP TABLE s1.a, s1.b; +DROP TABLE s1.a; DROP SCHEMA s1; diff --git a/tests/regress/sql/test_pause_and_resume.sql b/tests/regress/sql/test_pause_and_resume.sql index 0f82f51a127..00bfafb1466 100644 --- a/tests/regress/sql/test_pause_and_resume.sql +++ b/tests/regress/sql/test_pause_and_resume.sql @@ -3,7 +3,6 @@ CREATE SCHEMA s1; SET search_path TO s1; CREATE TABLE a(i int); -CREATE TABLE b(i int); -- expect insert succeed INSERT INTO a SELECT generate_series(1,100000); @@ -12,26 +11,28 @@ SELECT diskquota.set_schema_quota('s1', '1 MB'); SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail INSERT INTO a SELECT generate_series(1,100); --- expect insert fail -INSERT INTO b SELECT generate_series(1,100); -- pause extension SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); + +SELECT tableid::regclass, size, segid FROM diskquota.table_size +WHERE tableid = 'a'::regclass AND segid = -1; -- expect insert succeed -INSERT INTO a SELECT generate_series(1,100); --- expect insert succeed -INSERT INTO b SELECT generate_series(1,100); +INSERT INTO a SELECT generate_series(1,100000); -- resume extension SELECT diskquota.resume(); +SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail INSERT INTO a SELECT generate_series(1,100); --- expect insert fail -INSERT INTO b SELECT generate_series(1,100); + +-- table size should be updated after resume +SELECT tableid::regclass, size, segid FROM diskquota.table_size +WHERE tableid = 'a'::regclass AND segid = -1; RESET search_path; -DROP TABLE s1.a, s1.b; +DROP TABLE s1.a; DROP SCHEMA s1; - From 9b027b4b44070dc351a407867228e465076ce219 Mon Sep 17 00:00:00 2001 From: Sasasu Date: Mon, 24 Jan 2022 12:29:57 +0800 Subject: [PATCH 108/330] ci: create rhel8 release build. 
(#117) ci: create rhel8 release build. Signed-off-by: Sasasu Co-authored-by: Xuebin Su --- concourse/pipeline/pipeline.yml | 48 ++++++++++- concourse/pipeline/release_pipeline.yml | 109 +++++++++++++++++++++--- concourse/scripts/build_diskquota.sh | 11 ++- 3 files changed, 152 insertions(+), 16 deletions(-) diff --git a/concourse/pipeline/pipeline.yml b/concourse/pipeline/pipeline.yml index 12397adb87c..9f1f7fbfcf2 100644 --- a/concourse/pipeline/pipeline.yml +++ b/concourse/pipeline/pipeline.yml @@ -13,6 +13,7 @@ groups: jobs: - diskquota_centos6_build_test - diskquota_centos7_build_test + - diskquota_rhel8_build_test - diskquota_ubuntu18_build_test - name: GPDB6_UPGRADE jobs: @@ -40,6 +41,22 @@ resources: repository: gcr.io/data-gpdb-public-images/gpdb5-centos7-build-test tag: latest +- name: rhel-image-dev-8 + type: registry-image + source: + repository: gcr.io/data-gpdb-private-images/gpdb6-rhel8-test + tag: latest + username: _json_key + password: ((container-registry-readonly-service-account-key)) + +- name: rhel-image-build-8 + type: registry-image + source: + repository: gcr.io/data-gpdb-private-images/gpdb6-rhel8-build + tag: latest + username: _json_key + password: ((container-registry-readonly-service-account-key)) + - name: ubuntu18-image-build type: registry-image source: @@ -88,13 +105,18 @@ resources: bucket: {{gcs-bucket-intermediates}} json_key: {{concourse-gcs-resources-service-account-key}} versioned_file: ((gcs_gpdb_binary_folder))/bin_gpdb_centos6/bin_gpdb.tar.gz +- name: bin_gpdb_rhel8 + type: gcs + source: + bucket: {{gcs-bucket-intermediates}} + json_key: {{concourse-gcs-resources-service-account-key}} + versioned_file: ((gcs_gpdb_binary_folder))/bin_gpdb_rhel8/bin_gpdb.tar.gz - name: bin_gpdb_centos7 type: gcs source: bucket: {{gcs-bucket-intermediates}} json_key: {{concourse-gcs-resources-service-account-key}} versioned_file: ((gcs_gpdb_binary_folder))/bin_gpdb_centos7/bin_gpdb.tar.gz - - name: bin_gpdb_ubuntu18 type: gcs source: @@ -133,6 +155,30 @@ jobs: params: file: diskquota_artifacts/diskquota*.tar.gz +- name: diskquota_rhel8_build_test + max_in_flight: 3 + plan: + - aggregate: + - get: rhel-image-dev-8 + - get: rhel-image-build-8 + - get: diskquota_src + trigger: true + - get: bin_gpdb + resource: bin_gpdb_rhel8 + - get: gpdb_src + - task: build_diskquota + file: diskquota_src/concourse/tasks/build_diskquota.yml + image: rhel-image-build-8 + params: + DISKQUOTA_OS: rhel8 + - task: test_diskquota + file: diskquota_src/concourse/tasks/test_diskquota.yml + image: rhel-image-dev-8 + input_mapping: + bin_diskquota: diskquota_artifacts + params: + DISKQUOTA_OS: rhel8 + - name: diskquota_centos6_build_test max_in_flight: 3 plan: diff --git a/concourse/pipeline/release_pipeline.yml b/concourse/pipeline/release_pipeline.yml index bfecd95d188..3c2665b4a0a 100644 --- a/concourse/pipeline/release_pipeline.yml +++ b/concourse/pipeline/release_pipeline.yml @@ -8,9 +8,11 @@ groups: - release_centos6 - release_centos7 - release_ubuntu18 + - release_rhel-8 - diskquota_centos6_test_release - diskquota_centos7_test_release - diskquota_ubuntu18_test_release + - diskquota_rhel8_test_release resource_types: - name: gcs @@ -23,29 +25,45 @@ resources: # Image Resources - name: centos-gpdb-dev-6 - type: docker-image + type: registry-image source: - repository: pivotaldata/centos-gpdb-dev - tag: '6-gcc6.2-llvm3.7' + repository: gcr.io/data-gpdb-public-images/gpdb5-centos6-build-test + tag: latest - name: centos-gpdb-dev-7 - type: docker-image + type: registry-image + source: + 
repository: gcr.io/data-gpdb-public-images/gpdb5-centos7-build-test + tag: latest + +- name: rhel-image-dev-8 + type: registry-image + source: + repository: gcr.io/data-gpdb-private-images/gpdb6-rhel8-test + tag: latest + username: _json_key + password: ((container-registry-readonly-service-account-key)) + +- name: rhel-image-build-8 + type: registry-image source: - repository: pivotaldata/centos-gpdb-dev - tag: '7-gcc6.2-llvm3.7' + repository: gcr.io/data-gpdb-private-images/gpdb6-rhel8-build + tag: latest + username: _json_key + password: ((container-registry-readonly-service-account-key)) - name: ubuntu18-image-build - type: docker-image + type: registry-image source: - repository: pivotaldata/gpdb6-ubuntu18.04-build + repository: gcr.io/data-gpdb-public-images/gpdb6-ubuntu18.04-build tag: latest - + - name: ubuntu18-image-test - type: docker-image + type: registry-image source: - repository: pivotaldata/gpdb6-ubuntu18.04-test + repository: gcr.io/data-gpdb-public-images/gpdb6-ubuntu18.04-test tag: latest - + # Github Source Codes @@ -58,7 +76,7 @@ resources: - name: diskquota_src type: git source: - branch: gpdb + branch: release/1.X uri: https://github.com/greenplum-db/diskquota.git tag_filter: 1.* @@ -78,6 +96,13 @@ resources: json_key: {{concourse-gcs-resources-service-account-key}} versioned_file: ((gcs_gpdb_binary_folder))/bin_gpdb_centos7/bin_gpdb.tar.gz +- name: bin_gpdb_rhel8 + type: gcs + source: + bucket: {{gcs-bucket-intermediates}} + json_key: {{concourse-gcs-resources-service-account-key}} + versioned_file: ((gcs_gpdb_binary_folder))/bin_gpdb_rhel8/bin_gpdb.tar.gz + - name: bin_gpdb_ubuntu18 type: gcs source: @@ -99,6 +124,13 @@ resources: json_key: {{concourse-gcs-resources-service-account-key}} regexp: diskquota/released/gpdb6/diskquota-(.*).tar.gz +- name: bin_diskquota_rhel8 + type: gcs + source: + bucket: {{gcs-bucket}} + json_key: {{concourse-gcs-resources-service-account-key}} + regexp: diskquota/released/gpdb6/diskquota-(.*).tar.gz + - name: bin_diskquota_ubuntu18 type: gcs source: @@ -110,6 +142,35 @@ resources: ## ====================================================================== jobs: +- name: release_rhel-8 + max_in_flight: 3 + plan: + - aggregate: + - get: rhel-image-dev-8 + - get: rhel-image-build-8 + - get: diskquota_src + trigger: true + - get: bin_gpdb + resource: bin_gpdb_rhel8 + - get: gpdb_src + - aggregate: + - task: build_diskquota + file: diskquota_src/concourse/tasks/build_diskquota.yml + image: rhel-image-build-8 + params: + DISKQUOTA_OS: rhel8 + - aggregate: + - task: test_diskquota + file: diskquota_src/concourse/tasks/test_diskquota.yml + image: rhel-image-dev-8 + input_mapping: + bin_diskquota: diskquota_artifacts + params: + DISKQUOTA_OS: rhel8 + - aggregate: + - put: bin_diskquota_rhel8 + params: + file: diskquota_artifacts/diskquota*.tar.gz - name: release_centos7 max_in_flight: 3 @@ -125,6 +186,8 @@ jobs: - task: build_diskquota file: diskquota_src/concourse/tasks/build_diskquota.yml image: centos-gpdb-dev-7 + params: + DISKQUOTA_OS: rhel7 - aggregate: - task: test_diskquota file: diskquota_src/concourse/tasks/test_diskquota.yml @@ -152,6 +215,8 @@ jobs: - task: build_diskquota file: diskquota_src/concourse/tasks/build_diskquota.yml image: centos-gpdb-dev-6 + params: + DISKQUOTA_OS: rhel6 - aggregate: - task: test_diskquota file: diskquota_src/concourse/tasks/test_diskquota.yml @@ -180,6 +245,8 @@ jobs: - task: build_diskquota file: diskquota_src/concourse/tasks/build_diskquota.yml image: ubuntu18-image-build + params: + DISKQUOTA_OS: 
ubuntu18.04 - aggregate: - task: test_diskquota file: diskquota_src/concourse/tasks/test_diskquota.yml @@ -225,6 +292,22 @@ jobs: file: diskquota_src/concourse/tasks/test_diskquota.yml image: centos-gpdb-dev-7 +- name: diskquota_rhel8_test_release + max_in_flight: 3 + plan: + - aggregate: + - get: rhel-image-dev-8 + - get: diskquota_src + - get: bin_diskquota + resource: bin_diskquota_rhel8 + - get: bin_gpdb + resource: bin_gpdb_rhel8 + trigger: true + - get: gpdb_src + - task: test_diskquota + file: diskquota_src/concourse/tasks/test_diskquota.yml + image: rhel-image-dev-8 + - name: diskquota_ubuntu18_test_release max_in_flight: 3 plan: diff --git a/concourse/scripts/build_diskquota.sh b/concourse/scripts/build_diskquota.sh index 1eea6182432..d074ab76420 100755 --- a/concourse/scripts/build_diskquota.sh +++ b/concourse/scripts/build_diskquota.sh @@ -41,6 +41,13 @@ function pkg() { share/postgresql/extension/diskquota--2.0--1.0.sql \ install_gpdb_component ;; + rhel8) + tar -czf $TOP_DIR/diskquota_artifacts/diskquota-${DISKQUOTA_VERSION}-rhel8_x86_64.tar.gz \ + lib/postgresql/diskquota.so \ + share/postgresql/extension/diskquota.control \ + share/postgresql/extension/diskquota--1.0.sql \ + install_gpdb_component + ;; ubuntu18.04) tar -czf $TOP_DIR/diskquota_artifacts/diskquota-${DISKQUOTA_VERSION}-ubuntu18.04_x86_64.tar.gz \ lib/postgresql/diskquota.so \ @@ -51,12 +58,12 @@ function pkg() { share/postgresql/extension/diskquota--2.0--1.0.sql \ install_gpdb_component ;; - *) echo "Unknown OS: $OSVER"; exit 1 ;; + *) echo "Unknown OS: $DISKQUOTA_OS"; exit 1 ;; esac popd } -function _main() { +function _main() { time install_gpdb time pkg } From fc216ffbf21894e679e5e90143f2b5c3f550a434 Mon Sep 17 00:00:00 2001 From: Sasasu Date: Mon, 24 Jan 2022 17:12:46 +0800 Subject: [PATCH 109/330] set the scope of pause() to current database --- diskquota.c | 30 +-- diskquota.h | 8 +- diskquota_utility.c | 81 +++++++-- quotamodel.c | 11 +- tests/regress/diskquota_schedule | 1 + .../test_pause_and_resume_multiple_db.out | 171 ++++++++++++++++++ .../sql/test_pause_and_resume_multiple_db.sql | 77 ++++++++ 7 files changed, 337 insertions(+), 42 deletions(-) create mode 100644 tests/regress/expected/test_pause_and_resume_multiple_db.out create mode 100644 tests/regress/sql/test_pause_and_resume_multiple_db.sql diff --git a/diskquota.c b/diskquota.c index 29534b9a3d6..d20b6ebbb60 100644 --- a/diskquota.c +++ b/diskquota.c @@ -72,19 +72,25 @@ ExtensionDDLMessage *extension_ddl_message = NULL; HTAB *disk_quota_worker_map = NULL; static int num_db = 0; -/* - * diskquota_paused is a flag used to pause the extension (when the flag is - * enabled, the extension keeps counting the disk usage but doesn't emit an - * error when the disk usage limit is exceeded). - */ -bool *diskquota_paused = NULL; - bool diskquota_is_paused() { - LWLockAcquire(diskquota_locks.paused_lock, LW_SHARED); - bool paused = *diskquota_paused; - LWLockRelease(diskquota_locks.paused_lock); + Assert(MyDatabaseId != InvalidOid); + bool paused; + + LWLockAcquire(diskquota_locks.worker_map_lock, LW_SHARED); + { + DiskQuotaWorkerEntry *hash_entry; + bool found; + + hash_entry = (DiskQuotaWorkerEntry*) hash_search(disk_quota_worker_map, + (void*)&MyDatabaseId, + HASH_FIND, + &found); + paused = found ? 
hash_entry->is_paused : false; + } + LWLockRelease(diskquota_locks.worker_map_lock); + return paused; } @@ -376,6 +382,7 @@ disk_quota_worker_main(Datum main_arg) /* Do the work */ if (!diskquota_is_paused()) refresh_disk_quota_model(false); + worker_increase_epoch(MyDatabaseId); } @@ -1002,6 +1009,7 @@ start_worker_by_dboid(Oid dbid) workerentry->handle = handle; workerentry->pid = pid; workerentry->epoch = 0; + workerentry->is_paused = false; } LWLockRelease(diskquota_locks.worker_map_lock); @@ -1034,7 +1042,7 @@ worker_increase_epoch(Oid database_oid) bool found = false; DiskQuotaWorkerEntry * workerentry = (DiskQuotaWorkerEntry *) hash_search( disk_quota_worker_map, (void *) &database_oid, HASH_FIND, &found); - + if (found) { ++(workerentry->epoch); diff --git a/diskquota.h b/diskquota.h index ee5ca02e9e6..c3a65b9be17 100644 --- a/diskquota.h +++ b/diskquota.h @@ -3,6 +3,7 @@ #include "storage/lwlock.h" #include "postmaster/bgworker.h" +#include "port/atomics.h" /* max number of monitored database with diskquota enabled */ #define MAX_NUM_MONITORED_DB 10 @@ -36,7 +37,6 @@ struct DiskQuotaLocks LWLock *extension_ddl_message_lock; LWLock *extension_ddl_lock; /* ensure create diskquota extension serially */ LWLock *monitoring_dbid_cache_lock; - LWLock *paused_lock; LWLock *relation_cache_lock; LWLock *hardlimit_lock; LWLock *worker_map_lock; @@ -96,7 +96,6 @@ typedef enum MessageResult MessageResult; extern DiskQuotaLocks diskquota_locks; extern ExtensionDDLMessage *extension_ddl_message; -extern bool *diskquota_paused; extern bool *diskquota_hardlimit; typedef struct DiskQuotaWorkerEntry DiskQuotaWorkerEntry; @@ -106,7 +105,8 @@ struct DiskQuotaWorkerEntry { Oid dbid; pid_t pid; /* worker pid */ - unsigned int epoch; + unsigned int epoch; /* this counter will be increased after each worker loop */ + bool is_paused; /* true if this worker is paused */ BackgroundWorkerHandle *handle; }; @@ -145,6 +145,6 @@ extern Oid diskquota_parse_primary_table_oid(Oid namespace, char *relname); extern bool worker_increase_epoch(Oid database_oid); extern unsigned int worker_get_epoch(Oid database_oid); -extern bool diskquota_is_paused(); +extern bool diskquota_is_paused(void); #endif diff --git a/diskquota_utility.c b/diskquota_utility.c index 0637544e3a1..8a6c4f0f572 100644 --- a/diskquota_utility.c +++ b/diskquota_utility.c @@ -306,14 +306,19 @@ diskquota_start_worker(PG_FUNCTION_ARGS) * Dispatch pausing/resuming command to segments. */ static void -dispatch_pause_or_resume_command(bool pause_extension) +dispatch_pause_or_resume_command(Oid dbid, bool pause_extension) { CdbPgResults cdb_pgresults = {NULL, 0}; int i; StringInfoData sql; initStringInfo(&sql); - appendStringInfo(&sql, "SELECT diskquota.%s", pause_extension ? "pause()" : "resume()"); + appendStringInfo(&sql, "SELECT diskquota.%s", pause_extension ? "pause" : "resume"); + if (dbid == InvalidOid) { + appendStringInfo(&sql, "()"); + } else { + appendStringInfo(&sql, "(%d)", dbid); + } CdbDispatchCommand(sql.data, DF_NONE, &cdb_pgresults); for (i = 0; i < cdb_pgresults.numResults; ++i) @@ -332,10 +337,9 @@ dispatch_pause_or_resume_command(bool pause_extension) } /* - * Set diskquota_paused to true. - * This function is called by user. After this function being called, diskquota - * keeps counting the disk usage but doesn't emit an error when the disk usage - * limit is exceeded. + * this function is called by user. 
+ * pause diskquota in current or specific database
+ * After this function being called, diskquota doesn't emit an error when the disk usage
 */ Datum diskquota_pause(PG_FUNCTION_ARGS) @@ -347,20 +351,39 @@ diskquota_pause(PG_FUNCTION_ARGS) errmsg("must be superuser to pause diskquota"))); }
- LWLockAcquire(diskquota_locks.paused_lock, LW_EXCLUSIVE);
- *diskquota_paused = true;
- LWLockRelease(diskquota_locks.paused_lock);
+ Oid dbid = MyDatabaseId;
+ if (PG_NARGS() == 1) {
+ dbid = PG_GETARG_OID(0);
+ }
+
+ // pause current worker
+ LWLockAcquire(diskquota_locks.worker_map_lock, LW_EXCLUSIVE);
+ {
+ bool found;
+ DiskQuotaWorkerEntry *hentry;
+
+ hentry = (DiskQuotaWorkerEntry*) hash_search(disk_quota_worker_map,
+ (void*)&dbid,
+ // the segment does not boot the worker,
+ // so this will add a new element on the segment;
+ // the element is deleted in diskquota_resume()
+ HASH_ENTER,
+ &found);
+
+ hentry->is_paused = true;
+ }
+ LWLockRelease(diskquota_locks.worker_map_lock);
 if (IS_QUERY_DISPATCHER())
- dispatch_pause_or_resume_command(true /* pause_extension */);
+ dispatch_pause_or_resume_command(PG_NARGS() == 0 ? InvalidOid : dbid,
+ true /* pause_extension */);
 PG_RETURN_VOID(); } /*
- * Set diskquota_paused to false.
- * This function is called by user. After this function being called, diskquota
- * resume to emit an error when the disk usage limit is exceeded.
+ * this function is called by user.
+ * resume diskquota in current or specific database
 */ Datum diskquota_resume(PG_FUNCTION_ARGS) @@ -372,12 +395,36 @@ diskquota_resume(PG_FUNCTION_ARGS) errmsg("must be superuser to resume diskquota"))); }
- LWLockAcquire(diskquota_locks.paused_lock, LW_EXCLUSIVE);
- *diskquota_paused = false;
- LWLockRelease(diskquota_locks.paused_lock);
+ Oid dbid = MyDatabaseId;
+ if (PG_NARGS() == 1) {
+ dbid = PG_GETARG_OID(0);
+ }
+
+ // resume current worker
+ LWLockAcquire(diskquota_locks.worker_map_lock, LW_EXCLUSIVE);
+ {
+ bool found;
+ DiskQuotaWorkerEntry *hentry;
+
+ hentry = (DiskQuotaWorkerEntry*) hash_search(disk_quota_worker_map,
+ (void*)&dbid,
+ HASH_FIND,
+ &found);
+ if (found) {
+ hentry->is_paused = false;
+ }
+
+ // remove the element since we do not need it any more,
+ // ref diskquota_pause()
+ if (found && hentry->handle == NULL) {
+ hash_search(disk_quota_worker_map, (void*)&dbid, HASH_REMOVE, &found);
+ }
+ }
+ LWLockRelease(diskquota_locks.worker_map_lock);
 if (IS_QUERY_DISPATCHER())
- dispatch_pause_or_resume_command(false /* pause_extension */);
+ dispatch_pause_or_resume_command(PG_NARGS() == 0 ? InvalidOid : dbid,
+ false /* pause_extension */);
 PG_RETURN_VOID(); } diff --git a/quotamodel.c b/quotamodel.c index ddc1645a9a1..c50364e1641 100644 --- a/quotamodel.c +++ b/quotamodel.c @@ -431,8 +431,7 @@ disk_quota_shmem_startup(void) * Four shared memory data. extension_ddl_message is used to handle * diskquota extension create/drop command. disk_quota_black_map is used * to store out-of-quota blacklist. active_tables_map is used to store
- * active tables whose disk usage is changed. diskquota_paused is a flag
- * used to pause the extension.
+ * active tables whose disk usage is changed. 
*/ extension_ddl_message = ShmemInitStruct("disk_quota_extension_ddl_message", sizeof(ExtensionDDLMessage), @@ -466,12 +465,6 @@ disk_quota_shmem_startup(void) &hash_ctl, HASH_ELEM | HASH_FUNCTION); - diskquota_paused = ShmemInitStruct("diskquota_paused", - sizeof(bool), - &found); - if (!found) - memset((void *) diskquota_paused, 0, sizeof(bool)); - diskquota_hardlimit = ShmemInitStruct("diskquota_hardlimit", sizeof(bool), &found); @@ -511,7 +504,6 @@ init_lwlocks(void) diskquota_locks.extension_ddl_message_lock = LWLockAssign(); diskquota_locks.extension_ddl_lock = LWLockAssign(); diskquota_locks.monitoring_dbid_cache_lock = LWLockAssign(); - diskquota_locks.paused_lock = LWLockAssign(); diskquota_locks.relation_cache_lock = LWLockAssign(); diskquota_locks.hardlimit_lock = LWLockAssign(); diskquota_locks.worker_map_lock = LWLockAssign(); @@ -535,7 +527,6 @@ DiskQuotaShmemSize(void) size = add_size(size, hash_estimate_size(MAX_NUM_MONITORED_DB, sizeof(Oid))); size = add_size(size, hash_estimate_size(MAX_NUM_MONITORED_DB, sizeof(DiskQuotaWorkerEntry))); size = add_size(size, hash_estimate_size(diskquota_max_active_tables, sizeof(Oid))); - size += sizeof(bool); /* sizeof(*diskquota_paused) */ size += sizeof(bool); /* sizeof(*diskquota_hardlimit) */ return size; } diff --git a/tests/regress/diskquota_schedule b/tests/regress/diskquota_schedule index bfca21a78c0..0d612dbb208 100644 --- a/tests/regress/diskquota_schedule +++ b/tests/regress/diskquota_schedule @@ -5,6 +5,7 @@ test: test_relation_size test: test_relation_cache test: test_uncommitted_table_size test: test_pause_and_resume +test: test_pause_and_resume_multiple_db # disable this tese due to GPDB behavior change # test: test_table_size test: test_fast_disk_check diff --git a/tests/regress/expected/test_pause_and_resume_multiple_db.out b/tests/regress/expected/test_pause_and_resume_multiple_db.out new file mode 100644 index 00000000000..4247d9c471c --- /dev/null +++ b/tests/regress/expected/test_pause_and_resume_multiple_db.out @@ -0,0 +1,171 @@ +-- need 'contrib_regression' as test database +\c +CREATE SCHEMA s1; +SET search_path TO s1; +CREATE DATABASE test_pause_and_resume; +CREATE DATABASE test_new_create_database; +\c test_pause_and_resume +CREATE SCHEMA s1; +CREATE EXTENSION diskquota; +\c contrib_regression +CREATE TABLE s1.a(i int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +INSERT INTO s1.a SELECT generate_series(1,100000); -- expect insert succeed +\c test_pause_and_resume +CREATE TABLE s1.a(i int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
+INSERT INTO s1.a SELECT generate_series(1,100000); -- expect insert succeed
+\c contrib_regression
+SELECT diskquota.set_schema_quota('s1', '1 MB');
+ set_schema_quota
+------------------
+
+(1 row)
+
+SELECT diskquota.wait_for_worker_new_epoch();
+ wait_for_worker_new_epoch
+---------------------------
+ t
+(1 row)
+
+INSERT INTO s1.a SELECT generate_series(1,100); -- expect insert fail
+ERROR: schema's disk space quota exceeded with name:s1
+\c test_pause_and_resume
+SELECT diskquota.set_schema_quota('s1', '1 MB');
+ set_schema_quota
+------------------
+
+(1 row)
+
+SELECT diskquota.wait_for_worker_new_epoch();
+ wait_for_worker_new_epoch
+---------------------------
+ t
+(1 row)
+
+INSERT INTO s1.a SELECT generate_series(1,100); -- expect insert fail
+ERROR: schema's disk space quota exceeded with name:s1
+\c contrib_regression
+SELECT diskquota.pause(); -- pause extension, only affects the current database
+ pause
+-------
+
+(1 row)
+
+SELECT diskquota.wait_for_worker_new_epoch();
+ wait_for_worker_new_epoch
+---------------------------
+ t
+(1 row)
+
+SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 's1.a'::regclass AND segid = -1;
+ tableid | size | segid
+---------+---------+-------
+ s1.a | 3932160 | -1
+(1 row)
+
+INSERT INTO s1.a SELECT generate_series(1,100); -- expect insert succeed
+\c test_pause_and_resume
+SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 's1.a'::regclass AND segid = -1;
+ tableid | size | segid
+---------+---------+-------
+ s1.a | 3932160 | -1
+(1 row)
+
+INSERT INTO s1.a SELECT generate_series(1,100); -- expect insert fail
+ERROR: schema's disk space quota exceeded with name:s1
+SELECT diskquota.pause(); -- pause extension, only affects the current database
+ pause
+-------
+
+(1 row)
+
+SELECT diskquota.wait_for_worker_new_epoch();
+ wait_for_worker_new_epoch
+---------------------------
+ t
+(1 row)
+
+SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 's1.a'::regclass AND segid = -1;
+ tableid | size | segid
+---------+---------+-------
+ s1.a | 3932160 | -1
+(1 row)
+
+INSERT INTO s1.a SELECT generate_series(1,100); -- expect insert succeed
+\c test_new_create_database;
+CREATE SCHEMA s1;
+CREATE EXTENSION diskquota; -- new database should be active although another database is paused
+CREATE TABLE s1.a(i int);
+NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table.
+HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
+INSERT INTO s1.a SELECT generate_series(1,100000); -- expect insert succeed
+SELECT diskquota.set_schema_quota('s1', '1 MB');
+ set_schema_quota
+------------------
+
+(1 row)
+
+SELECT diskquota.wait_for_worker_new_epoch();
+ wait_for_worker_new_epoch
+---------------------------
+ t
+(1 row)
+
+INSERT INTO s1.a SELECT generate_series(1,100000); -- expect insert fail
+ERROR: schema's disk space quota exceeded with name:s1
+SELECT diskquota.pause(); -- pause extension, only affects the current database
+ pause
+-------
+
+(1 row)
+
+SELECT diskquota.wait_for_worker_new_epoch();
+ wait_for_worker_new_epoch
+---------------------------
+ t
+(1 row)
+
+INSERT INTO s1.a SELECT generate_series(1,100); -- expect insert succeed
+-- resume should only affect the current database
+SELECT diskquota.resume();
+ resume
+--------
+
+(1 row)
+
+SELECT diskquota.wait_for_worker_new_epoch();
+ wait_for_worker_new_epoch
+---------------------------
+ t
+(1 row)
+
+INSERT INTO s1.a SELECT generate_series(1,100); -- expect insert fail
+ERROR: schema's disk space quota exceeded with name:s1
+\c contrib_regression
+INSERT INTO s1.a SELECT generate_series(1,100); -- expect insert succeed
+SELECT diskquota.resume();
+ resume
+--------
+
+(1 row)
+
+SELECT diskquota.wait_for_worker_new_epoch();
+ wait_for_worker_new_epoch
+---------------------------
+ t
+(1 row)
+
+INSERT INTO s1.a SELECT generate_series(1,100); -- expect insert fail
+ERROR: schema's disk space quota exceeded with name:s1
+\c test_pause_and_resume
+DROP EXTENSION diskquota;
+\c test_new_create_database
+DROP EXTENSION diskquota;
+\c contrib_regression
+DROP SCHEMA s1 CASCADE;
+NOTICE: drop cascades to table s1.a
+DROP DATABASE test_pause_and_resume;
+DROP DATABASE test_new_create_database;
diff --git a/tests/regress/sql/test_pause_and_resume_multiple_db.sql b/tests/regress/sql/test_pause_and_resume_multiple_db.sql new file mode 100644 index 00000000000..5209f3a505a --- /dev/null +++ b/tests/regress/sql/test_pause_and_resume_multiple_db.sql @@ -0,0 +1,77 @@
+-- need 'contrib_regression' as test database
+\c
+
+CREATE SCHEMA s1;
+SET search_path TO s1;
+CREATE DATABASE test_pause_and_resume;
+CREATE DATABASE test_new_create_database;
+
+\c test_pause_and_resume
+CREATE SCHEMA s1;
+CREATE EXTENSION diskquota;
+
+\c contrib_regression
+CREATE TABLE s1.a(i int);
+INSERT INTO s1.a SELECT generate_series(1,100000); -- expect insert succeed
+
+\c test_pause_and_resume
+CREATE TABLE s1.a(i int);
+INSERT INTO s1.a SELECT generate_series(1,100000); -- expect insert succeed
+
+\c contrib_regression
+SELECT diskquota.set_schema_quota('s1', '1 MB');
+SELECT diskquota.wait_for_worker_new_epoch();
+INSERT INTO s1.a SELECT generate_series(1,100); -- expect insert fail
+
+\c test_pause_and_resume
+SELECT diskquota.set_schema_quota('s1', '1 MB');
+SELECT diskquota.wait_for_worker_new_epoch();
+INSERT INTO s1.a SELECT generate_series(1,100); -- expect insert fail
+
+\c contrib_regression
+SELECT diskquota.pause(); -- pause extension, only affects the current database
+SELECT diskquota.wait_for_worker_new_epoch();
+SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 's1.a'::regclass AND segid = -1;
+INSERT INTO s1.a SELECT generate_series(1,100); -- expect insert succeed
+
+\c test_pause_and_resume
+SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 's1.a'::regclass AND segid = -1;
+INSERT INTO s1.a SELECT generate_series(1,100); -- expect insert fail
+SELECT diskquota.pause(); -- pause extension, only affects the current database
+SELECT diskquota.wait_for_worker_new_epoch();
+SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 's1.a'::regclass AND segid = -1;
+INSERT INTO s1.a SELECT generate_series(1,100); -- expect insert succeed
+
+\c test_new_create_database;
+CREATE SCHEMA s1;
+CREATE EXTENSION diskquota; -- new database should be active although another database is paused
+CREATE TABLE s1.a(i int);
+INSERT INTO s1.a SELECT generate_series(1,100000); -- expect insert succeed
+SELECT diskquota.set_schema_quota('s1', '1 MB');
+SELECT diskquota.wait_for_worker_new_epoch();
+INSERT INTO s1.a SELECT generate_series(1,100000); -- expect insert fail
+SELECT diskquota.pause(); -- pause extension, only affects the current database
+SELECT diskquota.wait_for_worker_new_epoch();
+INSERT INTO s1.a SELECT generate_series(1,100); -- expect insert succeed
+
+-- resume should only affect the current database
+SELECT diskquota.resume();
+SELECT diskquota.wait_for_worker_new_epoch();
+INSERT INTO s1.a SELECT generate_series(1,100); -- expect insert fail
+
+\c contrib_regression
+INSERT INTO s1.a SELECT generate_series(1,100); -- expect insert succeed
+SELECT diskquota.resume();
+SELECT diskquota.wait_for_worker_new_epoch();
+INSERT INTO s1.a SELECT generate_series(1,100); -- expect insert fail
+
+\c test_pause_and_resume
+DROP EXTENSION diskquota;
+
+\c test_new_create_database
+DROP EXTENSION diskquota;
+
+\c contrib_regression
+DROP SCHEMA s1 CASCADE;
+DROP DATABASE test_pause_and_resume;
+DROP DATABASE test_new_create_database;
From 23472c4e86238591e8d96abc24b123e02f7213e2 Mon Sep 17 00:00:00 2001 From: Sasasu Date: Tue, 25 Jan 2022 15:47:58 +0800 Subject: [PATCH 110/330] cleanup header include --- diskquota.c | 25 +++---------------------- diskquota.h | 15 ++++++++++++++- diskquota_utility.c | 15 ++------------- enforcement.c | 4 +--- gp_activetable.c | 15 --------------- gp_activetable.h | 4 ++-- quotamodel.c | 29 ++++------------------------- relation_cache.c | 4 ---- relation_cache.h | 5 ++--- 9 files changed, 28 insertions(+), 88 deletions(-) diff --git a/diskquota.c b/diskquota.c index d20b6ebbb60..227caec35f7 100644 --- a/diskquota.c +++ b/diskquota.c @@ -14,43 +14,24 @@ * * ------------------------------------------------------------------------- */
-#include "postgres.h"
+#include "diskquota.h"
+#include "gp_activetable.h"
-#include
-#include
+#include "postgres.h"
-#include "access/tupdesc.h"
 #include "access/xact.h"
-#include "catalog/indexing.h"
-#include "catalog/namespace.h"
-#include "catalog/pg_collation.h"
-#include "catalog/pg_database.h"
-#include "catalog/pg_extension.h"
-#include "catalog/pg_type.h"
 #include "cdb/cdbvars.h"
 #include "commands/dbcommands.h"
-#include "commands/extension.h"
 #include "executor/spi.h"
-#include "libpq/libpq-be.h"
-#include "miscadmin.h"
-#include "nodes/makefuncs.h"
-#include "pgstat.h"
 #include "storage/ipc.h"
 #include "storage/proc.h"
 #include "tcop/idle_resource_cleaner.h"
 #include "tcop/utility.h"
-#include "utils/acl.h"
 #include "utils/builtins.h"
-#include "utils/fmgroids.h"
-#include "utils/formatting.h"
-#include "utils/memutils.h"
-#include "utils/numeric.h"
 #include "utils/ps_status.h"
 #include "utils/snapmgr.h"
 #include "utils/syscache.h"
-#include "gp_activetable.h"
-#include "diskquota.h"
 PG_MODULE_MAGIC; #define DISKQUOTA_DB "diskquota" diff --git a/diskquota.h b/diskquota.h index c3a65b9be17..02d4c798c62 100644 --- a/diskquota.h +++ b/diskquota.h @@ -1,9 +1,22 @@ #ifndef DISK_QUOTA_H #define DISK_QUOTA_H +#include "c.h" +#include 
"postgres.h" + +#include "fmgr.h" +#include "storage/lock.h" #include "storage/lwlock.h" #include "postmaster/bgworker.h" -#include "port/atomics.h" + +#include "utils/hsearch.h" +#include "storage/relfilenode.h" +#include "storage/lock.h" + +#include "fmgr.h" +#include "utils/relcache.h" + +#include /* max number of monitored database with diskquota enabled */ #define MAX_NUM_MONITORED_DB 10 diff --git a/diskquota_utility.c b/diskquota_utility.c index 8a6c4f0f572..1bbdf516e46 100644 --- a/diskquota_utility.c +++ b/diskquota_utility.c @@ -17,20 +17,14 @@ */ #include "postgres.h" -#include #include #include "access/aomd.h" -#include "access/xact.h" #include "catalog/namespace.h" #include "catalog/objectaccess.h" #include "catalog/pg_collation.h" -#include "catalog/pg_database.h" #include "catalog/pg_extension.h" -#include "catalog/pg_tablespace.h" -#include "catalog/pg_type.h" #include "catalog/pg_namespace.h" -#include "catalog/pg_tablespace.h" #include "catalog/indexing.h" #include "commands/dbcommands.h" #include "commands/extension.h" @@ -38,19 +32,14 @@ #include "executor/spi.h" #include "nodes/makefuncs.h" #include "storage/proc.h" -#include "tcop/utility.h" -#include "utils/acl.h" #include "utils/builtins.h" #include "utils/faultinjector.h" #include "utils/fmgroids.h" #include "utils/formatting.h" -#include "utils/memutils.h" #include "utils/numeric.h" -#include "utils/snapmgr.h" #include "libpq-fe.h" #include -#include #include #include @@ -338,8 +327,8 @@ dispatch_pause_or_resume_command(Oid dbid, bool pause_extension) /* * this function is called by user. - * pause diskquota in current or specific database - * After this function being called, diskquota doesn't emit an error when the disk usage + * pause diskquota in current or specific database. + * After this function being called, diskquota doesn't emit an error when the disk usage limit is exceeded. 
*/ Datum diskquota_pause(PG_FUNCTION_ARGS) diff --git a/enforcement.c b/enforcement.c index b6366ebad4c..48df213a671 100644 --- a/enforcement.c +++ b/enforcement.c @@ -15,10 +15,8 @@ #include "postgres.h" #include "cdb/cdbdisp.h" -#include "cdb/cdbdisp_async.h" #include "executor/executor.h" -#include "storage/bufmgr.h" -#include "utils/resowner.h" + #include "diskquota.h" #define CHECKED_OID_LIST_NUM 64 diff --git a/gp_activetable.c b/gp_activetable.c index 00cb45a6dba..352d998d7bd 100644 --- a/gp_activetable.c +++ b/gp_activetable.c @@ -18,31 +18,16 @@ #include "access/htup_details.h" #include "access/xact.h" #include "catalog/catalog.h" -#include "catalog/indexing.h" -#include "catalog/pg_class.h" -#include "catalog/pg_namespace.h" -#include "catalog/pg_type.h" #include "catalog/objectaccess.h" -#include "cdb/cdbbufferedappend.h" #include "cdb/cdbdisp_query.h" #include "cdb/cdbdispatchresult.h" #include "cdb/cdbvars.h" #include "commands/dbcommands.h" #include "executor/spi.h" -#include "fmgr.h" #include "funcapi.h" #include "libpq-fe.h" -#include "miscadmin.h" -#include "nodes/makefuncs.h" -#include "storage/shmem.h" -#include "storage/smgr.h" -#include "utils/array.h" -#include "utils/builtins.h" #include "utils/faultinjector.h" -#include "utils/fmgroids.h" #include "utils/lsyscache.h" -#include "utils/relfilenodemap.h" -#include "utils/syscache.h" #include "gp_activetable.h" #include "diskquota.h" diff --git a/gp_activetable.h b/gp_activetable.h index 5b194f26c76..c2b0cfcea6e 100644 --- a/gp_activetable.h +++ b/gp_activetable.h @@ -1,8 +1,8 @@ #ifndef ACTIVE_TABLE_H #define ACTIVE_TABLE_H -#include "storage/lwlock.h" -#include "diskquota.h" +#include "c.h" +#include "utils/hsearch.h" /* Cache to detect the active table list */ typedef struct DiskQuotaActiveTableFileEntry diff --git a/quotamodel.c b/quotamodel.c index c50364e1641..dbc33428601 100644 --- a/quotamodel.c +++ b/quotamodel.c @@ -13,53 +13,32 @@ * * ------------------------------------------------------------------------- */ +#include "diskquota.h" +#include "gp_activetable.h" +#include "relation_cache.h" + #include "postgres.h" -#include "access/heapam.h" -#include "access/htup_details.h" -#include "access/reloptions.h" -#include "access/skey.h" -#include "access/transam.h" -#include "access/tupdesc.h" #include "access/xact.h" -#include "catalog/indexing.h" -#include "catalog/namespace.h" -#include "catalog/pg_class.h" -#include "catalog/pg_database.h" #include "catalog/pg_tablespace.h" -#include "catalog/pg_type.h" #include "commands/dbcommands.h" #include "commands/tablespace.h" #include "executor/spi.h" #include "funcapi.h" -#include "lib/stringinfo.h" -#include "miscadmin.h" -#include "nodes/makefuncs.h" #include "storage/ipc.h" -#include "storage/latch.h" -#include "storage/lwlock.h" -#include "storage/relfilenode.h" -#include "storage/shmem.h" #include "utils/builtins.h" #include "utils/guc.h" #include "utils/faultinjector.h" -#include "utils/fmgroids.h" #include "utils/lsyscache.h" #include "utils/snapmgr.h" #include "utils/syscache.h" #include "libpq-fe.h" -#include -#include #include "cdb/cdbvars.h" #include "cdb/cdbdisp_query.h" #include "cdb/cdbdispatchresult.h" #include "cdb/cdbutil.h" -#include "gp_activetable.h" -#include "diskquota.h" -#include "relation_cache.h" - /* cluster level max size of black list */ #define MAX_DISK_QUOTA_BLACK_ENTRIES (1024 * 1024) /* cluster level init size of black list */ diff --git a/relation_cache.c b/relation_cache.c index c8bdc407de4..ce285ef367b 100644 --- a/relation_cache.c 
+++ b/relation_cache.c @@ -4,11 +4,7 @@ #include "catalog/pg_class.h" #include "catalog/pg_namespace.h" #include "catalog/pg_tablespace.h"
-#include "catalog/pg_type.h"
 #include "catalog/objectaccess.h"
-#include "executor/spi.h"
-#include "utils/builtins.h"
-#include "utils/lsyscache.h"
 #include "utils/relfilenodemap.h"
 #include "utils/syscache.h"
 #include "utils/array.h"
diff --git a/relation_cache.h b/relation_cache.h index dde9a1a71bf..70a8080a1c4 100644 --- a/relation_cache.h +++ b/relation_cache.h @@ -1,10 +1,9 @@ #ifndef RELATION_CACHE_H #define RELATION_CACHE_H
+#include "c.h"
+#include "utils/hsearch.h"
 #include "storage/relfilenode.h"
-#include "utils/relcache.h"
-#include "storage/lock.h"
-#include "postgres.h"
 typedef struct DiskQuotaRelationCacheEntry { From 21f3ec2461f3e3439b85d824220e81f84d3dfb98 Mon Sep 17 00:00:00 2001 From: Sasasu Date: Wed, 26 Jan 2022 16:05:16 +0800 Subject: [PATCH 111/330] re-dispatch pause status --- diskquota--2.0.sql | 4 ++ tests/regress/diskquota_schedule | 1 + .../expected/test_drop_after_pause.out | 42 +++++++++++++++++++ tests/regress/sql/test_drop_after_pause.sql | 24 +++++++++++ 4 files changed, 71 insertions(+) create mode 100644 tests/regress/expected/test_drop_after_pause.out create mode 100644 tests/regress/sql/test_drop_after_pause.sql diff --git a/diskquota--2.0.sql b/diskquota--2.0.sql index 4dc0934ef0a..8898de9cd02 100644 --- a/diskquota--2.0.sql +++ b/diskquota--2.0.sql @@ -235,3 +235,7 @@ BEGIN RETURN FALSE; END; $$ LANGUAGE PLpgSQL;
+
+-- re-dispatch the pause status to false, in case the user does pause-drop-recreate;
+-- see test case 'test_drop_after_pause'
+SELECT from diskquota.resume();
diff --git a/tests/regress/diskquota_schedule b/tests/regress/diskquota_schedule index 0d612dbb208..92e6ff8087b 100644 --- a/tests/regress/diskquota_schedule +++ b/tests/regress/diskquota_schedule @@ -6,6 +6,7 @@ test: test_relation_cache test: test_uncommitted_table_size test: test_pause_and_resume test: test_pause_and_resume_multiple_db
+test: test_drop_after_pause
# disable this tese due to GPDB behavior change # test: test_table_size test: test_fast_disk_check diff --git a/tests/regress/expected/test_drop_after_pause.out b/tests/regress/expected/test_drop_after_pause.out new file mode 100644 index 00000000000..7f2cbc74d4b --- /dev/null +++ b/tests/regress/expected/test_drop_after_pause.out @@ -0,0 +1,42 @@
+CREATE DATABASE test_drop_after_pause;
+\c test_drop_after_pause
+CREATE EXTENSION diskquota;
+SELECT FROM diskquota.pause();
+--
+(1 row)
+
+DROP EXTENSION diskquota;
+CREATE EXTENSION diskquota;
+SELECT diskquota.enable_hardlimit();
+ enable_hardlimit
+------------------
+
+(1 row)
+
+CREATE SCHEMA SX;
+CREATE TABLE SX.a(i int);
+NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table.
+HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
+SELECT diskquota.set_schema_quota('SX', '1MB');
+ set_schema_quota
+------------------
+
+(1 row)
+
+SELECT diskquota.wait_for_worker_new_epoch();
+ wait_for_worker_new_epoch
+---------------------------
+ t
+(1 row)
+
+INSERT INTO SX.a SELECT generate_series(1,1000000); -- expect insert fail
+ERROR: schema's disk space quota exceeded with name:41008 (seg0 127.0.0.1:6002 pid=388076)
+SELECT diskquota.disable_hardlimit();
+ disable_hardlimit
+-------------------
+
+(1 row)
+
+DROP EXTENSION diskquota;
+\c contrib_regression
+DROP DATABASE test_drop_after_pause;
diff --git a/tests/regress/sql/test_drop_after_pause.sql b/tests/regress/sql/test_drop_after_pause.sql new file mode 100644 index 00000000000..51a5fd802a3 --- /dev/null +++ b/tests/regress/sql/test_drop_after_pause.sql @@ -0,0 +1,24 @@
+CREATE DATABASE test_drop_after_pause;
+
+\c test_drop_after_pause
+
+CREATE EXTENSION diskquota;
+SELECT FROM diskquota.pause();
+DROP EXTENSION diskquota;
+
+CREATE EXTENSION diskquota;
+
+SELECT diskquota.enable_hardlimit();
+
+CREATE SCHEMA SX;
+CREATE TABLE SX.a(i int);
+SELECT diskquota.set_schema_quota('SX', '1MB');
+SELECT diskquota.wait_for_worker_new_epoch();
+INSERT INTO SX.a SELECT generate_series(1,1000000); -- expect insert fail
+
+SELECT diskquota.disable_hardlimit();
+DROP EXTENSION diskquota;
+
+\c contrib_regression
+
+DROP DATABASE test_drop_after_pause;
From 1b2911b19330e973cdef74e8818f46ddb1cf351c Mon Sep 17 00:00:00 2001 From: Sasasu Date: Wed, 26 Jan 2022 10:30:01 +0800 Subject: [PATCH 112/330] allow to re-create extension --- diskquota.c | 33 ++++++++++++++++++------ tests/regress/diskquota_schedule | 2 +- tests/regress/expected/test_recreate.out | 9 +++++++ tests/regress/sql/test_recreate.sql | 14 ++++++++++ 4 files changed, 49 insertions(+), 9 deletions(-) create mode 100644 tests/regress/expected/test_recreate.out create mode 100644 tests/regress/sql/test_recreate.sql diff --git a/diskquota.c b/diskquota.c index 227caec35f7..31523f17495 100644 --- a/diskquota.c +++ b/diskquota.c @@ -820,18 +820,35 @@ on_del_db(Oid dbid, MessageResult * code) static void add_dbid_to_database_list(Oid dbid) {
- StringInfoData str;
 int ret;
- initStringInfo(&str);
- appendStringInfo(&str, "insert into diskquota_namespace.database_list values(%u);", dbid);
+ Oid argt[1] = {INT4OID};
+ Datum argv[1] = {Int32GetDatum(dbid)};
- /* errors will be cached in outer function */
- ret = SPI_execute(str.data, false, 0);
- if (ret != SPI_OK_INSERT)
- {
- ereport(ERROR, (errmsg("[diskquota launcher] SPI_execute sql:'%s', errno:%d", str.data, errno)));
+ ret = SPI_execute_with_args(
+ "select * from diskquota_namespace.database_list where dbid = $1",
+ 1, argt, argv, NULL, true, 0);
+
+ if (ret != SPI_OK_SELECT)
+ ereport(ERROR, (errmsg(
+ "[diskquota launcher] error occurred while checking database_list, "
+ " code = %d errno = %d", ret, errno)));
+
+ if (SPI_processed == 1) {
+ ereport(WARNING, (errmsg(
+ "[diskquota launcher] database id %d is already active, "
+ "skip database_list update", dbid)));
+ return;
 }
+
+ ret = SPI_execute_with_args("insert into diskquota_namespace.database_list values($1)",
+ 1, argt, argv, NULL, false, 0);
+
+ if (ret != SPI_OK_INSERT || SPI_processed != 1)
+ ereport(ERROR, (errmsg(
+ "[diskquota launcher] error occurred while updating database_list, "
+ " code = %d errno = %d", ret, errno)));
+
 return; } diff --git a/tests/regress/diskquota_schedule index 92e6ff8087b..33164929a14 100644 --- a/tests/regress/diskquota_schedule +++ 
b/tests/regress/diskquota_schedule @@ -11,7 +11,7 @@ test: test_drop_after_pause # test: test_table_size test: test_fast_disk_check #test: test_insert_after_drop -test: test_role test_schema test_drop_table test_column test_copy test_update test_toast test_truncate test_reschema test_temp_role test_rename test_delete_quota test_mistake test_tablespace_role test_tablespace_schema test_tablespace_role_perseg test_tablespace_schema_perseg test_index +test: test_role test_schema test_drop_table test_column test_copy test_update test_toast test_truncate test_reschema test_temp_role test_rename test_delete_quota test_mistake test_tablespace_role test_tablespace_schema test_tablespace_role_perseg test_tablespace_schema_perseg test_index test_recreate test: test_truncate test: test_delete_quota test: test_partition diff --git a/tests/regress/expected/test_recreate.out b/tests/regress/expected/test_recreate.out new file mode 100644 index 00000000000..77fbeec3515 --- /dev/null +++ b/tests/regress/expected/test_recreate.out @@ -0,0 +1,9 @@ +\c +CREATE DATABASE test_recreate; +\c diskquota +INSERT INTO diskquota_namespace.database_list(dbid) SELECT oid FROM pg_database WHERE datname = 'test_recreate'; +\c test_recreate +CREATE EXTENSION diskquota; -- shoud be ok +DROP EXTENSION diskquota; +\c contrib_regression +DROP DATABASE test_recreate; diff --git a/tests/regress/sql/test_recreate.sql b/tests/regress/sql/test_recreate.sql new file mode 100644 index 00000000000..ea3820d7245 --- /dev/null +++ b/tests/regress/sql/test_recreate.sql @@ -0,0 +1,14 @@ +\c + +CREATE DATABASE test_recreate; + +\c diskquota + +INSERT INTO diskquota_namespace.database_list(dbid) SELECT oid FROM pg_database WHERE datname = 'test_recreate'; + +\c test_recreate +CREATE EXTENSION diskquota; -- shoud be ok +DROP EXTENSION diskquota; + +\c contrib_regression +DROP DATABASE test_recreate; From 88b14c249b0aced3f782befebfd71653f7a2796c Mon Sep 17 00:00:00 2001 From: Sasasu Date: Tue, 25 Jan 2022 15:47:58 +0800 Subject: [PATCH 113/330] use atomic on flag hardlimit_lock (#124) --- diskquota.h | 9 +++------ quotamodel.c | 25 +++++++------------------ 2 files changed, 10 insertions(+), 24 deletions(-) diff --git a/diskquota.h b/diskquota.h index 02d4c798c62..8933c4a6630 100644 --- a/diskquota.h +++ b/diskquota.h @@ -3,17 +3,15 @@ #include "c.h" #include "postgres.h" +#include "port/atomics.h" #include "fmgr.h" #include "storage/lock.h" #include "storage/lwlock.h" +#include "storage/relfilenode.h" #include "postmaster/bgworker.h" #include "utils/hsearch.h" -#include "storage/relfilenode.h" -#include "storage/lock.h" - -#include "fmgr.h" #include "utils/relcache.h" #include @@ -51,7 +49,6 @@ struct DiskQuotaLocks LWLock *extension_ddl_lock; /* ensure create diskquota extension serially */ LWLock *monitoring_dbid_cache_lock; LWLock *relation_cache_lock; - LWLock *hardlimit_lock; LWLock *worker_map_lock; LWLock *altered_reloid_cache_lock; }; @@ -109,7 +106,7 @@ typedef enum MessageResult MessageResult; extern DiskQuotaLocks diskquota_locks; extern ExtensionDDLMessage *extension_ddl_message; -extern bool *diskquota_hardlimit; +extern pg_atomic_uint32 *diskquota_hardlimit; typedef struct DiskQuotaWorkerEntry DiskQuotaWorkerEntry; diff --git a/quotamodel.c b/quotamodel.c index dbc33428601..6b3083fb6e3 100644 --- a/quotamodel.c +++ b/quotamodel.c @@ -165,7 +165,7 @@ static HTAB *table_size_map = NULL; static HTAB *disk_quota_black_map = NULL; static HTAB *local_disk_quota_black_map = NULL; -bool *diskquota_hardlimit = NULL; +pg_atomic_uint32 
*diskquota_hardlimit = NULL; static shmem_startup_hook_type prev_shmem_startup_hook = NULL; @@ -445,10 +445,10 @@ disk_quota_shmem_startup(void) HASH_ELEM | HASH_FUNCTION); diskquota_hardlimit = ShmemInitStruct("diskquota_hardlimit",
- sizeof(bool),
+ sizeof(pg_atomic_uint32),
 &found);
 if (!found)
- memset((void *) diskquota_hardlimit, 0, sizeof(bool));
+ memset((void *) diskquota_hardlimit, 0, sizeof(pg_atomic_uint32));
 /* use disk_quota_worker_map to manage diskquota worker processes. */ memset(&hash_ctl, 0, sizeof(hash_ctl)); @@ -484,7 +484,6 @@ init_lwlocks(void) diskquota_locks.extension_ddl_lock = LWLockAssign(); diskquota_locks.monitoring_dbid_cache_lock = LWLockAssign(); diskquota_locks.relation_cache_lock = LWLockAssign();
- diskquota_locks.hardlimit_lock = LWLockAssign();
 diskquota_locks.worker_map_lock = LWLockAssign(); diskquota_locks.altered_reloid_cache_lock = LWLockAssign(); } @@ -711,7 +710,6 @@ refresh_disk_quota_usage(bool is_init) bool pushed_active_snap = false; bool ret = true; HTAB *local_active_table_stat_map = NULL;
- bool enable_hardlimit;
 StartTransactionCommand(); @@ -746,10 +744,7 @@ refresh_disk_quota_usage(bool is_init) /* copy local black map back to shared black map */ flush_local_black_map(); /* Dispatch blackmap entries to segments to perform hard-limit. */
- LWLockAcquire(diskquota_locks.hardlimit_lock, LW_SHARED);
- enable_hardlimit = *diskquota_hardlimit;
- LWLockRelease(diskquota_locks.hardlimit_lock);
- if (enable_hardlimit)
+ if (pg_atomic_read_u32(diskquota_hardlimit))
 dispatch_blackmap(local_active_table_stat_map); hash_destroy(local_active_table_stat_map); } @@ -1580,9 +1575,7 @@ quota_check_common(Oid reloid, RelFileNode *relfilenode) if (OidIsValid(reloid)) return check_blackmap_by_reloid(reloid);
- LWLockAcquire(diskquota_locks.hardlimit_lock, LW_SHARED);
- enable_hardlimit = *diskquota_hardlimit;
- LWLockRelease(diskquota_locks.hardlimit_lock);
+ enable_hardlimit = pg_atomic_read_u32(diskquota_hardlimit);
 #ifdef FAULT_INJECTOR if (SIMPLE_FAULT_INJECTOR("enable_check_quota_by_relfilenode") == FaultInjectorTypeSkip) @@ -2202,9 +2195,7 @@ diskquota_enable_hardlimit(PG_FUNCTION_ARGS) LWLockRelease(diskquota_locks.black_map_lock); }
- LWLockAcquire(diskquota_locks.hardlimit_lock, LW_EXCLUSIVE);
- *diskquota_hardlimit = true;
- LWLockRelease(diskquota_locks.hardlimit_lock);
+ pg_atomic_write_u32(diskquota_hardlimit, true);
 if (IS_QUERY_DISPATCHER()) dispatch_hardlimit_flag(true /*enable_hardlimit*/); @@ -2221,9 +2212,7 @@ diskquota_disable_hardlimit(PG_FUNCTION_ARGS) (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("must be superuser to disable hardlimit")));
- LWLockAcquire(diskquota_locks.hardlimit_lock, LW_EXCLUSIVE);
- *diskquota_hardlimit = false;
- LWLockRelease(diskquota_locks.hardlimit_lock);
+ pg_atomic_write_u32(diskquota_hardlimit, false);
 if (IS_QUERY_DISPATCHER()) dispatch_hardlimit_flag(false /*enable_hardlimit*/); From aebb21bf744c6343ee2ac488aa2823e91af40db6 Mon Sep 17 00:00:00 2001 From: Sasasu Date: Tue, 25 Jan 2022 17:37:13 +0800 Subject: [PATCH 114/330] send an error if launcher crashed (#124) --- diskquota_utility.c | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/diskquota_utility.c b/diskquota_utility.c index 1bbdf516e46..0a109b698ce 100755 --- a/diskquota_utility.c +++ b/diskquota_utility.c @@ -236,7 +236,7 @@ generate_insert_table_size_sql(StringInfoData *insert_buf, int extMajorVersion) Datum diskquota_start_worker(PG_FUNCTION_ARGS) {
- int rc;
+ int rc, launcher_pid;
 /* * Lock on 
extension_ddl_lock to avoid multiple backend create diskquota @@ -248,8 +248,9 @@ diskquota_start_worker(PG_FUNCTION_ARGS) extension_ddl_message->cmd = CMD_CREATE_EXTENSION; extension_ddl_message->result = ERR_PENDING; extension_ddl_message->dbid = MyDatabaseId;
+ launcher_pid = extension_ddl_message->launcher_pid;
 /* setup sig handler to diskquota launcher process */
- rc = kill(extension_ddl_message->launcher_pid, SIGUSR1);
+ rc = kill(launcher_pid, SIGUSR1);
 LWLockRelease(diskquota_locks.extension_ddl_message_lock); if (rc == 0) { @@ -264,6 +265,11 @@ diskquota_start_worker(PG_FUNCTION_ARGS) if (rc & WL_POSTMASTER_DEATH) break; ResetLatch(&MyProc->procLatch);
+
+ ereportif(ERROR,
+ kill(launcher_pid, 0) == -1 && errno == ESRCH, // do existence check
+ (errmsg("[diskquota] diskquota launcher pid = %d no longer exists", launcher_pid)));
+
 LWLockAcquire(diskquota_locks.extension_ddl_message_lock, LW_SHARED); if (extension_ddl_message->result != ERR_PENDING) { @@ -486,7 +492,7 @@ dq_object_access_hook(ObjectAccessType access, Oid classId, Oid objectId, int subId, void *arg) { Oid oid;
- int rc;
+ int rc, launcher_pid;
 if (access != OAT_DROP || classId != ExtensionRelationId) goto out; @@ -504,7 +510,8 @@ dq_object_access_hook(ObjectAccessType access, Oid classId, extension_ddl_message->cmd = CMD_DROP_EXTENSION; extension_ddl_message->result = ERR_PENDING; extension_ddl_message->dbid = MyDatabaseId;
- rc = kill(extension_ddl_message->launcher_pid, SIGUSR1);
+ launcher_pid = extension_ddl_message->launcher_pid;
+ rc = kill(launcher_pid, SIGUSR1);
 LWLockRelease(diskquota_locks.extension_ddl_message_lock); if (rc == 0) { @@ -519,6 +526,11 @@ dq_object_access_hook(ObjectAccessType access, Oid classId, if (rc & WL_POSTMASTER_DEATH) break; ResetLatch(&MyProc->procLatch);
+
+ ereportif(ERROR,
+ kill(launcher_pid, 0) == -1 && errno == ESRCH, // do existence check
+ (errmsg("[diskquota] diskquota launcher pid = %d no longer exists", launcher_pid)));
+
 LWLockAcquire(diskquota_locks.extension_ddl_message_lock, LW_SHARED); if (extension_ddl_message->result != ERR_PENDING) { From a7d371c05354ff7cf4c97e7c2341c0e56bd7feb8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Xuebin=20Su=20=28=E8=8B=8F=E5=AD=A6=E6=96=8C=29?= <12034000+xuebinsu@users.noreply.github.com> Date: Thu, 27 Jan 2022 19:16:11 +0800 Subject: [PATCH 115/330] Pause before DROP EXTENSION (#121) Currently, deadlock can occur when 1. A user session is doing DROP EXTENSION, and 2. A bgworker is loading quota configs using SPI. This patch fixes the issue by pausing diskquota before DROP EXTENSION so that the bgworker will not load configs anymore. Note that this cannot be done using object_access_hook() because the extension object is dropped AFTER dropping all tables that belong to the extension. 
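For illustration, the pause-first drop sequence this patch adds to the tests can be sketched as follows (a minimal sketch using only functions that already exist in this extension; the waiting semantics are those of diskquota.wait_for_worker_new_epoch()):

    -- stop the per-database bgworker from enforcing quotas and loading configs via SPI
    SELECT diskquota.pause();
    -- wait one full worker loop so the worker is known to have observed the pause
    SELECT diskquota.wait_for_worker_new_epoch();
    -- the drop can no longer race against a bgworker SPI transaction
    DROP EXTENSION diskquota;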
--- tests/isolation2/sql/cleanup.sql | 2 + tests/regress/expected/clean.out | 12 ++ .../expected/test_drop_after_pause.out | 26 +++- tests/regress/expected/test_extension.out | 143 +++++++++++++++--- .../test_pause_and_resume_multiple_db.out | 24 +++ tests/regress/expected/test_recreate.out | 12 ++ tests/regress/sql/clean.sql | 2 + tests/regress/sql/test_drop_after_pause.sql | 5 +- tests/regress/sql/test_extension.sql | 45 +++--- .../sql/test_pause_and_resume_multiple_db.sql | 4 + tests/regress/sql/test_recreate.sql | 2 + 11 files changed, 227 insertions(+), 50 deletions(-) diff --git a/tests/isolation2/sql/cleanup.sql b/tests/isolation2/sql/cleanup.sql index b569b6cce8b..fa8de5acd8e 100644 --- a/tests/isolation2/sql/cleanup.sql +++ b/tests/isolation2/sql/cleanup.sql @@ -1,3 +1,5 @@ -- start_ignore +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); DROP EXTENSION diskquota; -- end_ignore diff --git a/tests/regress/expected/clean.out b/tests/regress/expected/clean.out index 4712add5c30..0b0773f848f 100644 --- a/tests/regress/expected/clean.out +++ b/tests/regress/expected/clean.out @@ -1,4 +1,16 @@ DROP TABLE badquota.t1; DROP ROLE testbody; DROP SCHEMA badquota; +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + DROP EXTENSION diskquota; diff --git a/tests/regress/expected/test_drop_after_pause.out b/tests/regress/expected/test_drop_after_pause.out index 7f2cbc74d4b..32e492778eb 100644 --- a/tests/regress/expected/test_drop_after_pause.out +++ b/tests/regress/expected/test_drop_after_pause.out @@ -1,8 +1,16 @@ CREATE DATABASE test_drop_after_pause; \c test_drop_after_pause CREATE EXTENSION diskquota; -SELECT FROM diskquota.pause(); --- +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) DROP EXTENSION diskquota; @@ -30,13 +38,25 @@ SELECT diskquota.wait_for_worker_new_epoch(); (1 row) INSERT INTO SX.a SELECT generate_series(1,1000000); -- expect insert fail -ERROR: schema's disk space quota exceeded with name:41008 (seg0 127.0.0.1:6002 pid=388076) +ERROR: schema's disk space quota exceeded with name:746125 (seg0 127.0.0.1:6002 pid=22648) SELECT diskquota.disable_hardlimit(); disable_hardlimit ------------------- (1 row) +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + DROP EXTENSION diskquota; \c contrib_regression DROP DATABASE test_drop_after_pause; diff --git a/tests/regress/expected/test_extension.out b/tests/regress/expected/test_extension.out index 085019e2147..c7d4942b7a5 100644 --- a/tests/regress/expected/test_extension.out +++ b/tests/regress/expected/test_extension.out @@ -23,7 +23,6 @@ show max_worker_processes; 2 \c dbx0 CREATE EXTENSION diskquota; -\! sleep 2 \! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 3 CREATE SCHEMA SX; @@ -74,7 +73,6 @@ ERROR: schema's disk space quota exceeded with name:sx DROP TABLE SX.a; \c dbx2 CREATE EXTENSION diskquota; -\! sleep 2 \! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 5 CREATE SCHEMA SX; @@ -99,7 +97,6 @@ ERROR: schema's disk space quota exceeded with name:sx DROP TABLE SX.a; \c dbx3 CREATE EXTENSION diskquota; -\! sleep 2 \! 
ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 6 CREATE SCHEMA SX; @@ -124,7 +121,6 @@ ERROR: schema's disk space quota exceeded with name:sx DROP TABLE SX.a; \c dbx4 CREATE EXTENSION diskquota; -\! sleep 2 \! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 7 CREATE SCHEMA SX; @@ -149,7 +145,6 @@ ERROR: schema's disk space quota exceeded with name:sx DROP TABLE SX.a; \c dbx5 CREATE EXTENSION diskquota; -\! sleep 2 \! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 8 CREATE SCHEMA SX; @@ -174,7 +169,6 @@ ERROR: schema's disk space quota exceeded with name:sx DROP TABLE SX.a; \c dbx6 CREATE EXTENSION diskquota; -\! sleep 2 \! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 9 CREATE SCHEMA SX; @@ -199,7 +193,6 @@ ERROR: schema's disk space quota exceeded with name:sx DROP TABLE SX.a; \c dbx7 CREATE EXTENSION diskquota; -\! sleep 2 \! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 10 CREATE SCHEMA SX; @@ -224,7 +217,6 @@ ERROR: schema's disk space quota exceeded with name:sx DROP TABLE SX.a; \c dbx8 CREATE EXTENSION diskquota; -\! sleep 2 \! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 11 CREATE SCHEMA SX; @@ -249,74 +241,177 @@ ERROR: schema's disk space quota exceeded with name:sx DROP TABLE SX.a; \c dbx9 CREATE EXTENSION diskquota; -ERROR: [diskquota] failed to create diskquota extension: too many databases to monitor (diskquota_utility.c:290) -\! sleep 2 +ERROR: [diskquota] failed to create diskquota extension: too many databases to monitor (diskquota_utility.c:287) \! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 11 \c dbx10 CREATE EXTENSION diskquota; -ERROR: [diskquota] failed to create diskquota extension: too many databases to monitor (diskquota_utility.c:290) -\! sleep 2 +ERROR: [diskquota] failed to create diskquota extension: too many databases to monitor (diskquota_utility.c:287) \! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 11 \c dbx0 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + DROP EXTENSION diskquota; -\! sleep 2 \! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 10 \c dbx1 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + DROP EXTENSION diskquota; -\! sleep 2 \! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 9 \c dbx2 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + DROP EXTENSION diskquota; -\! sleep 2 \! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 8 \c dbx3 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + DROP EXTENSION diskquota; -\! sleep 2 \! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 7 \c dbx4 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + DROP EXTENSION diskquota; -\! sleep 2 \! 
ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 6 \c dbx5 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + DROP EXTENSION diskquota; -\! sleep 2 \! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 5 \c dbx6 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + DROP EXTENSION diskquota; -\! sleep 2 \! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 4 \c dbx7 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + DROP EXTENSION diskquota; -\! sleep 2 \! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 3 \c dbx8 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + DROP EXTENSION diskquota; -\! sleep 2 \! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 2 \c dbx9 +SELECT diskquota.pause(); +ERROR: schema "diskquota" does not exist +SELECT diskquota.wait_for_worker_new_epoch(); +ERROR: schema "diskquota" does not exist DROP EXTENSION diskquota; ERROR: extension "diskquota" does not exist -\! sleep 2 \! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 2 \c dbx10 +SELECT diskquota.pause(); +ERROR: schema "diskquota" does not exist +SELECT diskquota.wait_for_worker_new_epoch(); +ERROR: schema "diskquota" does not exist DROP EXTENSION diskquota; ERROR: extension "diskquota" does not exist -\! sleep 2 \! 
ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 2 -\c postgres +\c contrib_regression DROP DATABASE dbx0 ; DROP DATABASE dbx1 ; DROP DATABASE dbx2 ; diff --git a/tests/regress/expected/test_pause_and_resume_multiple_db.out b/tests/regress/expected/test_pause_and_resume_multiple_db.out index 4247d9c471c..92028606b7d 100644 --- a/tests/regress/expected/test_pause_and_resume_multiple_db.out +++ b/tests/regress/expected/test_pause_and_resume_multiple_db.out @@ -161,8 +161,32 @@ SELECT diskquota.wait_for_worker_new_epoch(); INSERT INTO s1.a SELECT generate_series(1,100); -- expect insert fail ERROR: schema's disk space quota exceeded with name:s1 \c test_pause_and_resume +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + DROP EXTENSION diskquota; \c test_new_create_database +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + DROP EXTENSION diskquota; \c contrib_regression DROP SCHEMA s1 CASCADE; diff --git a/tests/regress/expected/test_recreate.out b/tests/regress/expected/test_recreate.out index 77fbeec3515..d87713d409f 100644 --- a/tests/regress/expected/test_recreate.out +++ b/tests/regress/expected/test_recreate.out @@ -4,6 +4,18 @@ CREATE DATABASE test_recreate; INSERT INTO diskquota_namespace.database_list(dbid) SELECT oid FROM pg_database WHERE datname = 'test_recreate'; \c test_recreate CREATE EXTENSION diskquota; -- shoud be ok +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + DROP EXTENSION diskquota; \c contrib_regression DROP DATABASE test_recreate; diff --git a/tests/regress/sql/clean.sql b/tests/regress/sql/clean.sql index bf71fcb0d19..bb84eb2b7e1 100644 --- a/tests/regress/sql/clean.sql +++ b/tests/regress/sql/clean.sql @@ -2,4 +2,6 @@ DROP TABLE badquota.t1; DROP ROLE testbody; DROP SCHEMA badquota; +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); DROP EXTENSION diskquota; diff --git a/tests/regress/sql/test_drop_after_pause.sql b/tests/regress/sql/test_drop_after_pause.sql index 51a5fd802a3..b034fead210 100644 --- a/tests/regress/sql/test_drop_after_pause.sql +++ b/tests/regress/sql/test_drop_after_pause.sql @@ -3,7 +3,8 @@ CREATE DATABASE test_drop_after_pause; \c test_drop_after_pause CREATE EXTENSION diskquota; -SELECT FROM diskquota.pause(); +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); DROP EXTENSION diskquota; CREATE EXTENSION diskquota; @@ -17,6 +18,8 @@ SELECT diskquota.wait_for_worker_new_epoch(); INSERT INTO SX.a SELECT generate_series(1,1000000); -- expect insert fail SELECT diskquota.disable_hardlimit(); +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); DROP EXTENSION diskquota; \c contrib_regression diff --git a/tests/regress/sql/test_extension.sql b/tests/regress/sql/test_extension.sql index 0f243493138..5d04db10682 100644 --- a/tests/regress/sql/test_extension.sql +++ b/tests/regress/sql/test_extension.sql @@ -20,7 +20,6 @@ show max_worker_processes; \c dbx0 CREATE EXTENSION diskquota; -\! sleep 2 \! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l CREATE SCHEMA SX; CREATE TABLE SX.a(i int); @@ -44,7 +43,6 @@ DROP TABLE SX.a; \c dbx2 CREATE EXTENSION diskquota; -\! sleep 2 \! 
ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l CREATE SCHEMA SX; CREATE TABLE SX.a(i int); @@ -56,7 +54,6 @@ DROP TABLE SX.a; \c dbx3 CREATE EXTENSION diskquota; -\! sleep 2 \! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l CREATE SCHEMA SX; CREATE TABLE SX.a(i int); @@ -68,7 +65,6 @@ DROP TABLE SX.a; \c dbx4 CREATE EXTENSION diskquota; -\! sleep 2 \! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l CREATE SCHEMA SX; CREATE TABLE SX.a(i int); @@ -80,7 +76,6 @@ DROP TABLE SX.a; \c dbx5 CREATE EXTENSION diskquota; -\! sleep 2 \! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l CREATE SCHEMA SX; CREATE TABLE SX.a(i int); @@ -92,7 +87,6 @@ DROP TABLE SX.a; \c dbx6 CREATE EXTENSION diskquota; -\! sleep 2 \! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l CREATE SCHEMA SX; CREATE TABLE SX.a(i int); @@ -104,7 +98,6 @@ DROP TABLE SX.a; \c dbx7 CREATE EXTENSION diskquota; -\! sleep 2 \! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l CREATE SCHEMA SX; CREATE TABLE SX.a(i int); @@ -116,7 +109,6 @@ DROP TABLE SX.a; \c dbx8 CREATE EXTENSION diskquota; -\! sleep 2 \! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l CREATE SCHEMA SX; CREATE TABLE SX.a(i int); @@ -128,70 +120,79 @@ DROP TABLE SX.a; \c dbx9 CREATE EXTENSION diskquota; -\! sleep 2 \! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l \c dbx10 CREATE EXTENSION diskquota; -\! sleep 2 \! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l \c dbx0 +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); DROP EXTENSION diskquota; -\! sleep 2 \! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l \c dbx1 +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); DROP EXTENSION diskquota; -\! sleep 2 \! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l \c dbx2 +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); DROP EXTENSION diskquota; -\! sleep 2 \! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l \c dbx3 +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); DROP EXTENSION diskquota; -\! sleep 2 \! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l \c dbx4 +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); DROP EXTENSION diskquota; -\! sleep 2 \! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l \c dbx5 +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); DROP EXTENSION diskquota; -\! sleep 2 \! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l \c dbx6 +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); DROP EXTENSION diskquota; -\! sleep 2 \! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l \c dbx7 +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); DROP EXTENSION diskquota; -\! sleep 2 \! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l \c dbx8 +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); DROP EXTENSION diskquota; -\! sleep 2 \! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l \c dbx9 +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); DROP EXTENSION diskquota; -\! sleep 2 \! 
ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l \c dbx10 +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); DROP EXTENSION diskquota; -\! sleep 2 \! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l -\c postgres +\c contrib_regression DROP DATABASE dbx0 ; DROP DATABASE dbx1 ; diff --git a/tests/regress/sql/test_pause_and_resume_multiple_db.sql b/tests/regress/sql/test_pause_and_resume_multiple_db.sql index 5209f3a505a..6a8cd67ca7a 100644 --- a/tests/regress/sql/test_pause_and_resume_multiple_db.sql +++ b/tests/regress/sql/test_pause_and_resume_multiple_db.sql @@ -66,9 +66,13 @@ SELECT diskquota.wait_for_worker_new_epoch(); INSERT INTO s1.a SELECT generate_series(1,100); -- expect insert fail \c test_pause_and_resume +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); DROP EXTENSION diskquota; \c test_new_create_database +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); DROP EXTENSION diskquota; \c contrib_regression diff --git a/tests/regress/sql/test_recreate.sql b/tests/regress/sql/test_recreate.sql index ea3820d7245..4581df96c85 100644 --- a/tests/regress/sql/test_recreate.sql +++ b/tests/regress/sql/test_recreate.sql @@ -8,6 +8,8 @@ INSERT INTO diskquota_namespace.database_list(dbid) SELECT oid FROM pg_database \c test_recreate CREATE EXTENSION diskquota; -- shoud be ok +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); DROP EXTENSION diskquota; \c contrib_regression From 57c1cb480b951cefe7aa0c5b911ea40c2f03a439 Mon Sep 17 00:00:00 2001 From: Sasasu Date: Thu, 27 Jan 2022 16:13:49 +0800 Subject: [PATCH 116/330] add test case for pause hardlimit (#128) --- tests/regress/diskquota_schedule | 1 + tests/regress/expected/test_ctas_pause.out | 49 ++++++++++++++++++++++ tests/regress/sql/test_ctas_pause.sql | 19 +++++++++ 3 files changed, 69 insertions(+) create mode 100644 tests/regress/expected/test_ctas_pause.out create mode 100644 tests/regress/sql/test_ctas_pause.sql diff --git a/tests/regress/diskquota_schedule b/tests/regress/diskquota_schedule index 33164929a14..7133e9ce703 100644 --- a/tests/regress/diskquota_schedule +++ b/tests/regress/diskquota_schedule @@ -23,6 +23,7 @@ test: test_many_active_tables test: test_fetch_table_stat test: test_appendonly test: test_blackmap +test: test_ctas_pause test: test_ctas_role test: test_ctas_schema test: test_ctas_tablespace_role diff --git a/tests/regress/expected/test_ctas_pause.out b/tests/regress/expected/test_ctas_pause.out new file mode 100644 index 00000000000..c1dd71bf939 --- /dev/null +++ b/tests/regress/expected/test_ctas_pause.out @@ -0,0 +1,49 @@ +CREATE SCHEMA hardlimit_s; +SET search_path TO hardlimit_s; +SELECT diskquota.enable_hardlimit(); + enable_hardlimit +------------------ + +(1 row) + +SELECT diskquota.set_schema_quota('hardlimit_s', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- heap table +CREATE TABLE t1 AS SELECT generate_series(1,1000000); -- expect fail +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
+ERROR: schema's disk space quota exceeded with name:110528 (seg1 127.0.0.1:6003 pid=73892) +SELECT diskquota.pause(); + pause +------- + +(1 row) + +CREATE TABLE t1 AS SELECT generate_series(1,1000000); -- expect succeed +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +-- disable hardlimit and do some clean-ups. +SELECT diskquota.disable_hardlimit(); + disable_hardlimit +------------------- + +(1 row) + +SELECT diskquota.resume(); + resume +-------- + +(1 row) + +DROP SCHEMA hardlimit_s CASCADE; +NOTICE: drop cascades to table t1 diff --git a/tests/regress/sql/test_ctas_pause.sql b/tests/regress/sql/test_ctas_pause.sql new file mode 100644 index 00000000000..c7c1a78a41d --- /dev/null +++ b/tests/regress/sql/test_ctas_pause.sql @@ -0,0 +1,19 @@ +CREATE SCHEMA hardlimit_s; +SET search_path TO hardlimit_s; + +SELECT diskquota.enable_hardlimit(); +SELECT diskquota.set_schema_quota('hardlimit_s', '1 MB'); +SELECT diskquota.wait_for_worker_new_epoch(); + +-- heap table +CREATE TABLE t1 AS SELECT generate_series(1,1000000); -- expect fail + +SELECT diskquota.pause(); + +CREATE TABLE t1 AS SELECT generate_series(1,1000000); -- expect succeed + +-- disable hardlimit and do some clean-ups. +SELECT diskquota.disable_hardlimit(); +SELECT diskquota.resume(); + +DROP SCHEMA hardlimit_s CASCADE; From 74ff561657816e8eaf8c5cadeec51043a388927a Mon Sep 17 00:00:00 2001 From: Chen Mulong Date: Wed, 9 Feb 2022 12:08:37 +0800 Subject: [PATCH 117/330] Attempt to fix flaky test_primary_failure (#132) Test case test_primary_failure will stop/start a segment to produce a mirror switch. But the segment start could fail while replaying xlog. The failure was caused by the tablespace directories deleted in previous test cases. This commit removes the "rm" statement in those tablespace test cases and adds "-p" to the "mkdir" command line. The corresponding sub-directories will be deleted by "DROP TABLESPACE" if the case passes.
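For illustration, the resulting pattern in the tablespace cases looks roughly like the sketch below (the tablespace name "example_spc" and its path are placeholders, not taken from any single test):

-- start_ignore
\! mkdir -p /tmp/example_spc   # -p tolerates an already-existing directory
-- end_ignore
DROP TABLESPACE IF EXISTS example_spc;
CREATE TABLESPACE example_spc LOCATION '/tmp/example_spc';
-- ... test body ...
DROP TABLESPACE example_spc;
-- Note: no trailing "\! rm -rf /tmp/example_spc". The location directory must
-- survive so that a restarted segment replaying "xlog redo create tablespace"
-- can still find it; DROP TABLESPACE cleans up the sub-directories under it.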
Relevant logs: 2022-02-08 10:09:30.458183 CST,,,p1182584,th1235613568,,,,0,,,seg1,,,,,"LOG","00000","entering standby mode",,,,,,,0,,"xlog.c",6537, 2022-02-08 10:09:30.458670 CST,,,p1182584,th1235613568,,,,0,,,seg1,,,,,"LOG","00000","redo starts at E/24638A28",,,,,,,0,,"xlog.c",7153, 2022-02-08 10:09:30.468323 CST,"cc","postgres",p1182588,th1235613568,"[local]",,2022-02-08 10:09:30 CST,0,,,seg1,,,,,"FATAL","57P03","the database system is starting up" ,"last replayed record at E/2481EA70",,,,,,0,,"postmaster.c",2552, 2022-02-08 10:09:30.484792 CST,,,p1182584,th1235613568,,,,0,,,seg1,,,,,"FATAL","58P01","directory ""/tmp/test_spc"" does not exist",,"Create this directory for the table space before restarting the server.",,,"xlog redo create tablespace: 2590660 ""/tmp/test_spc""",,0,,"tablespace.c",749, --- tests/regress/expected/test_blackmap.out | 3 +-- tests/regress/expected/test_ctas_tablespace_role.out | 1 - tests/regress/expected/test_ctas_tablespace_schema.out | 1 - tests/regress/expected/test_index.out | 3 +-- tests/regress/expected/test_mistake.out | 3 +-- tests/regress/expected/test_relation_size.out | 3 +-- tests/regress/expected/test_tablespace_role.out | 6 ++---- tests/regress/expected/test_tablespace_role_perseg.out | 6 ++---- tests/regress/expected/test_tablespace_schema.out | 6 ++---- tests/regress/expected/test_tablespace_schema_perseg.out | 6 ++---- tests/regress/sql/test_blackmap.sql | 3 +-- tests/regress/sql/test_ctas_tablespace_role.sql | 1 - tests/regress/sql/test_ctas_tablespace_schema.sql | 1 - tests/regress/sql/test_index.sql | 3 +-- tests/regress/sql/test_mistake.sql | 3 +-- tests/regress/sql/test_relation_size.sql | 3 +-- tests/regress/sql/test_tablespace_role.sql | 6 ++---- tests/regress/sql/test_tablespace_role_perseg.sql | 6 ++---- tests/regress/sql/test_tablespace_schema.sql | 6 ++---- tests/regress/sql/test_tablespace_schema_perseg.sql | 6 ++---- 20 files changed, 24 insertions(+), 52 deletions(-) diff --git a/tests/regress/expected/test_blackmap.out b/tests/regress/expected/test_blackmap.out index 96d652a3a05..eae4fe65634 100644 --- a/tests/regress/expected/test_blackmap.out +++ b/tests/regress/expected/test_blackmap.out @@ -94,7 +94,7 @@ SELECT rel.relname, be.target_type, (be.target_oid=rel.relowner) AS owner_matche (1 row) -- Create a tablespace to test the rest of blocking types. -\! mkdir /tmp/blocked_space +\! mkdir -p /tmp/blocked_space CREATE TABLESPACE blocked_space LOCATION '/tmp/blocked_space'; ALTER TABLE blocked_t1 SET TABLESPACE blocked_space; -- Insert an entry for blocked_t1 to blackmap on seg0. @@ -278,6 +278,5 @@ DROP TABLE blocked_t3; DROP TABLE blocked_t4; DROP TABLE blocked_t5; DROP TABLESPACE blocked_space; -\! rm -rf /tmp/blocked_space SET search_path TO DEFAULT; DROP SCHEMA s_blackmap; diff --git a/tests/regress/expected/test_ctas_tablespace_role.out b/tests/regress/expected/test_ctas_tablespace_role.out index 8b884ce58e4..5c6df2e3bad 100644 --- a/tests/regress/expected/test_ctas_tablespace_role.out +++ b/tests/regress/expected/test_ctas_tablespace_role.out @@ -84,7 +84,6 @@ NOTICE: table "aocs_table" does not exist, skipping RESET ROLE; RESET default_tablespace; DROP TABLESPACE ctas_rolespc; -\! 
rm -rf /tmp/ctas_rolespc; REVOKE USAGE ON SCHEMA diskquota FROM hardlimit_r; DROP ROLE hardlimit_r; SELECT diskquota.disable_hardlimit(); diff --git a/tests/regress/expected/test_ctas_tablespace_schema.out b/tests/regress/expected/test_ctas_tablespace_schema.out index 77543a00cc6..f63f324cd8b 100644 --- a/tests/regress/expected/test_ctas_tablespace_schema.out +++ b/tests/regress/expected/test_ctas_tablespace_schema.out @@ -82,7 +82,6 @@ RESET search_path; RESET default_tablespace; DROP SCHEMA hardlimit_s; DROP TABLESPACE ctas_schemaspc; -\! rm -rf /tmp/ctas_schemaspc; SELECT diskquota.disable_hardlimit(); disable_hardlimit ------------------- diff --git a/tests/regress/expected/test_index.out b/tests/regress/expected/test_index.out index f6724e915bb..dd66f0921c1 100644 --- a/tests/regress/expected/test_index.out +++ b/tests/regress/expected/test_index.out @@ -1,6 +1,6 @@ -- Test schema -- start_ignore -\! mkdir /tmp/indexspc +\! mkdir -p /tmp/indexspc -- end_ignore CREATE SCHEMA indexschema1; DROP TABLESPACE IF EXISTS indexspc; @@ -108,4 +108,3 @@ DROP INDEX indexschema1.a_index; DROP TABLE indexschema1.test_index_a; DROP SCHEMA indexschema1; DROP TABLESPACE indexspc; -\! rm -rf /tmp/indexspc diff --git a/tests/regress/expected/test_mistake.out b/tests/regress/expected/test_mistake.out index 3eeb578e730..fab4c6eb2f7 100644 --- a/tests/regress/expected/test_mistake.out +++ b/tests/regress/expected/test_mistake.out @@ -18,7 +18,7 @@ NOTICE: resource queue required -- using default resource queue "pg_default" select diskquota.set_role_quota('rmistake', '0 MB'); ERROR: disk quota can not be set to 0 MB -- start_ignore -\! mkdir /tmp/spcmistake +\! mkdir -p /tmp/spcmistake -- end_ignore DROP TABLESPACE IF EXISTS spcmistake; NOTICE: tablespace "spcmistake" does not exist, skipping @@ -32,4 +32,3 @@ ERROR: per segment quota ratio can not be set to 0 DROP SCHEMA nmistake; DROP ROLE rmistake; DROP TABLESPACE spcmistake; -\! rm -rf /tmp/spcmistake diff --git a/tests/regress/expected/test_relation_size.out b/tests/regress/expected/test_relation_size.out index 7841bec9f81..49292c9ccf1 100644 --- a/tests/regress/expected/test_relation_size.out +++ b/tests/regress/expected/test_relation_size.out @@ -31,7 +31,7 @@ SELECT pg_table_size('t2'); (1 row) -- start_ignore -\! mkdir /tmp/test_spc +\! mkdir -p /tmp/test_spc -- end_ignore DROP TABLESPACE IF EXISTS test_spc; NOTICE: tablespace "test_spc" does not exist, skipping @@ -66,7 +66,6 @@ SELECT pg_table_size('t2'); DROP TABLE t1, t2; DROP TABLESPACE test_spc; -\! rm -rf /tmp/test_spc CREATE TABLE ao (i int) WITH (appendonly=true); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. diff --git a/tests/regress/expected/test_tablespace_role.out b/tests/regress/expected/test_tablespace_role.out index 23d25941587..6420f47b91e 100644 --- a/tests/regress/expected/test_tablespace_role.out +++ b/tests/regress/expected/test_tablespace_role.out @@ -1,6 +1,6 @@ -- Test role quota -- start_ignore -\! mkdir /tmp/rolespc +\! mkdir -p /tmp/rolespc -- end_ignore DROP TABLESPACE IF EXISTS rolespc; NOTICE: tablespace "rolespc" does not exist, skipping @@ -83,7 +83,7 @@ INSERT INTO b SELECT generate_series(1,100); ERROR: tablespace:rolespc role:rolespcu1 diskquota exceeded -- Test alter tablespace -- start_ignore -\! 
mkdir /tmp/rolespc2 +\! mkdir -p /tmp/rolespc2 -- end_ignore DROP TABLESPACE IF EXISTS rolespc2; NOTICE: tablespace "rolespc2" does not exist, skipping @@ -155,5 +155,3 @@ RESET search_path; DROP SCHEMA rolespcrole; DROP TABLESPACE rolespc; DROP TABLESPACE rolespc2; -\! rm -rf /tmp/rolespc; -\! rm -rf /tmp/rolespc2 diff --git a/tests/regress/expected/test_tablespace_role_perseg.out b/tests/regress/expected/test_tablespace_role_perseg.out index 0dffd077ccd..709aa0f3c6f 100644 --- a/tests/regress/expected/test_tablespace_role_perseg.out +++ b/tests/regress/expected/test_tablespace_role_perseg.out @@ -1,6 +1,6 @@ -- Test role quota -- start_ignore -\! mkdir /tmp/rolespc_perseg +\! mkdir -p /tmp/rolespc_perseg -- end_ignore DROP TABLESPACE IF EXISTS rolespc_perseg; NOTICE: tablespace "rolespc_perseg" does not exist, skipping @@ -96,7 +96,7 @@ INSERT INTO b SELECT generate_series(1,100); ERROR: tablespace:rolespc_perseg role:rolespc_persegu1 diskquota exceeded per segment quota -- Test alter tablespace -- start_ignore -\! mkdir /tmp/rolespc_perseg2 +\! mkdir -p /tmp/rolespc_perseg2 -- end_ignore DROP TABLESPACE IF EXISTS rolespc_perseg2; NOTICE: tablespace "rolespc_perseg2" does not exist, skipping @@ -208,5 +208,3 @@ RESET search_path; DROP SCHEMA rolespc_persegrole; DROP TABLESPACE rolespc_perseg; DROP TABLESPACE rolespc_perseg2; -\! rm -rf /tmp/rolespc_perseg; -\! rm -rf /tmp/rolespc_perseg2 diff --git a/tests/regress/expected/test_tablespace_schema.out b/tests/regress/expected/test_tablespace_schema.out index 6282d84a62b..f8e96a922fd 100644 --- a/tests/regress/expected/test_tablespace_schema.out +++ b/tests/regress/expected/test_tablespace_schema.out @@ -1,6 +1,6 @@ -- Test schema -- start_ignore -\! mkdir /tmp/schemaspc +\! mkdir -p /tmp/schemaspc -- end_ignore CREATE SCHEMA spcs1; DROP TABLESPACE IF EXISTS schemaspc; @@ -65,7 +65,7 @@ SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FR -- Test alter tablespace -- start_ignore -\! mkdir /tmp/schemaspc2 +\! mkdir -p /tmp/schemaspc2 -- end_ignore DROP TABLESPACE IF EXISTS schemaspc2; NOTICE: tablespace "schemaspc2" does not exist, skipping @@ -135,5 +135,3 @@ DROP TABLE spcs1.a2, spcs1.a; DROP SCHEMA spcs1, spcs2; DROP TABLESPACE schemaspc; DROP TABLESPACE schemaspc2; -\! rm -rf /tmp/schemaspc -\! rm -rf /tmp/schemaspc2 diff --git a/tests/regress/expected/test_tablespace_schema_perseg.out b/tests/regress/expected/test_tablespace_schema_perseg.out index a91218e6ffe..2a0c73daa4d 100644 --- a/tests/regress/expected/test_tablespace_schema_perseg.out +++ b/tests/regress/expected/test_tablespace_schema_perseg.out @@ -1,6 +1,6 @@ -- Test schema -- start_ignore -\! mkdir /tmp/schemaspc_perseg +\! mkdir -p /tmp/schemaspc_perseg -- end_ignore -- Test tablespace quota perseg CREATE SCHEMA spcs1_perseg; @@ -94,7 +94,7 @@ SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FR -- Test alter tablespace -- start_ignore -\! mkdir /tmp/schemaspc_perseg2 +\! mkdir -p /tmp/schemaspc_perseg2 -- end_ignore DROP TABLESPACE IF EXISTS schemaspc_perseg2; NOTICE: tablespace "schemaspc_perseg2" does not exist, skipping @@ -209,5 +209,3 @@ DROP TABLE spcs1_perseg.a; DROP SCHEMA spcs1_perseg; DROP TABLESPACE schemaspc_perseg; DROP TABLESPACE schemaspc_perseg2; -\! rm -rf /tmp/schemaspc_perseg -\! 
rm -rf /tmp/schemaspc_perseg2 diff --git a/tests/regress/sql/test_blackmap.sql b/tests/regress/sql/test_blackmap.sql index ffd6568960c..2efc0478acc 100644 --- a/tests/regress/sql/test_blackmap.sql +++ b/tests/regress/sql/test_blackmap.sql @@ -81,7 +81,7 @@ SELECT rel.relname, be.target_type, (be.target_oid=rel.relowner) AS owner_matche WHERE rel.relfilenode=be.relnode AND be.relnode<>0 AND rel.gp_segment_id=be.segid; -- Create a tablespace to test the rest of blocking types. -\! mkdir /tmp/blocked_space +\! mkdir -p /tmp/blocked_space CREATE TABLESPACE blocked_space LOCATION '/tmp/blocked_space'; ALTER TABLE blocked_t1 SET TABLESPACE blocked_space; @@ -191,6 +191,5 @@ DROP TABLE blocked_t3; DROP TABLE blocked_t4; DROP TABLE blocked_t5; DROP TABLESPACE blocked_space; -\! rm -rf /tmp/blocked_space SET search_path TO DEFAULT; DROP SCHEMA s_blackmap; diff --git a/tests/regress/sql/test_ctas_tablespace_role.sql b/tests/regress/sql/test_ctas_tablespace_role.sql index ccb38ce5107..4b6ded807d8 100644 --- a/tests/regress/sql/test_ctas_tablespace_role.sql +++ b/tests/regress/sql/test_ctas_tablespace_role.sql @@ -41,7 +41,6 @@ DROP TABLE IF EXISTS aocs_table; RESET ROLE; RESET default_tablespace; DROP TABLESPACE ctas_rolespc; -\! rm -rf /tmp/ctas_rolespc; REVOKE USAGE ON SCHEMA diskquota FROM hardlimit_r; DROP ROLE hardlimit_r; SELECT diskquota.disable_hardlimit(); diff --git a/tests/regress/sql/test_ctas_tablespace_schema.sql b/tests/regress/sql/test_ctas_tablespace_schema.sql index baebf79e74d..0caac946d12 100644 --- a/tests/regress/sql/test_ctas_tablespace_schema.sql +++ b/tests/regress/sql/test_ctas_tablespace_schema.sql @@ -41,5 +41,4 @@ RESET search_path; RESET default_tablespace; DROP SCHEMA hardlimit_s; DROP TABLESPACE ctas_schemaspc; -\! rm -rf /tmp/ctas_schemaspc; SELECT diskquota.disable_hardlimit(); diff --git a/tests/regress/sql/test_index.sql b/tests/regress/sql/test_index.sql index e9201a73bd4..5b884b1e0af 100644 --- a/tests/regress/sql/test_index.sql +++ b/tests/regress/sql/test_index.sql @@ -1,6 +1,6 @@ -- Test schema -- start_ignore -\! mkdir /tmp/indexspc +\! mkdir -p /tmp/indexspc -- end_ignore CREATE SCHEMA indexschema1; DROP TABLESPACE IF EXISTS indexspc; @@ -43,4 +43,3 @@ DROP INDEX indexschema1.a_index; DROP TABLE indexschema1.test_index_a; DROP SCHEMA indexschema1; DROP TABLESPACE indexspc; -\! rm -rf /tmp/indexspc diff --git a/tests/regress/sql/test_mistake.sql b/tests/regress/sql/test_mistake.sql index 9a1cbf20f58..fd0e9d300b3 100644 --- a/tests/regress/sql/test_mistake.sql +++ b/tests/regress/sql/test_mistake.sql @@ -11,7 +11,7 @@ CREATE ROLE rmistake; select diskquota.set_role_quota('rmistake', '0 MB'); -- start_ignore -\! mkdir /tmp/spcmistake +\! mkdir -p /tmp/spcmistake -- end_ignore DROP TABLESPACE IF EXISTS spcmistake; CREATE TABLESPACE spcmistake LOCATION '/tmp/spcmistake'; @@ -22,4 +22,3 @@ SELECT diskquota.set_per_segment_quota('spcmistake', 0); DROP SCHEMA nmistake; DROP ROLE rmistake; DROP TABLESPACE spcmistake; -\! rm -rf /tmp/spcmistake diff --git a/tests/regress/sql/test_relation_size.sql b/tests/regress/sql/test_relation_size.sql index fe277cd89ff..ffe102a2642 100644 --- a/tests/regress/sql/test_relation_size.sql +++ b/tests/regress/sql/test_relation_size.sql @@ -9,7 +9,7 @@ SELECT diskquota.relation_size('t2'); SELECT pg_table_size('t2'); -- start_ignore -\! mkdir /tmp/test_spc +\! 
mkdir -p /tmp/test_spc -- end_ignore DROP TABLESPACE IF EXISTS test_spc; CREATE TABLESPACE test_spc LOCATION '/tmp/test_spc'; @@ -26,7 +26,6 @@ SELECT pg_table_size('t2'); DROP TABLE t1, t2; DROP TABLESPACE test_spc; -\! rm -rf /tmp/test_spc CREATE TABLE ao (i int) WITH (appendonly=true); INSERT INTO ao SELECT generate_series(1, 10000); diff --git a/tests/regress/sql/test_tablespace_role.sql b/tests/regress/sql/test_tablespace_role.sql index eaf116bf1b8..c1755457a2e 100644 --- a/tests/regress/sql/test_tablespace_role.sql +++ b/tests/regress/sql/test_tablespace_role.sql @@ -1,6 +1,6 @@ -- Test role quota -- start_ignore -\! mkdir /tmp/rolespc +\! mkdir -p /tmp/rolespc -- end_ignore DROP TABLESPACE IF EXISTS rolespc; CREATE TABLESPACE rolespc LOCATION '/tmp/rolespc'; @@ -46,7 +46,7 @@ INSERT INTO b SELECT generate_series(1,100); -- Test alter tablespace -- start_ignore -\! mkdir /tmp/rolespc2 +\! mkdir -p /tmp/rolespc2 -- end_ignore DROP TABLESPACE IF EXISTS rolespc2; CREATE TABLESPACE rolespc2 LOCATION '/tmp/rolespc2'; @@ -83,5 +83,3 @@ RESET search_path; DROP SCHEMA rolespcrole; DROP TABLESPACE rolespc; DROP TABLESPACE rolespc2; -\! rm -rf /tmp/rolespc; -\! rm -rf /tmp/rolespc2 diff --git a/tests/regress/sql/test_tablespace_role_perseg.sql b/tests/regress/sql/test_tablespace_role_perseg.sql index a6159872a54..4242da5c4dd 100644 --- a/tests/regress/sql/test_tablespace_role_perseg.sql +++ b/tests/regress/sql/test_tablespace_role_perseg.sql @@ -1,6 +1,6 @@ -- Test role quota -- start_ignore -\! mkdir /tmp/rolespc_perseg +\! mkdir -p /tmp/rolespc_perseg -- end_ignore DROP TABLESPACE IF EXISTS rolespc_perseg; CREATE TABLESPACE rolespc_perseg LOCATION '/tmp/rolespc_perseg'; @@ -47,7 +47,7 @@ INSERT INTO b SELECT generate_series(1,100); -- Test alter tablespace -- start_ignore -\! mkdir /tmp/rolespc_perseg2 +\! mkdir -p /tmp/rolespc_perseg2 -- end_ignore DROP TABLESPACE IF EXISTS rolespc_perseg2; CREATE TABLESPACE rolespc_perseg2 LOCATION '/tmp/rolespc_perseg2'; @@ -95,5 +95,3 @@ RESET search_path; DROP SCHEMA rolespc_persegrole; DROP TABLESPACE rolespc_perseg; DROP TABLESPACE rolespc_perseg2; -\! rm -rf /tmp/rolespc_perseg; -\! rm -rf /tmp/rolespc_perseg2 diff --git a/tests/regress/sql/test_tablespace_schema.sql b/tests/regress/sql/test_tablespace_schema.sql index b57f2180a9c..1fb85cd3a78 100644 --- a/tests/regress/sql/test_tablespace_schema.sql +++ b/tests/regress/sql/test_tablespace_schema.sql @@ -1,6 +1,6 @@ -- Test schema -- start_ignore -\! mkdir /tmp/schemaspc +\! mkdir -p /tmp/schemaspc -- end_ignore CREATE SCHEMA spcs1; DROP TABLESPACE IF EXISTS schemaspc; @@ -36,7 +36,7 @@ SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FR -- Test alter tablespace -- start_ignore -\! mkdir /tmp/schemaspc2 +\! mkdir -p /tmp/schemaspc2 -- end_ignore DROP TABLESPACE IF EXISTS schemaspc2; CREATE TABLESPACE schemaspc2 LOCATION '/tmp/schemaspc2'; @@ -71,6 +71,4 @@ DROP TABLE spcs1.a2, spcs1.a; DROP SCHEMA spcs1, spcs2; DROP TABLESPACE schemaspc; DROP TABLESPACE schemaspc2; -\! rm -rf /tmp/schemaspc -\! rm -rf /tmp/schemaspc2 diff --git a/tests/regress/sql/test_tablespace_schema_perseg.sql b/tests/regress/sql/test_tablespace_schema_perseg.sql index 36cdeca9f67..56e7421052c 100644 --- a/tests/regress/sql/test_tablespace_schema_perseg.sql +++ b/tests/regress/sql/test_tablespace_schema_perseg.sql @@ -1,6 +1,6 @@ -- Test schema -- start_ignore -\! mkdir /tmp/schemaspc_perseg +\! 
mkdir -p /tmp/schemaspc_perseg -- end_ignore -- Test tablespace quota perseg CREATE SCHEMA spcs1_perseg; @@ -42,7 +42,7 @@ SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FR -- Test alter tablespace -- start_ignore -\! mkdir /tmp/schemaspc_perseg2 +\! mkdir -p /tmp/schemaspc_perseg2 -- end_ignore DROP TABLESPACE IF EXISTS schemaspc_perseg2; CREATE TABLESPACE schemaspc_perseg2 LOCATION '/tmp/schemaspc_perseg2'; @@ -88,6 +88,4 @@ DROP TABLE spcs1_perseg.a; DROP SCHEMA spcs1_perseg; DROP TABLESPACE schemaspc_perseg; DROP TABLESPACE schemaspc_perseg2; -\! rm -rf /tmp/schemaspc_perseg -\! rm -rf /tmp/schemaspc_perseg2 From d08b887a6b2ab9cd127489906fd4eb8d0a16ff54 Mon Sep 17 00:00:00 2001 From: Chen Mulong Date: Thu, 10 Feb 2022 11:04:19 +0800 Subject: [PATCH 118/330] ereportif uses the 1st param as the condition (#131) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Otherwise, compiler reports a warning: "comparison of constant ‘20’ with boolean expression is always false" --- diskquota_utility.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/diskquota_utility.c b/diskquota_utility.c index 0a109b698ce..ba62e2c2c41 100644 --- a/diskquota_utility.c +++ b/diskquota_utility.c @@ -266,9 +266,9 @@ diskquota_start_worker(PG_FUNCTION_ARGS) break; ResetLatch(&MyProc->procLatch); - ereportif(ERROR, - kill(launcher_pid, 0) == -1 && errno == ESRCH, // do existence check - (errmsg("[diskquota] diskquotal launcher pid = %d no longer exist", launcher_pid))); + ereportif(kill(launcher_pid, 0) == -1 && errno == ESRCH, // do existence check + ERROR, + (errmsg("[diskquota] diskquota launcher pid = %d no longer exists", launcher_pid))); LWLockAcquire(diskquota_locks.extension_ddl_message_lock, LW_SHARED); if (extension_ddl_message->result != ERR_PENDING) @@ -527,9 +527,9 @@ dq_object_access_hook(ObjectAccessType access, Oid classId, break; ResetLatch(&MyProc->procLatch); - ereportif(ERROR, - kill(launcher_pid, 0) == -1 && errno == ESRCH, // do existence check - (errmsg("[diskquota] diskquotal launcher pid = %d no longer exist", launcher_pid))); + ereportif(kill(launcher_pid, 0) == -1 && errno == ESRCH, // do existence check + ERROR, + (errmsg("[diskquota] diskquota launcher pid = %d no longer exists", launcher_pid))); LWLockAcquire(diskquota_locks.extension_ddl_message_lock, LW_SHARED); if (extension_ddl_message->result != ERR_PENDING) From 5a23be85da3530baf3480e3386468568e61bc35e Mon Sep 17 00:00:00 2001 From: Sasasu Date: Wed, 26 Jan 2022 12:09:30 +0800 Subject: [PATCH 119/330] add diskquota.status() to show if hardlimit or softlimit enabled or not --- diskquota--1.0--2.0.sql | 6 ++ diskquota--2.0--1.0.sql | 2 + diskquota--2.0.sql | 7 ++ diskquota.c | 108 ++++++++++++++++++++ tests/regress/diskquota_schedule | 1 + tests/regress/expected/test_show_status.out | 77 ++++++++++++++ tests/regress/sql/test_show_status.sql | 20 ++++ 7 files changed, 221 insertions(+) create mode 100644 tests/regress/expected/test_show_status.out create mode 100644 tests/regress/sql/test_show_status.sql diff --git a/diskquota--1.0--2.0.sql b/diskquota--1.0--2.0.sql index e086f5580b4..995e68cf61b 100644 --- a/diskquota--1.0--2.0.sql +++ b/diskquota--1.0--2.0.sql @@ -117,3 +117,9 @@ CREATE TYPE diskquota.diskquota_active_table_type AS ("TABLE_OID" oid, "TABLE_S CREATE OR REPLACE FUNCTION diskquota.diskquota_fetch_table_stat(int4, oid[]) RETURNS setof diskquota.diskquota_active_table_type AS 'MODULE_PATHNAME', 'diskquota_fetch_table_stat' 
LANGUAGE C VOLATILE; + +-- returns the current status in current database +CREATE OR REPLACE FUNCTION diskquota.status() +RETURNS TABLE ("name" text, "status" text) STRICT +AS 'MODULE_PATHNAME', 'diskquota_status' +LANGUAGE C; diff --git a/diskquota--2.0--1.0.sql b/diskquota--2.0--1.0.sql index dc8a50d865b..58804d4df59 100644 --- a/diskquota--2.0--1.0.sql +++ b/diskquota--2.0--1.0.sql @@ -14,6 +14,8 @@ DROP FUNCTION IF EXISTS diskquota.enable_hardlimit(); DROP FUNCTION IF EXISTS diskquota.refresh_blackmap(diskquota.blackmap_entry[], oid[]); +DROP FUNCTION IF EXISTS diskquota.status(); + DROP TYPE IF EXISTS diskquota.blackmap_entry; DROP VIEW IF EXISTS diskquota.blackmap; diff --git a/diskquota--2.0.sql b/diskquota--2.0.sql index 8898de9cd02..cbda632ebd5 100644 --- a/diskquota--2.0.sql +++ b/diskquota--2.0.sql @@ -236,6 +236,13 @@ BEGIN END; $$ LANGUAGE PLpgSQL; +-- returns the current status in current database +CREATE OR REPLACE FUNCTION diskquota.status() +RETURNS TABLE ("name" text, "status" text) STRICT +AS 'MODULE_PATHNAME', 'diskquota_status' +LANGUAGE C; + -- re-dispatch pause status to false. in case user pause-drop-recreate. -- refer to see test case 'test_drop_after_pause' SELECT from diskquota.resume(); + diff --git a/diskquota.c b/diskquota.c index 31523f17495..d6d510b68f1 100644 --- a/diskquota.c +++ b/diskquota.c @@ -19,6 +19,7 @@ #include "postgres.h" +#include "funcapi.h" #include "access/xact.h" #include "cdb/cdbvars.h" #include "commands/dbcommands.h" @@ -1079,3 +1080,110 @@ show_worker_epoch(PG_FUNCTION_ARGS) { PG_RETURN_UINT32(worker_get_epoch(MyDatabaseId)); } + +static const char* diskquota_status_check_soft_limit() { + // should run on coordinator only. + Assert(IS_QUERY_DISPATCHER()); + + bool found, paused; + LWLockAcquire(diskquota_locks.worker_map_lock, LW_SHARED); + { + DiskQuotaWorkerEntry *hash_entry; + + hash_entry = (DiskQuotaWorkerEntry*) hash_search(disk_quota_worker_map, + (void*)&MyDatabaseId, + HASH_FIND, + &found); + paused = found ? hash_entry->is_paused : false; + } + LWLockRelease(diskquota_locks.worker_map_lock); + + // if worker no booted, aka 'CREATE EXTENSION' not called, diskquota is paused + if (!found) + return "paused"; + + // if worker booted, check 'worker_map->is_paused' + return paused ? "paused" : "enabled"; +} + +static const char* diskquota_status_check_hard_limit() +{ + // should run on coordinator only. + Assert(IS_QUERY_DISPATCHER()); + + bool hardlimit = pg_atomic_read_u32(diskquota_hardlimit); + + bool found, paused; + LWLockAcquire(diskquota_locks.worker_map_lock, LW_SHARED); + { + DiskQuotaWorkerEntry *hash_entry; + + hash_entry = (DiskQuotaWorkerEntry*) hash_search(disk_quota_worker_map, + (void*)&MyDatabaseId, + HASH_FIND, + &found); + paused = found ? hash_entry->is_paused : false; + } + LWLockRelease(diskquota_locks.worker_map_lock); + + // if worker booted and 'worker_map->is_paused == true' and hardlimit is enabled + // hard limits should also paused + if (found && paused && hardlimit) + return "paused"; + + return hardlimit ? 
"enabled": "disabled"; +} + +PG_FUNCTION_INFO_V1(diskquota_status); +Datum diskquota_status(PG_FUNCTION_ARGS) +{ + typedef struct Context { + int index; + } Context; + + typedef struct FeatureStatus { + const char* name; + const char* (*status)(void); + } FeatureStatus; + + static const FeatureStatus fs[] = { + {.name = "soft limits", .status = diskquota_status_check_soft_limit}, + {.name = "hard limits", .status = diskquota_status_check_hard_limit}, + }; + + FuncCallContext *funcctx; + + if (SRF_IS_FIRSTCALL()) { + funcctx = SRF_FIRSTCALL_INIT(); + + MemoryContext oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx); + { + TupleDesc tupdesc = CreateTemplateTupleDesc(2, false); + TupleDescInitEntry(tupdesc, 1, "name", TEXTOID, -1, 0); + TupleDescInitEntry(tupdesc, 2, "status", TEXTOID, -1, 0); + funcctx->tuple_desc = BlessTupleDesc(tupdesc); + Context *context = (Context *)palloc(sizeof(Context)); + context->index = 0; + funcctx->user_fctx = context; + } + MemoryContextSwitchTo(oldcontext); + } + + funcctx = SRF_PERCALL_SETUP(); + Context *context = (Context *)funcctx->user_fctx; + + if (context->index >= sizeof(fs) / sizeof(FeatureStatus)) { + SRF_RETURN_DONE(funcctx); + } + + bool nulls[2] = {false, false}; + Datum v[2] = { + DirectFunctionCall1(textin, CStringGetDatum(fs[context->index].name)), + DirectFunctionCall1(textin, CStringGetDatum(fs[context->index].status())), + }; + ReturnSetInfo *rsi = (ReturnSetInfo *)fcinfo->resultinfo; + HeapTuple tuple = heap_form_tuple(rsi->expectedDesc, v, nulls); + + context->index++; + SRF_RETURN_NEXT(funcctx, HeapTupleGetDatum(tuple)); +} diff --git a/tests/regress/diskquota_schedule b/tests/regress/diskquota_schedule index 7133e9ce703..179f7d4053c 100644 --- a/tests/regress/diskquota_schedule +++ b/tests/regress/diskquota_schedule @@ -7,6 +7,7 @@ test: test_uncommitted_table_size test: test_pause_and_resume test: test_pause_and_resume_multiple_db test: test_drop_after_pause +test: test_show_status # disable this tese due to GPDB behavior change # test: test_table_size test: test_fast_disk_check diff --git a/tests/regress/expected/test_show_status.out b/tests/regress/expected/test_show_status.out new file mode 100644 index 00000000000..8d4fe819ae3 --- /dev/null +++ b/tests/regress/expected/test_show_status.out @@ -0,0 +1,77 @@ +select * from diskquota.status(); + name | status +-------------+---------- + soft limits | enabled + hard limits | disabled +(2 rows) + +select from diskquota.enable_hardlimit(); +-- +(1 row) + +select * from diskquota.status(); + name | status +-------------+--------- + soft limits | enabled + hard limits | enabled +(2 rows) + +select from diskquota.disable_hardlimit(); +-- +(1 row) + +select * from diskquota.status(); + name | status +-------------+---------- + soft limits | enabled + hard limits | disabled +(2 rows) + +select from diskquota.pause(); +-- +(1 row) + +select * from diskquota.status(); + name | status +-------------+---------- + soft limits | paused + hard limits | disabled +(2 rows) + +select from diskquota.enable_hardlimit(); +-- +(1 row) + +select * from diskquota.status(); + name | status +-------------+-------- + soft limits | paused + hard limits | paused +(2 rows) + +select from diskquota.disable_hardlimit(); +-- +(1 row) + +select * from diskquota.status(); + name | status +-------------+---------- + soft limits | paused + hard limits | disabled +(2 rows) + +select from diskquota.resume(); +-- +(1 row) + +select from diskquota.disable_hardlimit(); +-- +(1 row) + +select * from 
diskquota.status(); + name | status +-------------+---------- + soft limits | enabled + hard limits | disabled +(2 rows) + diff --git a/tests/regress/sql/test_show_status.sql b/tests/regress/sql/test_show_status.sql new file mode 100644 index 00000000000..d532406fc71 --- /dev/null +++ b/tests/regress/sql/test_show_status.sql @@ -0,0 +1,20 @@ +select * from diskquota.status(); + +select from diskquota.enable_hardlimit(); +select * from diskquota.status(); + +select from diskquota.disable_hardlimit(); +select * from diskquota.status(); + +select from diskquota.pause(); +select * from diskquota.status(); + +select from diskquota.enable_hardlimit(); +select * from diskquota.status(); + +select from diskquota.disable_hardlimit(); +select * from diskquota.status(); + +select from diskquota.resume(); +select from diskquota.disable_hardlimit(); +select * from diskquota.status(); From 193e34639a904b6d76aed01b40e34ef0bd4b5298 Mon Sep 17 00:00:00 2001 From: Sasasu Date: Tue, 15 Feb 2022 17:12:07 +0800 Subject: [PATCH 120/330] ci: fix flaky test --- tests/regress/expected/test_extension.out | 46 +++++++++++------------ tests/regress/sql/test_extension.sql | 46 +++++++++++------------ 2 files changed, 44 insertions(+), 48 deletions(-) diff --git a/tests/regress/expected/test_extension.out b/tests/regress/expected/test_extension.out index c7d4942b7a5..1eceef7d6fe 100644 --- a/tests/regress/expected/test_extension.out +++ b/tests/regress/expected/test_extension.out @@ -18,12 +18,11 @@ show max_worker_processes; 20 (1 row) -\! sleep 4 -\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 2 \c dbx0 CREATE EXTENSION diskquota; -\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 3 CREATE SCHEMA SX; CREATE TABLE SX.a(i int); @@ -65,8 +64,7 @@ SELECT diskquota.set_schema_quota('SX', '1MB'); (1 row) -\! sleep 5 -\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 4 INSERT INTO SX.a values(generate_series(0, 10)); ERROR: schema's disk space quota exceeded with name:sx @@ -97,7 +95,7 @@ ERROR: schema's disk space quota exceeded with name:sx DROP TABLE SX.a; \c dbx3 CREATE EXTENSION diskquota; -\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 6 CREATE SCHEMA SX; CREATE TABLE SX.a(i int); @@ -121,7 +119,7 @@ ERROR: schema's disk space quota exceeded with name:sx DROP TABLE SX.a; \c dbx4 CREATE EXTENSION diskquota; -\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 7 CREATE SCHEMA SX; CREATE TABLE SX.a(i int); @@ -145,7 +143,7 @@ ERROR: schema's disk space quota exceeded with name:sx DROP TABLE SX.a; \c dbx5 CREATE EXTENSION diskquota; -\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 8 CREATE SCHEMA SX; CREATE TABLE SX.a(i int); @@ -169,7 +167,7 @@ ERROR: schema's disk space quota exceeded with name:sx DROP TABLE SX.a; \c dbx6 CREATE EXTENSION diskquota; -\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +\! 
sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 9 CREATE SCHEMA SX; CREATE TABLE SX.a(i int); @@ -193,7 +191,7 @@ ERROR: schema's disk space quota exceeded with name:sx DROP TABLE SX.a; \c dbx7 CREATE EXTENSION diskquota; -\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 10 CREATE SCHEMA SX; CREATE TABLE SX.a(i int); @@ -217,7 +215,7 @@ ERROR: schema's disk space quota exceeded with name:sx DROP TABLE SX.a; \c dbx8 CREATE EXTENSION diskquota; -\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 11 CREATE SCHEMA SX; CREATE TABLE SX.a(i int); @@ -242,12 +240,12 @@ DROP TABLE SX.a; \c dbx9 CREATE EXTENSION diskquota; ERROR: [diskquota] failed to create diskquota extension: too many databases to monitor (diskquota_utility.c:287) -\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 11 \c dbx10 CREATE EXTENSION diskquota; ERROR: [diskquota] failed to create diskquota extension: too many databases to monitor (diskquota_utility.c:287) -\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 11 \c dbx0 SELECT diskquota.pause(); @@ -263,7 +261,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); (1 row) DROP EXTENSION diskquota; -\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 10 \c dbx1 SELECT diskquota.pause(); @@ -279,7 +277,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); (1 row) DROP EXTENSION diskquota; -\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 9 \c dbx2 SELECT diskquota.pause(); @@ -295,7 +293,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); (1 row) DROP EXTENSION diskquota; -\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 8 \c dbx3 SELECT diskquota.pause(); @@ -311,7 +309,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); (1 row) DROP EXTENSION diskquota; -\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 7 \c dbx4 SELECT diskquota.pause(); @@ -327,7 +325,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); (1 row) DROP EXTENSION diskquota; -\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 6 \c dbx5 SELECT diskquota.pause(); @@ -343,7 +341,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); (1 row) DROP EXTENSION diskquota; -\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 5 \c dbx6 SELECT diskquota.pause(); @@ -359,7 +357,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); (1 row) DROP EXTENSION diskquota; -\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +\! 
sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 4 \c dbx7 SELECT diskquota.pause(); @@ -375,7 +373,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); (1 row) DROP EXTENSION diskquota; -\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 3 \c dbx8 SELECT diskquota.pause(); @@ -391,7 +389,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); (1 row) DROP EXTENSION diskquota; -\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 2 \c dbx9 SELECT diskquota.pause(); @@ -400,7 +398,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); ERROR: schema "diskquota" does not exist DROP EXTENSION diskquota; ERROR: extension "diskquota" does not exist -\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 2 \c dbx10 SELECT diskquota.pause(); @@ -409,7 +407,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); ERROR: schema "diskquota" does not exist DROP EXTENSION diskquota; ERROR: extension "diskquota" does not exist -\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 2 \c contrib_regression DROP DATABASE dbx0 ; diff --git a/tests/regress/sql/test_extension.sql b/tests/regress/sql/test_extension.sql index 5d04db10682..6b41ac8698e 100644 --- a/tests/regress/sql/test_extension.sql +++ b/tests/regress/sql/test_extension.sql @@ -15,12 +15,11 @@ CREATE DATABASE dbx10 ; show max_worker_processes; -\! sleep 4 -\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l \c dbx0 CREATE EXTENSION diskquota; -\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l CREATE SCHEMA SX; CREATE TABLE SX.a(i int); SELECT diskquota.set_schema_quota('SX', '1MB'); @@ -36,8 +35,7 @@ INSERT INTO SX.a values(generate_series(0, 100000)); CREATE EXTENSION diskquota; SELECT diskquota.init_table_size_table(); SELECT diskquota.set_schema_quota('SX', '1MB'); -\! sleep 5 -\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l INSERT INTO SX.a values(generate_series(0, 10)); DROP TABLE SX.a; @@ -54,7 +52,7 @@ DROP TABLE SX.a; \c dbx3 CREATE EXTENSION diskquota; -\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l CREATE SCHEMA SX; CREATE TABLE SX.a(i int); SELECT diskquota.set_schema_quota('SX', '1MB'); @@ -65,7 +63,7 @@ DROP TABLE SX.a; \c dbx4 CREATE EXTENSION diskquota; -\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l CREATE SCHEMA SX; CREATE TABLE SX.a(i int); SELECT diskquota.set_schema_quota('SX', '1MB'); @@ -76,7 +74,7 @@ DROP TABLE SX.a; \c dbx5 CREATE EXTENSION diskquota; -\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +\! 
sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l CREATE SCHEMA SX; CREATE TABLE SX.a(i int); SELECT diskquota.set_schema_quota('SX', '1MB'); @@ -87,7 +85,7 @@ DROP TABLE SX.a; \c dbx6 CREATE EXTENSION diskquota; -\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l CREATE SCHEMA SX; CREATE TABLE SX.a(i int); SELECT diskquota.set_schema_quota('SX', '1MB'); @@ -98,7 +96,7 @@ DROP TABLE SX.a; \c dbx7 CREATE EXTENSION diskquota; -\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l CREATE SCHEMA SX; CREATE TABLE SX.a(i int); SELECT diskquota.set_schema_quota('SX', '1MB'); @@ -109,7 +107,7 @@ DROP TABLE SX.a; \c dbx8 CREATE EXTENSION diskquota; -\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l CREATE SCHEMA SX; CREATE TABLE SX.a(i int); SELECT diskquota.set_schema_quota('SX', '1MB'); @@ -120,77 +118,77 @@ DROP TABLE SX.a; \c dbx9 CREATE EXTENSION diskquota; -\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l \c dbx10 CREATE EXTENSION diskquota; -\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l \c dbx0 SELECT diskquota.pause(); SELECT diskquota.wait_for_worker_new_epoch(); DROP EXTENSION diskquota; -\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l \c dbx1 SELECT diskquota.pause(); SELECT diskquota.wait_for_worker_new_epoch(); DROP EXTENSION diskquota; -\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l \c dbx2 SELECT diskquota.pause(); SELECT diskquota.wait_for_worker_new_epoch(); DROP EXTENSION diskquota; -\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l \c dbx3 SELECT diskquota.pause(); SELECT diskquota.wait_for_worker_new_epoch(); DROP EXTENSION diskquota; -\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l \c dbx4 SELECT diskquota.pause(); SELECT diskquota.wait_for_worker_new_epoch(); DROP EXTENSION diskquota; -\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l \c dbx5 SELECT diskquota.pause(); SELECT diskquota.wait_for_worker_new_epoch(); DROP EXTENSION diskquota; -\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l \c dbx6 SELECT diskquota.pause(); SELECT diskquota.wait_for_worker_new_epoch(); DROP EXTENSION diskquota; -\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l \c dbx7 SELECT diskquota.pause(); SELECT diskquota.wait_for_worker_new_epoch(); DROP EXTENSION diskquota; -\! 
ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l \c dbx8 SELECT diskquota.pause(); SELECT diskquota.wait_for_worker_new_epoch(); DROP EXTENSION diskquota; -\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l \c dbx9 SELECT diskquota.pause(); SELECT diskquota.wait_for_worker_new_epoch(); DROP EXTENSION diskquota; -\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l \c dbx10 SELECT diskquota.pause(); SELECT diskquota.wait_for_worker_new_epoch(); DROP EXTENSION diskquota; -\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l \c contrib_regression From c7a6255dc00adb4918a8c79ff770307b01fff61b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Xuebin=20Su=20=28=E8=8B=8F=E5=AD=A6=E6=96=8C=29?= <12034000+xuebinsu@users.noreply.github.com> Date: Wed, 16 Feb 2022 15:19:34 +0800 Subject: [PATCH 121/330] Add timeout when waiting for workers (#130) Each time the state of Diskquota is changed, we need to wait for the change to take effect using diskquota.wait_for_worker_new_epoch(). However, when the bgworker is not alive, such a wait can last forever. This patch fixes the issue by adding a timeout GUC so that wait() throws a NOTICE if it times out, making it more user-friendly. To fix a race condition during CREATE EXTENSION, the user needs to SELECT wait_for_worker_new_epoch() manually before writing data. This waits until the current database is added to the monitored db cache so that active tables in the current database can be recorded. This patch also fixes the test script for activating the standby and renames some of the cases to make them clearer.
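A sketch of the intended flow (the timeout value is illustrative; diskquota.worker_timeout is a SIGHUP GUC, so it is set via gpconfig plus a reload rather than SET, shown here in the isolation2 test syntax):

!\retcode gpconfig -c diskquota.worker_timeout -v 5;
!\retcode gpstop -u;
CREATE EXTENSION diskquota;
-- Wait for the bgworker to observe this database before writing data.
-- If the worker is not alive, a NOTICE is raised roughly every
-- diskquota.worker_timeout seconds; the wait can be cancelled by the user.
SELECT diskquota.wait_for_worker_new_epoch();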
--- concourse/scripts/test_common.sh | 6 +- diskquota--2.0.sql | 23 +--- diskquota.c | 75 +++++++++++-- diskquota.h | 2 +- quotamodel.c | 1 + tests/isolation2/expected/cleanup.out | 0 tests/isolation2/expected/config.out | 30 ++++++ tests/isolation2/expected/init.out | 16 --- tests/isolation2/expected/prepare.out | 0 .../expected/test_create_extension.out | 15 +++ .../expected/test_drop_extension.out | 12 +++ .../expected/test_worker_timeout.out | 38 +++++++ tests/isolation2/isolation2_schedule | 7 +- tests/isolation2/sql/config.sql | 16 +++ tests/isolation2/sql/init.sql | 23 ---- tests/isolation2/sql/prepare.sql | 4 - .../isolation2/sql/test_create_extension.sql | 6 ++ .../{cleanup.sql => test_drop_extension.sql} | 2 - tests/isolation2/sql/test_worker_timeout.sql | 20 ++++ tests/regress/data/csmall.txt | 100 ------------------ tests/regress/diskquota_schedule | 7 +- tests/regress/expected/config.out | 20 ++++ tests/regress/expected/init.out | 20 ---- tests/regress/expected/prepare.out | 61 ----------- tests/regress/expected/test_copy.out | 1 + .../expected/test_create_extension.out | 14 +++ .../expected/test_drop_after_pause.out | 14 ++- .../{clean.out => test_drop_extension.out} | 3 - tests/regress/expected/test_extension.out | 65 +++++++++++- .../test_pause_and_resume_multiple_db.out | 14 ++- tests/regress/expected/test_recreate.out | 8 +- tests/regress/expected/test_schema.out | 46 ++++++++ tests/regress/expected/test_worker_epoch.out | 9 -- tests/regress/sql/config.sql | 16 +++ tests/regress/sql/init.sql | 21 ---- tests/regress/sql/prepare.sql | 24 ----- tests/regress/sql/test_copy.sql | 2 + tests/regress/sql/test_create_extension.sql | 6 ++ tests/regress/sql/test_drop_after_pause.sql | 2 + .../{clean.sql => test_drop_extension.sql} | 4 - tests/regress/sql/test_extension.sql | 19 +++- tests/regress/sql/test_insert_after_drop.sql | 1 + tests/regress/sql/test_manytable.sql | 3 +- .../sql/test_pause_and_resume_multiple_db.sql | 4 +- tests/regress/sql/test_recreate.sql | 3 +- tests/regress/sql/test_schema.sql | 19 ++++ tests/regress/sql/test_worker_epoch.sql | 5 - upgrade_test/sql/prepare.sql | 1 + upgrade_test/sql/test_manytable.sql | 3 +- 49 files changed, 468 insertions(+), 343 deletions(-) delete mode 100644 tests/isolation2/expected/cleanup.out create mode 100644 tests/isolation2/expected/config.out delete mode 100644 tests/isolation2/expected/init.out delete mode 100644 tests/isolation2/expected/prepare.out create mode 100644 tests/isolation2/expected/test_create_extension.out create mode 100644 tests/isolation2/expected/test_drop_extension.out create mode 100644 tests/isolation2/expected/test_worker_timeout.out create mode 100644 tests/isolation2/sql/config.sql delete mode 100644 tests/isolation2/sql/init.sql delete mode 100644 tests/isolation2/sql/prepare.sql create mode 100644 tests/isolation2/sql/test_create_extension.sql rename tests/isolation2/sql/{cleanup.sql => test_drop_extension.sql} (76%) create mode 100644 tests/isolation2/sql/test_worker_timeout.sql delete mode 100644 tests/regress/data/csmall.txt create mode 100644 tests/regress/expected/config.out delete mode 100644 tests/regress/expected/init.out delete mode 100644 tests/regress/expected/prepare.out create mode 100644 tests/regress/expected/test_create_extension.out rename tests/regress/expected/{clean.out => test_drop_extension.out} (74%) delete mode 100644 tests/regress/expected/test_worker_epoch.out create mode 100644 tests/regress/sql/config.sql delete mode 100644 tests/regress/sql/init.sql delete mode 100644 
tests/regress/sql/prepare.sql create mode 100644 tests/regress/sql/test_create_extension.sql rename tests/regress/sql/{clean.sql => test_drop_extension.sql} (59%) delete mode 100644 tests/regress/sql/test_worker_epoch.sql diff --git a/concourse/scripts/test_common.sh b/concourse/scripts/test_common.sh index 0a72ffe8518..fde28800738 100644 --- a/concourse/scripts/test_common.sh +++ b/concourse/scripts/test_common.sh @@ -39,9 +39,11 @@ function test(){ ## Bring down the QD. gpstop -may -M immediate export PGPORT=6001 - echo "export PGPROT=\$PGPORT" >> /usr/local/greenplum-db-devel/greenplum_path.sh + export MASTER_DATA_DIRECTORY=${TOP_DIR}/gpdb_src/gpAux/gpdemo/datadirs/standby + gpactivatestandby -ad \$MASTER_DATA_DIRECTORY + echo "export PGPORT=\$PGPORT" >> /usr/local/greenplum-db-devel/greenplum_path.sh + echo "export MASTER_DATA_DIRECTORY=\$MASTER_DATA_DIRECTORY" >> /usr/local/greenplum-db-devel/greenplum_path.sh source /usr/local/greenplum-db-devel/greenplum_path.sh - gpactivatestandby -ad ${TOP_DIR}/gpdb_src/gpAux/gpdemo/datadirs/standby make installcheck fi popd diff --git a/diskquota--2.0.sql b/diskquota--2.0.sql index cbda632ebd5..1d78f3f5de3 100644 --- a/diskquota--2.0.sql +++ b/diskquota--2.0.sql @@ -214,27 +214,8 @@ LANGUAGE C; -- In this case, we must ensure this UDF can be interrupted by the user. CREATE OR REPLACE FUNCTION diskquota.wait_for_worker_new_epoch() RETURNS boolean STRICT -AS $$ -DECLARE - current_epoch bigint; - new_epoch bigint; -BEGIN - current_epoch := diskquota.show_worker_epoch(); - LOOP - new_epoch := diskquota.show_worker_epoch(); - IF new_epoch <> current_epoch THEN - current_epoch := new_epoch; - LOOP - new_epoch := diskquota.show_worker_epoch(); - IF new_epoch <> current_epoch THEN - RETURN TRUE; - END IF; - END LOOP; - END IF; - END LOOP; - RETURN FALSE; -END; -$$ LANGUAGE PLpgSQL; +AS 'MODULE_PATHNAME', 'wait_for_worker_new_epoch' +LANGUAGE C; -- returns the current status in current database CREATE OR REPLACE FUNCTION diskquota.status() diff --git a/diskquota.c b/diskquota.c index d6d510b68f1..10b41a9c2f3 100644 --- a/diskquota.c +++ b/diskquota.c @@ -24,14 +24,17 @@ #include "cdb/cdbvars.h" #include "commands/dbcommands.h" #include "executor/spi.h" +#include "port/atomics.h" #include "storage/ipc.h" #include "storage/proc.h" #include "tcop/idle_resource_cleaner.h" #include "tcop/utility.h" #include "utils/builtins.h" +#include "utils/faultinjector.h" #include "utils/ps_status.h" #include "utils/snapmgr.h" #include "utils/syscache.h" +#include "utils/timestamp.h" PG_MODULE_MAGIC; @@ -46,6 +49,7 @@ static volatile sig_atomic_t got_sigusr1 = false; /* GUC variables */ int diskquota_naptime = 0; int diskquota_max_active_tables = 0; +int diskquota_worker_timeout = 60; /* default timeout is 60 seconds */ DiskQuotaLocks diskquota_locks; ExtensionDDLMessage *extension_ddl_message = NULL; @@ -236,6 +240,19 @@ define_guc_variables(void) NULL, NULL, NULL); + + DefineCustomIntVariable("diskquota.worker_timeout", + "Duration between each check (in seconds).", + NULL, + &diskquota_worker_timeout, + 60, + 1, + INT_MAX, + PGC_SIGHUP, + 0, + NULL, + NULL, + NULL); } /* ---- Functions for disk quota worker process ---- */ @@ -361,6 +378,8 @@ disk_quota_worker_main(Datum main_arg) ProcessConfigFile(PGC_SIGHUP); } + SIMPLE_FAULT_INJECTOR("diskquota_worker_main"); + /* Do the work */ if (!diskquota_is_paused()) refresh_disk_quota_model(false); @@ -1007,7 +1026,7 @@ start_worker_by_dboid(Oid dbid) { workerentry->handle = handle; workerentry->pid = pid; - workerentry->epoch 
= 0; + pg_atomic_write_u32(&(workerentry->epoch), 0); workerentry->is_paused = false; } @@ -1036,7 +1055,7 @@ is_valid_dbid(Oid dbid) bool worker_increase_epoch(Oid database_oid) { - LWLockAcquire(diskquota_locks.worker_map_lock, LW_EXCLUSIVE); + LWLockAcquire(diskquota_locks.worker_map_lock, LW_SHARED); bool found = false; DiskQuotaWorkerEntry * workerentry = (DiskQuotaWorkerEntry *) hash_search( @@ -1044,32 +1063,32 @@ worker_increase_epoch(Oid database_oid) if (found) { - ++(workerentry->epoch); + pg_atomic_fetch_add_u32(&(workerentry->epoch), 1); } LWLockRelease(diskquota_locks.worker_map_lock); return found; } -unsigned int +uint32 worker_get_epoch(Oid database_oid) { LWLockAcquire(diskquota_locks.worker_map_lock, LW_SHARED); bool found = false; - unsigned int epoch = 0; + uint32 epoch = 0; DiskQuotaWorkerEntry * workerentry = (DiskQuotaWorkerEntry *) hash_search( disk_quota_worker_map, (void *) &database_oid, HASH_FIND, &found); if (found) { - epoch = workerentry->epoch; + epoch = pg_atomic_read_u32(&(workerentry->epoch)); } LWLockRelease(diskquota_locks.worker_map_lock); if (!found) { ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), errmsg("[diskquota] worker not found for database \"%s\"", - get_database_name(MyDatabaseId)))); + get_database_name(database_oid)))); } return epoch; } @@ -1187,3 +1206,45 @@ Datum diskquota_status(PG_FUNCTION_ARGS) context->index++; SRF_RETURN_NEXT(funcctx, HeapTupleGetDatum(tuple)); } + +static bool +check_for_timeout(TimestampTz start_time) +{ + long diff_secs = 0; + int diff_usecs = 0; + TimestampDifference(start_time, GetCurrentTimestamp(), &diff_secs, &diff_usecs); + if (diff_secs >= diskquota_worker_timeout) + { + ereport(NOTICE, ( + errmsg("[diskquota] timeout when waiting for worker"), + errhint("please check if the bgworker is still alive."))); + return true; + } + return false; +} + +PG_FUNCTION_INFO_V1(wait_for_worker_new_epoch); +Datum +wait_for_worker_new_epoch(PG_FUNCTION_ARGS) +{ + TimestampTz start_time = GetCurrentTimestamp(); + uint32 current_epoch = worker_get_epoch(MyDatabaseId); + for (;;) + { + CHECK_FOR_INTERRUPTS(); + if (check_for_timeout(start_time)) + start_time = GetCurrentTimestamp(); + uint32 new_epoch = worker_get_epoch(MyDatabaseId); + /* Unsigned integer underflow is OK */ + if (new_epoch - current_epoch >= 2u) + { + PG_RETURN_BOOL(true); + } + /* Sleep for naptime to reduce CPU usage */ + (void) WaitLatch(&MyProc->procLatch, + WL_LATCH_SET | WL_TIMEOUT, + diskquota_naptime ? 
diskquota_naptime : 1); + ResetLatch(&MyProc->procLatch); + } + PG_RETURN_BOOL(false); +} diff --git a/diskquota.h b/diskquota.h index 8933c4a6630..1ebe7243789 100644 --- a/diskquota.h +++ b/diskquota.h @@ -115,7 +115,7 @@ struct DiskQuotaWorkerEntry { Oid dbid; pid_t pid; /* worker pid */ - unsigned int epoch; /* this counter will be increased after each worker loop */ + pg_atomic_uint32 epoch; /* this counter will be increased after each worker loop */ bool is_paused; /* true if this worker is paused */ BackgroundWorkerHandle *handle; }; diff --git a/quotamodel.c b/quotamodel.c index 6b3083fb6e3..ad84cccc7b8 100644 --- a/quotamodel.c +++ b/quotamodel.c @@ -26,6 +26,7 @@ #include "executor/spi.h" #include "funcapi.h" #include "storage/ipc.h" +#include "port/atomics.h" #include "utils/builtins.h" #include "utils/guc.h" #include "utils/faultinjector.h" diff --git a/tests/isolation2/expected/cleanup.out b/tests/isolation2/expected/cleanup.out deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/tests/isolation2/expected/config.out b/tests/isolation2/expected/config.out new file mode 100644 index 00000000000..8dfe5f193b6 --- /dev/null +++ b/tests/isolation2/expected/config.out @@ -0,0 +1,30 @@ + +!\retcode gpconfig -c shared_preload_libraries -v diskquota; +(exited with code 0) +!\retcode gpstop -raf; +(exited with code 0) + +!\retcode gpconfig -c diskquota.naptime -v 0; +(exited with code 0) +!\retcode gpconfig -c max_worker_processes -v 20; +(exited with code 0) +!\retcode gpstop -raf; +(exited with code 0) + +-- Show the values of all GUC variables +1: SHOW diskquota.naptime; + diskquota.naptime +------------------- + 0 +(1 row) +1: SHOW diskquota.max_active_tables; + diskquota.max_active_tables +----------------------------- + 1048576 +(1 row) +1: SHOW diskquota.worker_timeout; + diskquota.worker_timeout +-------------------------- + 60 +(1 row) + diff --git a/tests/isolation2/expected/init.out b/tests/isolation2/expected/init.out deleted file mode 100644 index 84b31e5e8a3..00000000000 --- a/tests/isolation2/expected/init.out +++ /dev/null @@ -1,16 +0,0 @@ --- start_ignore -CREATE DATABASE diskquota; -CREATE --- end_ignore - --- start_ignore -\! gpconfig -c shared_preload_libraries -v diskquota > /dev/null -- end_ignore -\! echo $? -- start_ignore -\! gpconfig -c diskquota.naptime -v 2 > /dev/null -- end_ignore -\! echo $? -- start_ignore -\! gpconfig -c max_worker_processes -v 20 > /dev/null -- end_ignore -\! echo $? --- start_ignore -\! gpstop -raf > /dev/null -- end_ignore -\! echo $? -\! 
sleep 10 diff --git a/tests/isolation2/expected/prepare.out b/tests/isolation2/expected/prepare.out deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/tests/isolation2/expected/test_create_extension.out b/tests/isolation2/expected/test_create_extension.out new file mode 100644 index 00000000000..211ebd639f6 --- /dev/null +++ b/tests/isolation2/expected/test_create_extension.out @@ -0,0 +1,15 @@ +CREATE EXTENSION diskquota; +CREATE + +SELECT diskquota.init_table_size_table(); + init_table_size_table +----------------------- + +(1 row) + +-- Wait after init so that diskquota.state is clean +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) diff --git a/tests/isolation2/expected/test_drop_extension.out b/tests/isolation2/expected/test_drop_extension.out new file mode 100644 index 00000000000..4a9e4ecb16f --- /dev/null +++ b/tests/isolation2/expected/test_drop_extension.out @@ -0,0 +1,12 @@ +SELECT diskquota.pause(); + pause +------- + +(1 row) +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +DROP EXTENSION diskquota; +DROP diff --git a/tests/isolation2/expected/test_worker_timeout.out b/tests/isolation2/expected/test_worker_timeout.out new file mode 100644 index 00000000000..5f855a7b80c --- /dev/null +++ b/tests/isolation2/expected/test_worker_timeout.out @@ -0,0 +1,38 @@ +!\retcode gpconfig -c diskquota.worker_timeout -v 1; +(exited with code 0) +!\retcode gpstop -u; +(exited with code 0) + +SELECT gp_inject_fault_infinite('diskquota_worker_main', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=-1; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) + +1&: SELECT diskquota.wait_for_worker_new_epoch(); + +SELECT pg_sleep(2 * current_setting('diskquota.worker_timeout')::int); + pg_sleep +---------- + +(1 row) + +SELECT pg_cancel_backend(pid) FROM pg_stat_activity WHERE query = 'SELECT diskquota.wait_for_worker_new_epoch();'; + pg_cancel_backend +------------------- + t +(1 row) + +SELECT gp_inject_fault_infinite('diskquota_worker_main', 'resume', dbid) FROM gp_segment_configuration WHERE role='p' AND content=-1; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) + +1<: <... 
completed> +ERROR: canceling statement due to user request + +!\retcode gpconfig -r diskquota.worker_timeout; +(exited with code 0) +!\retcode gpstop -u; +(exited with code 0) diff --git a/tests/isolation2/isolation2_schedule b/tests/isolation2/isolation2_schedule index c300d301330..29ac7cba283 100644 --- a/tests/isolation2/isolation2_schedule +++ b/tests/isolation2/isolation2_schedule @@ -1,7 +1,8 @@ -test: init -test: prepare +test: config +test: test_create_extension test: test_relation_size test: test_blackmap test: test_vacuum test: test_truncate -test: cleanup +test: test_worker_timeout +test: test_drop_extension diff --git a/tests/isolation2/sql/config.sql b/tests/isolation2/sql/config.sql new file mode 100644 index 00000000000..5c4f3170f82 --- /dev/null +++ b/tests/isolation2/sql/config.sql @@ -0,0 +1,16 @@ +--start_ignore +CREATE DATABASE diskquota; +--end_ignore + +!\retcode gpconfig -c shared_preload_libraries -v diskquota; +!\retcode gpstop -raf; + +!\retcode gpconfig -c diskquota.naptime -v 0; +!\retcode gpconfig -c max_worker_processes -v 20; +!\retcode gpstop -raf; + +-- Show the values of all GUC variables +1: SHOW diskquota.naptime; +1: SHOW diskquota.max_active_tables; +1: SHOW diskquota.worker_timeout; + \ No newline at end of file diff --git a/tests/isolation2/sql/init.sql b/tests/isolation2/sql/init.sql deleted file mode 100644 index 5c57c4bbdcd..00000000000 --- a/tests/isolation2/sql/init.sql +++ /dev/null @@ -1,23 +0,0 @@ --- start_ignore -CREATE DATABASE diskquota; --- end_ignore - --- start_ignore -\! gpconfig -c shared_preload_libraries -v 'diskquota' > /dev/null --- end_ignore -\! echo $? --- start_ignore -\! gpconfig -c diskquota.naptime -v 2 > /dev/null --- end_ignore -\! echo $? --- start_ignore -\! gpconfig -c max_worker_processes -v 20 > /dev/null --- end_ignore -\! echo $? - --- start_ignore -\! gpstop -raf > /dev/null --- end_ignore -\! echo $? - -\! 
sleep 10 diff --git a/tests/isolation2/sql/prepare.sql b/tests/isolation2/sql/prepare.sql deleted file mode 100644 index 220ed13e887..00000000000 --- a/tests/isolation2/sql/prepare.sql +++ /dev/null @@ -1,4 +0,0 @@ --- start_ignore -CREATE EXTENSION diskquota; -SELECT diskquota.init_table_size_table(); --- end_ignore diff --git a/tests/isolation2/sql/test_create_extension.sql b/tests/isolation2/sql/test_create_extension.sql new file mode 100644 index 00000000000..1cc9c9cb940 --- /dev/null +++ b/tests/isolation2/sql/test_create_extension.sql @@ -0,0 +1,6 @@ +CREATE EXTENSION diskquota; + +SELECT diskquota.init_table_size_table(); + +-- Wait after init so that diskquota.state is clean +SELECT diskquota.wait_for_worker_new_epoch(); diff --git a/tests/isolation2/sql/cleanup.sql b/tests/isolation2/sql/test_drop_extension.sql similarity index 76% rename from tests/isolation2/sql/cleanup.sql rename to tests/isolation2/sql/test_drop_extension.sql index fa8de5acd8e..09f5b11fa7a 100644 --- a/tests/isolation2/sql/cleanup.sql +++ b/tests/isolation2/sql/test_drop_extension.sql @@ -1,5 +1,3 @@ --- start_ignore SELECT diskquota.pause(); SELECT diskquota.wait_for_worker_new_epoch(); DROP EXTENSION diskquota; --- end_ignore diff --git a/tests/isolation2/sql/test_worker_timeout.sql b/tests/isolation2/sql/test_worker_timeout.sql new file mode 100644 index 00000000000..630cd7f88f2 --- /dev/null +++ b/tests/isolation2/sql/test_worker_timeout.sql @@ -0,0 +1,20 @@ +!\retcode gpconfig -c diskquota.worker_timeout -v 1; +!\retcode gpstop -u; + +SELECT gp_inject_fault_infinite('diskquota_worker_main', 'suspend', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=-1; + +1&: SELECT diskquota.wait_for_worker_new_epoch(); + +SELECT pg_sleep(2 * current_setting('diskquota.worker_timeout')::int); + +SELECT pg_cancel_backend(pid) FROM pg_stat_activity +WHERE query = 'SELECT diskquota.wait_for_worker_new_epoch();'; + +SELECT gp_inject_fault_infinite('diskquota_worker_main', 'resume', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=-1; + +1<: + +!\retcode gpconfig -r diskquota.worker_timeout; +!\retcode gpstop -u; diff --git a/tests/regress/data/csmall.txt b/tests/regress/data/csmall.txt deleted file mode 100644 index f6d8fb48fba..00000000000 --- a/tests/regress/data/csmall.txt +++ /dev/null @@ -1,100 +0,0 @@ -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 diff --git a/tests/regress/diskquota_schedule b/tests/regress/diskquota_schedule index 179f7d4053c..45b2c147ceb 100644 --- a/tests/regress/diskquota_schedule +++ b/tests/regress/diskquota_schedule @@ -1,6 +1,5 @@ -test: init -test: prepare -test: test_worker_epoch +test: config +test: test_create_extension test: test_relation_size test: test_relation_cache test: test_uncommitted_table_size @@ -29,4 +28,4 @@ test: test_ctas_role test: test_ctas_schema test: test_ctas_tablespace_role test: test_ctas_tablespace_schema -test: clean +test: test_drop_extension diff --git a/tests/regress/expected/config.out b/tests/regress/expected/config.out new file mode 100644 index 00000000000..2bf6188f41d --- /dev/null +++ b/tests/regress/expected/config.out @@ -0,0 +1,20 @@ +\c +-- Show the values of all GUC variables +SHOW diskquota.naptime; + diskquota.naptime +------------------- + 0 +(1 row) + +SHOW 
diskquota.max_active_tables; + diskquota.max_active_tables +----------------------------- + 1048576 +(1 row) + +SHOW diskquota.worker_timeout; + diskquota.worker_timeout +-------------------------- + 60 +(1 row) + diff --git a/tests/regress/expected/init.out b/tests/regress/expected/init.out deleted file mode 100644 index 7b09388e986..00000000000 --- a/tests/regress/expected/init.out +++ /dev/null @@ -1,20 +0,0 @@ --- start_ignore -\! gpconfig -c shared_preload_libraries -v diskquota > /dev/null --- end_ignore -\! echo $? -0 --- start_ignore -\! gpconfig -c diskquota.naptime -v 0 > /dev/null --- end_ignore -\! echo $? -0 --- start_ignore -\! gpconfig -c max_worker_processes -v 20 > /dev/null --- end_ignore -\! echo $? -0 --- start_ignore -\! gpstop -raf > /dev/null --- end_ignore -\! echo $? -0 diff --git a/tests/regress/expected/prepare.out b/tests/regress/expected/prepare.out deleted file mode 100644 index 107a00a2378..00000000000 --- a/tests/regress/expected/prepare.out +++ /dev/null @@ -1,61 +0,0 @@ -CREATE EXTENSION diskquota; --- start_ignore -\! gpstop -u -20211216:15:13:33:152977 gpstop:laptop:v-[INFO]:-Starting gpstop with args: -u -20211216:15:13:33:152977 gpstop:laptop:v-[INFO]:-Gathering information and validating the environment... -20211216:15:13:33:152977 gpstop:laptop:v-[INFO]:-Obtaining Greenplum Master catalog information -20211216:15:13:33:152977 gpstop:laptop:v-[INFO]:-Obtaining Segment details from master... -20211216:15:13:33:152977 gpstop:laptop:v-[INFO]:-Greenplum Version: 'postgres (Greenplum Database) 6.18.2+dev.173.g55557f44f3 build dev' -20211216:15:13:33:152977 gpstop:laptop:v-[INFO]:-Signalling all postmaster processes to reload --- end_ignore -\! cp data/csmall.txt /tmp/csmall.txt --- disable hardlimit feature. -SELECT diskquota.disable_hardlimit(); - disable_hardlimit -------------------- - -(1 row) - --- prepare a schema that has reached quota limit -CREATE SCHEMA badquota; -DROP ROLE IF EXISTS testbody; -NOTICE: role "testbody" does not exist, skipping -CREATE ROLE testbody; -NOTICE: resource queue required -- using default resource queue "pg_default" -CREATE TABLE badquota.t1(i INT); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
-ALTER TABLE badquota.t1 OWNER TO testbody; -INSERT INTO badquota.t1 SELECT generate_series(0, 100000); -SELECT diskquota.init_table_size_table(); - init_table_size_table ------------------------ - -(1 row) - -SELECT diskquota.set_schema_quota('badquota', '1 MB'); - set_schema_quota ------------------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -SELECT size, segid FROM diskquota.table_size - WHERE tableid IN (SELECT oid FROM pg_class WHERE relname='t1') - ORDER BY segid DESC; - size | segid ----------+------- - 1310720 | 2 - 1310720 | 1 - 1310720 | 0 - 3932160 | -1 -(4 rows) - --- expect fail -INSERT INTO badquota.t1 SELECT generate_series(0, 10); -ERROR: schema's disk space quota exceeded with name:badquota diff --git a/tests/regress/expected/test_copy.out b/tests/regress/expected/test_copy.out index f8cc758536b..bebe959dbef 100644 --- a/tests/regress/expected/test_copy.out +++ b/tests/regress/expected/test_copy.out @@ -7,6 +7,7 @@ SELECT diskquota.set_schema_quota('s3', '1 MB'); (1 row) SET search_path TO s3; +\! seq 100 > /tmp/csmall.txt CREATE TABLE c (i int); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. diff --git a/tests/regress/expected/test_create_extension.out b/tests/regress/expected/test_create_extension.out new file mode 100644 index 00000000000..a90178ce350 --- /dev/null +++ b/tests/regress/expected/test_create_extension.out @@ -0,0 +1,14 @@ +CREATE EXTENSION diskquota; +SELECT diskquota.init_table_size_table(); + init_table_size_table +----------------------- + +(1 row) + +-- Wait after init so that diskquota.state is clean +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + diff --git a/tests/regress/expected/test_drop_after_pause.out b/tests/regress/expected/test_drop_after_pause.out index 32e492778eb..4ec538b00d7 100644 --- a/tests/regress/expected/test_drop_after_pause.out +++ b/tests/regress/expected/test_drop_after_pause.out @@ -1,6 +1,12 @@ CREATE DATABASE test_drop_after_pause; \c test_drop_after_pause CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + SELECT diskquota.pause(); pause ------- @@ -15,6 +21,12 @@ SELECT diskquota.wait_for_worker_new_epoch(); DROP EXTENSION diskquota; CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + SELECT diskquota.enable_hardlimit(); enable_hardlimit ------------------ @@ -38,7 +50,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); (1 row) INSERT INTO SX.a SELECT generate_series(1,1000000); -- expect insert fail -ERROR: schema's disk space quota exceeded with name:746125 (seg0 127.0.0.1:6002 pid=22648) +ERROR: schema's disk space quota exceeded with name:16933 (seg2 127.0.0.1:6004 pid=24622) SELECT diskquota.disable_hardlimit(); disable_hardlimit ------------------- diff --git a/tests/regress/expected/clean.out b/tests/regress/expected/test_drop_extension.out similarity index 74% rename from tests/regress/expected/clean.out rename to tests/regress/expected/test_drop_extension.out index 0b0773f848f..b946654c7f3 100644 --- 
a/tests/regress/expected/clean.out +++ b/tests/regress/expected/test_drop_extension.out @@ -1,6 +1,3 @@ -DROP TABLE badquota.t1; -DROP ROLE testbody; -DROP SCHEMA badquota; SELECT diskquota.pause(); pause ------- diff --git a/tests/regress/expected/test_extension.out b/tests/regress/expected/test_extension.out index 1eceef7d6fe..8f56ce29db7 100644 --- a/tests/regress/expected/test_extension.out +++ b/tests/regress/expected/test_extension.out @@ -1,6 +1,5 @@ --- NOTE: when test this script, you must make sure that there is no diskquota launcher --- process or diskquota worker process. i.e. `ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l` --- returns 0 +-- NOTE: when test this script, you must make sure that there is no diskquota +-- worker process. CREATE DATABASE dbx0 ; CREATE DATABASE dbx1 ; CREATE DATABASE dbx2 ; @@ -20,10 +19,18 @@ show max_worker_processes; \! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 2 +-- FIXME: We need to sleep for a while each time after CREATE EXTENSION and +-- DROP EXTENSION to wait for the bgworker to start or to exit. \c dbx0 CREATE EXTENSION diskquota; \! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 3 +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + CREATE SCHEMA SX; CREATE TABLE SX.a(i int); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. @@ -58,6 +65,12 @@ SELECT diskquota.init_table_size_table(); (1 row) +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + SELECT diskquota.set_schema_quota('SX', '1MB'); set_schema_quota ------------------ @@ -71,6 +84,12 @@ ERROR: schema's disk space quota exceeded with name:sx DROP TABLE SX.a; \c dbx2 CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + \! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 5 CREATE SCHEMA SX; @@ -97,6 +116,12 @@ DROP TABLE SX.a; CREATE EXTENSION diskquota; \! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 6 +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + CREATE SCHEMA SX; CREATE TABLE SX.a(i int); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. @@ -121,6 +146,12 @@ DROP TABLE SX.a; CREATE EXTENSION diskquota; \! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 7 +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + CREATE SCHEMA SX; CREATE TABLE SX.a(i int); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. @@ -145,6 +176,12 @@ DROP TABLE SX.a; CREATE EXTENSION diskquota; \! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 8 +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + CREATE SCHEMA SX; CREATE TABLE SX.a(i int); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. 
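A note on the mechanism all of these wait_for_worker_new_epoch() calls rely on: the bgworker increments its pg_atomic_uint32 epoch once per refresh loop, and the waiting backend returns only after the counter has advanced by at least 2. An advance of 1 could come from a loop that was already in flight when the wait began, while an advance of 2 guarantees that at least one complete refresh loop started and finished afterwards. Because the comparison is done in unsigned arithmetic, it also stays correct when the counter wraps. A minimal standalone sketch of just that comparison; illustrative C only, epoch_advanced is not a function in this patch:

#include <assert.h>
#include <stdint.h>

/* Same check as wait_for_worker_new_epoch(): has at least one full
 * worker refresh loop started and finished since we sampled the epoch? */
static int
epoch_advanced(uint32_t sampled_epoch, uint32_t new_epoch)
{
    /* Unsigned subtraction wraps modulo 2^32, so the test stays
     * correct across counter wraparound. */
    return (uint32_t) (new_epoch - sampled_epoch) >= 2u;
}

int
main(void)
{
    assert(!epoch_advanced(5u, 6u));        /* one bump: loop may predate the wait */
    assert(epoch_advanced(5u, 7u));         /* two bumps: a fresh, complete loop */
    assert(epoch_advanced(UINT32_MAX, 1u)); /* still true across wraparound */
    return 0;
}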
@@ -169,6 +206,12 @@ DROP TABLE SX.a; CREATE EXTENSION diskquota; \! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 9 +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + CREATE SCHEMA SX; CREATE TABLE SX.a(i int); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. @@ -193,6 +236,12 @@ DROP TABLE SX.a; CREATE EXTENSION diskquota; \! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 10 +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + CREATE SCHEMA SX; CREATE TABLE SX.a(i int); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. @@ -217,6 +266,12 @@ DROP TABLE SX.a; CREATE EXTENSION diskquota; \! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 11 +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + CREATE SCHEMA SX; CREATE TABLE SX.a(i int); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. @@ -242,11 +297,15 @@ CREATE EXTENSION diskquota; ERROR: [diskquota] failed to create diskquota extension: too many databases to monitor (diskquota_utility.c:287) \! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 11 +SELECT diskquota.wait_for_worker_new_epoch(); +ERROR: schema "diskquota" does not exist \c dbx10 CREATE EXTENSION diskquota; ERROR: [diskquota] failed to create diskquota extension: too many databases to monitor (diskquota_utility.c:287) \! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 11 +SELECT diskquota.wait_for_worker_new_epoch(); +ERROR: schema "diskquota" does not exist \c dbx0 SELECT diskquota.pause(); pause diff --git a/tests/regress/expected/test_pause_and_resume_multiple_db.out b/tests/regress/expected/test_pause_and_resume_multiple_db.out index 92028606b7d..d7ce4f66c8d 100644 --- a/tests/regress/expected/test_pause_and_resume_multiple_db.out +++ b/tests/regress/expected/test_pause_and_resume_multiple_db.out @@ -7,6 +7,12 @@ CREATE DATABASE test_new_create_database; \c test_pause_and_resume CREATE SCHEMA s1; CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + \c contrib_regression CREATE TABLE s1.a(i int); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. @@ -97,7 +103,13 @@ SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = INSERT INTO s1.a SELECT generate_series(1,100); -- expect insert succeed \c test_new_create_database; CREATE SCHEMA s1; -CREATE EXTENSION diskquota; -- new database should be active although other database is paused +CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); -- new database should be active although other database is paused + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + CREATE TABLE s1.a(i int); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. 
HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. diff --git a/tests/regress/expected/test_recreate.out b/tests/regress/expected/test_recreate.out index d87713d409f..c69cd82e77e 100644 --- a/tests/regress/expected/test_recreate.out +++ b/tests/regress/expected/test_recreate.out @@ -3,7 +3,13 @@ CREATE DATABASE test_recreate; \c diskquota INSERT INTO diskquota_namespace.database_list(dbid) SELECT oid FROM pg_database WHERE datname = 'test_recreate'; \c test_recreate -CREATE EXTENSION diskquota; -- shoud be ok +CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); -- shoud be ok + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + SELECT diskquota.pause(); pause ------- diff --git a/tests/regress/expected/test_schema.out b/tests/regress/expected/test_schema.out index a19384c4e8e..030b78a539b 100644 --- a/tests/regress/expected/test_schema.out +++ b/tests/regress/expected/test_schema.out @@ -41,6 +41,49 @@ SELECT diskquota.wait_for_worker_new_epoch(); INSERT INTO a2 SELECT generate_series(1,200); -- expect insert succeed INSERT INTO s2.a SELECT generate_series(1,200); +-- prepare a schema that has reached quota limit +CREATE SCHEMA badquota; +DROP ROLE IF EXISTS testbody; +NOTICE: role "testbody" does not exist, skipping +CREATE ROLE testbody; +NOTICE: resource queue required -- using default resource queue "pg_default" +CREATE TABLE badquota.t1(i INT); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +ALTER TABLE badquota.t1 OWNER TO testbody; +INSERT INTO badquota.t1 SELECT generate_series(0, 100000); +SELECT diskquota.init_table_size_table(); + init_table_size_table +----------------------- + +(1 row) + +SELECT diskquota.set_schema_quota('badquota', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT size, segid FROM diskquota.table_size + WHERE tableid IN (SELECT oid FROM pg_class WHERE relname='t1') + ORDER BY segid DESC; + size | segid +---------+------- + 1310720 | 2 + 1310720 | 1 + 1310720 | 0 + 3932160 | -1 +(4 rows) + +-- expect fail +INSERT INTO badquota.t1 SELECT generate_series(0, 10); +ERROR: schema's disk space quota exceeded with name:badquota ALTER TABLE s2.a SET SCHEMA badquota; -- expect failed INSERT INTO badquota.a SELECT generate_series(0, 100); @@ -60,3 +103,6 @@ SELECT schema_name, quota_in_mb FROM diskquota.show_fast_schema_quota_view WHERE RESET search_path; DROP TABLE s1.a2, badquota.a; DROP SCHEMA s1, s2; +DROP TABLE badquota.t1; +DROP ROLE testbody; +DROP SCHEMA badquota; diff --git a/tests/regress/expected/test_worker_epoch.out b/tests/regress/expected/test_worker_epoch.out deleted file mode 100644 index 0d1875f1212..00000000000 --- a/tests/regress/expected/test_worker_epoch.out +++ /dev/null @@ -1,9 +0,0 @@ --- Test if the UDF returns successfully. --- NOTE: This test should be the first one since the UDF is supposed --- to be used in all other tests. 
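A reading aid for the diskquota.table_size output above, with my arithmetic rather than anything stated by the patch: each of the three primary segments accounts 1310720 bytes for badquota.t1, and the segid = -1 row is the cluster-wide total, 3 * 1310720 = 3932160 bytes, about 3.75 MB, which is why the subsequent INSERT into the 1 MB badquota schema is rejected.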
-SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - diff --git a/tests/regress/sql/config.sql b/tests/regress/sql/config.sql new file mode 100644 index 00000000000..64908e80269 --- /dev/null +++ b/tests/regress/sql/config.sql @@ -0,0 +1,16 @@ +--start_ignore +CREATE DATABASE diskquota; + +\! gpconfig -c shared_preload_libraries -v diskquota +\! gpstop -raf + +\! gpconfig -c diskquota.naptime -v 0 +\! gpconfig -c max_worker_processes -v 20 +\! gpstop -raf +--end_ignore + +\c +-- Show the values of all GUC variables +SHOW diskquota.naptime; +SHOW diskquota.max_active_tables; +SHOW diskquota.worker_timeout; diff --git a/tests/regress/sql/init.sql b/tests/regress/sql/init.sql deleted file mode 100644 index ff9a16a0e9c..00000000000 --- a/tests/regress/sql/init.sql +++ /dev/null @@ -1,21 +0,0 @@ --- start_ignore -CREATE DATABASE diskquota; --- end_ignore - --- start_ignore -\! gpconfig -c shared_preload_libraries -v diskquota > /dev/null --- end_ignore -\! echo $? --- start_ignore -\! gpconfig -c diskquota.naptime -v 0 > /dev/null --- end_ignore -\! echo $? --- start_ignore -\! gpconfig -c max_worker_processes -v 20 > /dev/null --- end_ignore -\! echo $? - --- start_ignore -\! gpstop -raf > /dev/null --- end_ignore -\! echo $? diff --git a/tests/regress/sql/prepare.sql b/tests/regress/sql/prepare.sql deleted file mode 100644 index 1a9dec104b0..00000000000 --- a/tests/regress/sql/prepare.sql +++ /dev/null @@ -1,24 +0,0 @@ -CREATE EXTENSION diskquota; --- start_ignore -\! gpstop -u --- end_ignore -\! cp data/csmall.txt /tmp/csmall.txt - --- disable hardlimit feature. -SELECT diskquota.disable_hardlimit(); - --- prepare a schema that has reached quota limit -CREATE SCHEMA badquota; -DROP ROLE IF EXISTS testbody; -CREATE ROLE testbody; -CREATE TABLE badquota.t1(i INT); -ALTER TABLE badquota.t1 OWNER TO testbody; -INSERT INTO badquota.t1 SELECT generate_series(0, 100000); -SELECT diskquota.init_table_size_table(); -SELECT diskquota.set_schema_quota('badquota', '1 MB'); -SELECT diskquota.wait_for_worker_new_epoch(); -SELECT size, segid FROM diskquota.table_size - WHERE tableid IN (SELECT oid FROM pg_class WHERE relname='t1') - ORDER BY segid DESC; --- expect fail -INSERT INTO badquota.t1 SELECT generate_series(0, 10); diff --git a/tests/regress/sql/test_copy.sql b/tests/regress/sql/test_copy.sql index abac9fb7c81..aca811e0b25 100644 --- a/tests/regress/sql/test_copy.sql +++ b/tests/regress/sql/test_copy.sql @@ -3,6 +3,8 @@ CREATE SCHEMA s3; SELECT diskquota.set_schema_quota('s3', '1 MB'); SET search_path TO s3; +\! 
seq 100 > /tmp/csmall.txt + CREATE TABLE c (i int); COPY c FROM '/tmp/csmall.txt'; -- expect failed diff --git a/tests/regress/sql/test_create_extension.sql b/tests/regress/sql/test_create_extension.sql new file mode 100644 index 00000000000..dfbc96a373e --- /dev/null +++ b/tests/regress/sql/test_create_extension.sql @@ -0,0 +1,6 @@ +CREATE EXTENSION diskquota; + +SELECT diskquota.init_table_size_table(); + +-- Wait after init so that diskquota.state is clean +SELECT diskquota.wait_for_worker_new_epoch(); \ No newline at end of file diff --git a/tests/regress/sql/test_drop_after_pause.sql b/tests/regress/sql/test_drop_after_pause.sql index b034fead210..003e2dd6d17 100644 --- a/tests/regress/sql/test_drop_after_pause.sql +++ b/tests/regress/sql/test_drop_after_pause.sql @@ -3,11 +3,13 @@ CREATE DATABASE test_drop_after_pause; \c test_drop_after_pause CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); SELECT diskquota.pause(); SELECT diskquota.wait_for_worker_new_epoch(); DROP EXTENSION diskquota; CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); SELECT diskquota.enable_hardlimit(); diff --git a/tests/regress/sql/clean.sql b/tests/regress/sql/test_drop_extension.sql similarity index 59% rename from tests/regress/sql/clean.sql rename to tests/regress/sql/test_drop_extension.sql index bb84eb2b7e1..09f5b11fa7a 100644 --- a/tests/regress/sql/clean.sql +++ b/tests/regress/sql/test_drop_extension.sql @@ -1,7 +1,3 @@ -DROP TABLE badquota.t1; -DROP ROLE testbody; -DROP SCHEMA badquota; - SELECT diskquota.pause(); SELECT diskquota.wait_for_worker_new_epoch(); DROP EXTENSION diskquota; diff --git a/tests/regress/sql/test_extension.sql b/tests/regress/sql/test_extension.sql index 6b41ac8698e..e07577c6bdd 100644 --- a/tests/regress/sql/test_extension.sql +++ b/tests/regress/sql/test_extension.sql @@ -1,6 +1,5 @@ --- NOTE: when test this script, you must make sure that there is no diskquota launcher --- process or diskquota worker process. i.e. `ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l` --- returns 0 +-- NOTE: when test this script, you must make sure that there is no diskquota +-- worker process. CREATE DATABASE dbx0 ; CREATE DATABASE dbx1 ; CREATE DATABASE dbx2 ; @@ -17,9 +16,13 @@ show max_worker_processes; \! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +-- FIXME: We need to sleep for a while each time after CREATE EXTENSION and +-- DROP EXTENSION to wait for the bgworker to start or to exit. + \c dbx0 CREATE EXTENSION diskquota; \! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +SELECT diskquota.wait_for_worker_new_epoch(); CREATE SCHEMA SX; CREATE TABLE SX.a(i int); SELECT diskquota.set_schema_quota('SX', '1MB'); @@ -34,6 +37,7 @@ CREATE TABLE SX.a(i int); INSERT INTO SX.a values(generate_series(0, 100000)); CREATE EXTENSION diskquota; SELECT diskquota.init_table_size_table(); +SELECT diskquota.wait_for_worker_new_epoch(); SELECT diskquota.set_schema_quota('SX', '1MB'); \! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l INSERT INTO SX.a values(generate_series(0, 10)); @@ -41,6 +45,7 @@ DROP TABLE SX.a; \c dbx2 CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); \! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l CREATE SCHEMA SX; CREATE TABLE SX.a(i int); @@ -53,6 +58,7 @@ DROP TABLE SX.a; \c dbx3 CREATE EXTENSION diskquota; \! 
sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +SELECT diskquota.wait_for_worker_new_epoch(); CREATE SCHEMA SX; CREATE TABLE SX.a(i int); SELECT diskquota.set_schema_quota('SX', '1MB'); @@ -64,6 +70,7 @@ DROP TABLE SX.a; \c dbx4 CREATE EXTENSION diskquota; \! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +SELECT diskquota.wait_for_worker_new_epoch(); CREATE SCHEMA SX; CREATE TABLE SX.a(i int); SELECT diskquota.set_schema_quota('SX', '1MB'); @@ -75,6 +82,7 @@ DROP TABLE SX.a; \c dbx5 CREATE EXTENSION diskquota; \! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +SELECT diskquota.wait_for_worker_new_epoch(); CREATE SCHEMA SX; CREATE TABLE SX.a(i int); SELECT diskquota.set_schema_quota('SX', '1MB'); @@ -86,6 +94,7 @@ DROP TABLE SX.a; \c dbx6 CREATE EXTENSION diskquota; \! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +SELECT diskquota.wait_for_worker_new_epoch(); CREATE SCHEMA SX; CREATE TABLE SX.a(i int); SELECT diskquota.set_schema_quota('SX', '1MB'); @@ -97,6 +106,7 @@ DROP TABLE SX.a; \c dbx7 CREATE EXTENSION diskquota; \! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +SELECT diskquota.wait_for_worker_new_epoch(); CREATE SCHEMA SX; CREATE TABLE SX.a(i int); SELECT diskquota.set_schema_quota('SX', '1MB'); @@ -108,6 +118,7 @@ DROP TABLE SX.a; \c dbx8 CREATE EXTENSION diskquota; \! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +SELECT diskquota.wait_for_worker_new_epoch(); CREATE SCHEMA SX; CREATE TABLE SX.a(i int); SELECT diskquota.set_schema_quota('SX', '1MB'); @@ -119,10 +130,12 @@ DROP TABLE SX.a; \c dbx9 CREATE EXTENSION diskquota; \! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +SELECT diskquota.wait_for_worker_new_epoch(); \c dbx10 CREATE EXTENSION diskquota; \! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +SELECT diskquota.wait_for_worker_new_epoch(); \c dbx0 SELECT diskquota.pause(); diff --git a/tests/regress/sql/test_insert_after_drop.sql b/tests/regress/sql/test_insert_after_drop.sql index 60a925411e1..d811dfc7d22 100644 --- a/tests/regress/sql/test_insert_after_drop.sql +++ b/tests/regress/sql/test_insert_after_drop.sql @@ -1,6 +1,7 @@ CREATE DATABASE db_insert_after_drop; \c db_insert_after_drop CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); -- Test Drop Extension CREATE SCHEMA sdrtbl; SELECT diskquota.set_schema_quota('sdrtbl', '1 MB'); diff --git a/tests/regress/sql/test_manytable.sql b/tests/regress/sql/test_manytable.sql index eb90225f87b..a5f24cb7cee 100644 --- a/tests/regress/sql/test_manytable.sql +++ b/tests/regress/sql/test_manytable.sql @@ -25,5 +25,6 @@ DROP DATABASE test_manytable01; DROP DATABASE test_manytable02; -- start_ignore -\! gpconfig -c diskquota.max_active_tables -v 1024 > /dev/null +\! gpconfig -r diskquota.max_active_tables +\! 
gpstop -far -- end_ignore diff --git a/tests/regress/sql/test_pause_and_resume_multiple_db.sql b/tests/regress/sql/test_pause_and_resume_multiple_db.sql index 6a8cd67ca7a..8b61c39cab7 100644 --- a/tests/regress/sql/test_pause_and_resume_multiple_db.sql +++ b/tests/regress/sql/test_pause_and_resume_multiple_db.sql @@ -9,6 +9,7 @@ CREATE DATABASE test_new_create_database; \c test_pause_and_resume CREATE SCHEMA s1; CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); \c contrib_regression CREATE TABLE s1.a(i int); @@ -44,7 +45,8 @@ INSERT INTO s1.a SELECT generate_series(1,100); -- expect insert succeed \c test_new_create_database; CREATE SCHEMA s1; -CREATE EXTENSION diskquota; -- new database should be active although other database is paused +CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); -- new database should be active although other database is paused CREATE TABLE s1.a(i int); INSERT INTO s1.a SELECT generate_series(1,100000); -- expect insert succeed SELECT diskquota.set_schema_quota('s1', '1 MB'); diff --git a/tests/regress/sql/test_recreate.sql b/tests/regress/sql/test_recreate.sql index 4581df96c85..2e29656cef0 100644 --- a/tests/regress/sql/test_recreate.sql +++ b/tests/regress/sql/test_recreate.sql @@ -7,7 +7,8 @@ CREATE DATABASE test_recreate; INSERT INTO diskquota_namespace.database_list(dbid) SELECT oid FROM pg_database WHERE datname = 'test_recreate'; \c test_recreate -CREATE EXTENSION diskquota; -- shoud be ok +CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); -- shoud be ok SELECT diskquota.pause(); SELECT diskquota.wait_for_worker_new_epoch(); DROP EXTENSION diskquota; diff --git a/tests/regress/sql/test_schema.sql b/tests/regress/sql/test_schema.sql index 932199db470..a8f568dd442 100644 --- a/tests/regress/sql/test_schema.sql +++ b/tests/regress/sql/test_schema.sql @@ -24,6 +24,22 @@ INSERT INTO a2 SELECT generate_series(1,200); -- expect insert succeed INSERT INTO s2.a SELECT generate_series(1,200); +-- prepare a schema that has reached quota limit +CREATE SCHEMA badquota; +DROP ROLE IF EXISTS testbody; +CREATE ROLE testbody; +CREATE TABLE badquota.t1(i INT); +ALTER TABLE badquota.t1 OWNER TO testbody; +INSERT INTO badquota.t1 SELECT generate_series(0, 100000); +SELECT diskquota.init_table_size_table(); +SELECT diskquota.set_schema_quota('badquota', '1 MB'); +SELECT diskquota.wait_for_worker_new_epoch(); +SELECT size, segid FROM diskquota.table_size + WHERE tableid IN (SELECT oid FROM pg_class WHERE relname='t1') + ORDER BY segid DESC; +-- expect fail +INSERT INTO badquota.t1 SELECT generate_series(0, 10); + ALTER TABLE s2.a SET SCHEMA badquota; -- expect failed INSERT INTO badquota.a SELECT generate_series(0, 100); @@ -35,3 +51,6 @@ RESET search_path; DROP TABLE s1.a2, badquota.a; DROP SCHEMA s1, s2; +DROP TABLE badquota.t1; +DROP ROLE testbody; +DROP SCHEMA badquota; diff --git a/tests/regress/sql/test_worker_epoch.sql b/tests/regress/sql/test_worker_epoch.sql deleted file mode 100644 index 493b43954fb..00000000000 --- a/tests/regress/sql/test_worker_epoch.sql +++ /dev/null @@ -1,5 +0,0 @@ --- Test if the UDF returns successfully. --- NOTE: This test should be the first one since the UDF is supposed --- to be used in all other tests. 
- -SELECT diskquota.wait_for_worker_new_epoch(); diff --git a/upgrade_test/sql/prepare.sql b/upgrade_test/sql/prepare.sql index 5cf05d2d2ac..0782a7e261e 100644 --- a/upgrade_test/sql/prepare.sql +++ b/upgrade_test/sql/prepare.sql @@ -1,4 +1,5 @@ CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); -- start_ignore \! gpstop -u SELECT diskquota.init_table_size_table(); diff --git a/upgrade_test/sql/test_manytable.sql b/upgrade_test/sql/test_manytable.sql index dbd64ea64e7..d724926c953 100644 --- a/upgrade_test/sql/test_manytable.sql +++ b/upgrade_test/sql/test_manytable.sql @@ -25,5 +25,6 @@ DROP DATABASE test_manytable01; DROP DATABASE test_manytable02; -- start_ignore -\! gpconfig -c diskquota.max_active_tables -v 1024 > /dev/null +\! gpconfig -r diskquota.max_active_tables +\! gpstop -far -- end_ignore From 688fa9d2ed824eea7f9b174877410a647f2cc697 Mon Sep 17 00:00:00 2001 From: Chen Mulong Date: Thu, 17 Feb 2022 23:42:30 +0800 Subject: [PATCH 122/330] Add git commit message template (#141) The template is copied from https://gist.github.com/lisawolderiksen/a7b99d94c92c6671181611be1641c733 --- .gitmessage/commit.template | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) create mode 100644 .gitmessage/commit.template diff --git a/.gitmessage/commit.template b/.gitmessage/commit.template new file mode 100644 index 00000000000..bbc420e5ac8 --- /dev/null +++ b/.gitmessage/commit.template @@ -0,0 +1,22 @@ +# Title: Summary, imperative, start upper case, don't end with a period +# No more than 50 chars. #### 50 chars is here: # + +# Remember blank line between title and body. + +# Body: Explain *what* and *why* (not *how*). Include task ID (tracker issue). +# Wrap at 72 chars. ################################## which is here: # + +# At the end: Include Co-authored-by for all contributors. +# Include at least one empty line before it. Format: +# Co-authored-by: name +# +# How to Write a Git Commit Message: +# https://chris.beams.io/posts/git-commit/ +# +# 1. Separate subject from body with a blank line +# 2. Limit the subject line to 50 characters +# 3. Capitalize the subject line +# 4. Do not end the subject line with a period +# 5. Use the imperative mood in the subject line +# 6. Wrap the body at 72 characters +# 7. Use the body to explain what and why vs. how From 0f052d569f80e4ef153c6b5fde5938a971408bbe Mon Sep 17 00:00:00 2001 From: Chen Mulong Date: Fri, 18 Feb 2022 17:19:26 +0800 Subject: [PATCH 123/330] Use GUC for hardlimit (#142) - Change to use a GUC to set hardlimit instead of a UDF, since the hardlimit setting needs to persist across postmaster restarts. - Fix relevant test cases. 
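Mechanically, this works because a GUC registered with the PGC_SIGHUP context is re-read from postgresql.conf on a configuration reload (gpconfig writes the value into the conf files and gpstop -u sends SIGHUP), so the setting takes effect without a restart and also survives one, unlike the old shared-memory flag. A hedged sketch of such a registration; myext.hard_limit and my_hardlimit are made-up illustration names, while DefineCustomBoolVariable is the stock PostgreSQL API that diskquota.c itself calls in the hunks below:

#include "postgres.h"
#include "utils/guc.h"

PG_MODULE_MAGIC;

/* Backing variable the GUC machinery writes through on reload. */
static bool my_hardlimit = false;

void _PG_init(void);

void
_PG_init(void)
{
    DefineCustomBoolVariable("myext.hard_limit",   /* name used by SHOW and gpconfig */
                             "Set this to 'on' to enable the hard limit.",
                             NULL,                 /* no long description */
                             &my_hardlimit,        /* value address */
                             false,                /* boot value */
                             PGC_SIGHUP,           /* changeable via reload, no restart */
                             0,                    /* flags */
                             NULL, NULL, NULL);    /* check/assign/show hooks */
}

At runtime the extension then reads the plain C variable directly, which is how quota_check_common() consults diskquota_hardlimit in the quotamodel.c hunk below.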
--- diskquota--1.0--2.0.sql | 10 --- diskquota--2.0--1.0.sql | 4 - diskquota--2.0.sql | 10 --- diskquota.c | 15 +++- diskquota.h | 2 +- quotamodel.c | 90 +------------------ tests/regress/expected/config.out | 6 ++ tests/regress/expected/test_ctas_pause.out | 16 +--- tests/regress/expected/test_ctas_role.out | 16 +--- tests/regress/expected/test_ctas_schema.out | 16 +--- .../expected/test_ctas_tablespace_role.out | 16 +--- .../expected/test_ctas_tablespace_schema.out | 16 +--- .../expected/test_drop_after_pause.out | 16 +--- tests/regress/expected/test_show_status.out | 30 +++---- tests/regress/sql/config.sql | 6 +- tests/regress/sql/test_ctas_pause.sql | 6 +- tests/regress/sql/test_ctas_role.sql | 6 +- tests/regress/sql/test_ctas_schema.sql | 7 +- .../regress/sql/test_ctas_tablespace_role.sql | 6 +- .../sql/test_ctas_tablespace_schema.sql | 6 +- tests/regress/sql/test_drop_after_pause.sql | 6 +- tests/regress/sql/test_show_status.sql | 15 ++-- 22 files changed, 95 insertions(+), 226 deletions(-) diff --git a/diskquota--1.0--2.0.sql b/diskquota--1.0--2.0.sql index 995e68cf61b..adc7899485e 100644 --- a/diskquota--1.0--2.0.sql +++ b/diskquota--1.0--2.0.sql @@ -32,16 +32,6 @@ RETURNS void STRICT AS 'MODULE_PATHNAME', 'diskquota_resume' LANGUAGE C; -CREATE OR REPLACE FUNCTION diskquota.enable_hardlimit() -RETURNS void STRICT -AS 'MODULE_PATHNAME', 'diskquota_enable_hardlimit' -LANGUAGE C; - -CREATE OR REPLACE FUNCTION diskquota.disable_hardlimit() -RETURNS void STRICT -AS 'MODULE_PATHNAME', 'diskquota_disable_hardlimit' -LANGUAGE C; - CREATE TYPE diskquota.blackmap_entry AS (target_oid oid, database_oid oid, tablespace_oid oid, target_type integer, seg_exceeded boolean); CREATE FUNCTION diskquota.refresh_blackmap(diskquota.blackmap_entry[], oid[]) diff --git a/diskquota--2.0--1.0.sql b/diskquota--2.0--1.0.sql index 58804d4df59..6bd6cd71ac1 100644 --- a/diskquota--2.0--1.0.sql +++ b/diskquota--2.0--1.0.sql @@ -8,10 +8,6 @@ DROP FUNCTION IF EXISTS diskquota.pause(); DROP FUNCTION IF EXISTS diskquota.resume(); -DROP FUNCTION IF EXISTS diskquota.disable_hardlimit(); - -DROP FUNCTION IF EXISTS diskquota.enable_hardlimit(); - DROP FUNCTION IF EXISTS diskquota.refresh_blackmap(diskquota.blackmap_entry[], oid[]); DROP FUNCTION IF EXISTS diskquota.status(); diff --git a/diskquota--2.0.sql b/diskquota--2.0.sql index 1d78f3f5de3..710fc352dc3 100644 --- a/diskquota--2.0.sql +++ b/diskquota--2.0.sql @@ -93,16 +93,6 @@ RETURNS void STRICT AS 'MODULE_PATHNAME', 'diskquota_resume' LANGUAGE C; -CREATE OR REPLACE FUNCTION diskquota.enable_hardlimit() -RETURNS void STRICT -AS 'MODULE_PATHNAME', 'diskquota_enable_hardlimit' -LANGUAGE C; - -CREATE OR REPLACE FUNCTION diskquota.disable_hardlimit() -RETURNS void STRICT -AS 'MODULE_PATHNAME', 'diskquota_disable_hardlimit' -LANGUAGE C; - CREATE VIEW diskquota.show_fast_schema_quota_view AS select pgns.nspname as schema_name, pgc.relnamespace as schema_oid, qc.quotalimitMB as quota_in_mb, sum(ts.size) as nspsize_in_bytes from diskquota.table_size as ts, diff --git a/diskquota.c b/diskquota.c index 10b41a9c2f3..7edf71bb15c 100644 --- a/diskquota.c +++ b/diskquota.c @@ -50,6 +50,7 @@ static volatile sig_atomic_t got_sigusr1 = false; int diskquota_naptime = 0; int diskquota_max_active_tables = 0; int diskquota_worker_timeout = 60; /* default timeout is 60 seconds */ +bool diskquota_hardlimit = false; DiskQuotaLocks diskquota_locks; ExtensionDDLMessage *extension_ddl_message = NULL; @@ -229,7 +230,7 @@ define_guc_variables(void) NULL); 
DefineCustomIntVariable("diskquota.max_active_tables", - "max number of active tables monitored by disk-quota", + "Max number of active tables monitored by disk-quota.", NULL, &diskquota_max_active_tables, 1 * 1024 * 1024, @@ -253,6 +254,16 @@ define_guc_variables(void) NULL, NULL, NULL); + DefineCustomBoolVariable("diskquota.hard_limit", + "Set this to 'on' to enable disk-quota hardlimit.", + NULL, + &diskquota_hardlimit, + false, + PGC_SIGHUP, + 0, + NULL, + NULL, + NULL); } /* ---- Functions for disk quota worker process ---- */ @@ -1130,7 +1141,7 @@ static const char* diskquota_status_check_hard_limit() // should run on coordinator only. Assert(IS_QUERY_DISPATCHER()); - bool hardlimit = pg_atomic_read_u32(diskquota_hardlimit); + bool hardlimit = diskquota_hardlimit; bool found, paused; LWLockAcquire(diskquota_locks.worker_map_lock, LW_SHARED); diff --git a/diskquota.h b/diskquota.h index 1ebe7243789..238369ab80f 100644 --- a/diskquota.h +++ b/diskquota.h @@ -106,7 +106,6 @@ typedef enum MessageResult MessageResult; extern DiskQuotaLocks diskquota_locks; extern ExtensionDDLMessage *extension_ddl_message; -extern pg_atomic_uint32 *diskquota_hardlimit; typedef struct DiskQuotaWorkerEntry DiskQuotaWorkerEntry; @@ -142,6 +141,7 @@ extern void init_disk_quota_hook(void); extern Datum diskquota_fetch_table_stat(PG_FUNCTION_ARGS); extern int diskquota_naptime; extern int diskquota_max_active_tables; +extern bool diskquota_hardlimit; extern int SEGCOUNT; extern int get_ext_major_version(void); diff --git a/quotamodel.c b/quotamodel.c index ad84cccc7b8..2c0c360a726 100644 --- a/quotamodel.c +++ b/quotamodel.c @@ -166,8 +166,6 @@ static HTAB *table_size_map = NULL; static HTAB *disk_quota_black_map = NULL; static HTAB *local_disk_quota_black_map = NULL; -pg_atomic_uint32 *diskquota_hardlimit = NULL; - static shmem_startup_hook_type prev_shmem_startup_hook = NULL; /* functions to maintain the quota maps */ @@ -445,12 +443,6 @@ disk_quota_shmem_startup(void) &hash_ctl, HASH_ELEM | HASH_FUNCTION); - diskquota_hardlimit = ShmemInitStruct("diskquota_hardlimit", - sizeof(pg_atomic_uint32), - &found); - if (!found) - memset((void *) diskquota_hardlimit, 0, sizeof(pg_atomic_uint32)); - /* use disk_quota_worker_map to manage diskquota worker processes. */ memset(&hash_ctl, 0, sizeof(hash_ctl)); hash_ctl.keysize = sizeof(Oid); @@ -506,7 +498,6 @@ DiskQuotaShmemSize(void) size = add_size(size, hash_estimate_size(MAX_NUM_MONITORED_DB, sizeof(Oid))); size = add_size(size, hash_estimate_size(MAX_NUM_MONITORED_DB, sizeof(DiskQuotaWorkerEntry))); size = add_size(size, hash_estimate_size(diskquota_max_active_tables, sizeof(Oid))); - size += sizeof(bool); /* sizeof(*diskquota_hardlimit) */ return size; } @@ -745,7 +736,7 @@ refresh_disk_quota_usage(bool is_init) /* copy local black map back to shared black map */ flush_local_black_map(); /* Dispatch blackmap entries to segments to perform hard-limit. 
*/ - if (pg_atomic_read_u32(diskquota_hardlimit)) + if (diskquota_hardlimit) dispatch_blackmap(local_active_table_stat_map); hash_destroy(local_active_table_stat_map); } @@ -1576,7 +1567,7 @@ quota_check_common(Oid reloid, RelFileNode *relfilenode) if (OidIsValid(reloid)) return check_blackmap_by_reloid(reloid); - enable_hardlimit = pg_atomic_read_u32(diskquota_hardlimit); + enable_hardlimit = diskquota_hardlimit; #ifdef FAULT_INJECTOR if (SIMPLE_FAULT_INJECTOR("enable_check_quota_by_relfilenode") == FaultInjectorTypeSkip) @@ -2143,80 +2134,3 @@ show_blackmap(PG_FUNCTION_ARGS) SRF_RETURN_DONE(funcctx); } - -static void -dispatch_hardlimit_flag(bool enable_hardlimit) -{ - CdbPgResults cdb_pgresults = {NULL, 0}; - int i; - StringInfoData sql; - - initStringInfo(&sql); - appendStringInfo(&sql, "SELECT diskquota.%s", - enable_hardlimit ? "enable_hardlimit()" : "disable_hardlimit()"); - CdbDispatchCommand(sql.data, DF_NONE, &cdb_pgresults); - - for (i = 0; i < cdb_pgresults.numResults; ++i) - { - PGresult *pgresult = cdb_pgresults.pg_results[i]; - if (PQresultStatus(pgresult) != PGRES_TUPLES_OK) - { - cdbdisp_clearCdbPgResults(&cdb_pgresults); - ereport(ERROR, - (errmsg("[diskquota] cannot %s hardlimit on segments, encounter unexpected result from segment: %d", - enable_hardlimit ? "enable" : "disable", - PQresultStatus(pgresult)))); - } - } - cdbdisp_clearCdbPgResults(&cdb_pgresults); -} - -PG_FUNCTION_INFO_V1(diskquota_enable_hardlimit); -Datum -diskquota_enable_hardlimit(PG_FUNCTION_ARGS) -{ - if (!superuser()) - ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("must be superuser to enable hardlimit"))); - - /* - * If this UDF is executed on segment servers, we should clear - * the blackmap firstly, or the relation may be blocked by the - * blackmap dispatched by the previous iteration. 
- */ - if (!IS_QUERY_DISPATCHER()) - { - HASH_SEQ_STATUS hash_seq; - GlobalBlackMapEntry *blackmapentry; - LWLockAcquire(diskquota_locks.black_map_lock, LW_EXCLUSIVE); - hash_seq_init(&hash_seq, disk_quota_black_map); - while ((blackmapentry = hash_seq_search(&hash_seq)) != NULL) - hash_search(disk_quota_black_map, &blackmapentry->keyitem, HASH_REMOVE, NULL); - LWLockRelease(diskquota_locks.black_map_lock); - } - - pg_atomic_write_u32(diskquota_hardlimit, true); - - if (IS_QUERY_DISPATCHER()) - dispatch_hardlimit_flag(true /*enable_hardlimit*/); - - PG_RETURN_VOID(); -} - -PG_FUNCTION_INFO_V1(diskquota_disable_hardlimit); -Datum -diskquota_disable_hardlimit(PG_FUNCTION_ARGS) -{ - if (!superuser()) - ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("must be superuser to disable hardlimit"))); - - pg_atomic_write_u32(diskquota_hardlimit, false); - - if (IS_QUERY_DISPATCHER()) - dispatch_hardlimit_flag(false /*enable_hardlimit*/); - - PG_RETURN_VOID(); -} diff --git a/tests/regress/expected/config.out b/tests/regress/expected/config.out index 2bf6188f41d..c147f0adde4 100644 --- a/tests/regress/expected/config.out +++ b/tests/regress/expected/config.out @@ -18,3 +18,9 @@ SHOW diskquota.worker_timeout; 60 (1 row) +SHOW diskquota.hard_limit; + diskquota.hard_limit +---------------------- + off +(1 row) + diff --git a/tests/regress/expected/test_ctas_pause.out b/tests/regress/expected/test_ctas_pause.out index c1dd71bf939..ae68b31acf9 100644 --- a/tests/regress/expected/test_ctas_pause.out +++ b/tests/regress/expected/test_ctas_pause.out @@ -1,11 +1,7 @@ CREATE SCHEMA hardlimit_s; SET search_path TO hardlimit_s; -SELECT diskquota.enable_hardlimit(); - enable_hardlimit ------------------- - -(1 row) - +\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null +\! gpstop -u > /dev/null SELECT diskquota.set_schema_quota('hardlimit_s', '1 MB'); set_schema_quota ------------------ @@ -33,12 +29,8 @@ CREATE TABLE t1 AS SELECT generate_series(1,1000000); -- expect succeed NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. -- disable hardlimit and do some clean-ups. -SELECT diskquota.disable_hardlimit(); - disable_hardlimit -------------------- - -(1 row) - +\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\! gpstop -u > /dev/null SELECT diskquota.resume(); resume -------- diff --git a/tests/regress/expected/test_ctas_role.out b/tests/regress/expected/test_ctas_role.out index 71ca544db4d..07c79dc6eae 100644 --- a/tests/regress/expected/test_ctas_role.out +++ b/tests/regress/expected/test_ctas_role.out @@ -1,10 +1,6 @@ -- Test that diskquota is able to cancel a running CTAS query by the role quota. -SELECT diskquota.enable_hardlimit(); - enable_hardlimit ------------------- - -(1 row) - +\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null +\! gpstop -u > /dev/null CREATE ROLE hardlimit_r; NOTICE: resource queue required -- using default resource queue "pg_default" SELECT diskquota.set_role_quota('hardlimit_r', '1MB'); @@ -85,9 +81,5 @@ NOTICE: table "aocs_table" does not exist, skipping RESET ROLE; REVOKE USAGE ON SCHEMA diskquota FROM hardlimit_r; DROP ROLE hardlimit_r; -SELECT diskquota.disable_hardlimit(); - disable_hardlimit -------------------- - -(1 row) - +\! 
gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\! gpstop -u > /dev/null diff --git a/tests/regress/expected/test_ctas_schema.out b/tests/regress/expected/test_ctas_schema.out index 549a70a3156..81a1fb613ed 100644 --- a/tests/regress/expected/test_ctas_schema.out +++ b/tests/regress/expected/test_ctas_schema.out @@ -1,10 +1,6 @@ -- Test that diskquota is able to cancel a running CTAS query by the schema quota. -SELECT diskquota.enable_hardlimit(); - enable_hardlimit ------------------- - -(1 row) - +\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null +\! gpstop -u > /dev/null CREATE SCHEMA hardlimit_s; SELECT diskquota.set_schema_quota('hardlimit_s', '1 MB'); set_schema_quota @@ -60,12 +56,8 @@ SELECT diskquota.wait_for_worker_new_epoch(); (1 row) -- disable hardlimit and do some clean-ups. -SELECT diskquota.disable_hardlimit(); - disable_hardlimit -------------------- - -(1 row) - +\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\! gpstop -u > /dev/null DROP TABLE IF EXISTS t1; NOTICE: table "t1" does not exist, skipping DROP TABLE IF EXISTS toast_table; diff --git a/tests/regress/expected/test_ctas_tablespace_role.out b/tests/regress/expected/test_ctas_tablespace_role.out index 5c6df2e3bad..bb551201223 100644 --- a/tests/regress/expected/test_ctas_tablespace_role.out +++ b/tests/regress/expected/test_ctas_tablespace_role.out @@ -1,10 +1,6 @@ -- Test that diskquota is able to cancel a running CTAS query by the tablespace role quota. -SELECT diskquota.enable_hardlimit(); - enable_hardlimit ------------------- - -(1 row) - +\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null +\! gpstop -u > /dev/null -- start_ignore \! mkdir -p /tmp/ctas_rolespc -- end_ignore @@ -86,9 +82,5 @@ RESET default_tablespace; DROP TABLESPACE ctas_rolespc; REVOKE USAGE ON SCHEMA diskquota FROM hardlimit_r; DROP ROLE hardlimit_r; -SELECT diskquota.disable_hardlimit(); - disable_hardlimit -------------------- - -(1 row) - +\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\! gpstop -u > /dev/null diff --git a/tests/regress/expected/test_ctas_tablespace_schema.out b/tests/regress/expected/test_ctas_tablespace_schema.out index f63f324cd8b..5d62898953b 100644 --- a/tests/regress/expected/test_ctas_tablespace_schema.out +++ b/tests/regress/expected/test_ctas_tablespace_schema.out @@ -1,10 +1,6 @@ -- Test that diskquota is able to cancel a running CTAS query by the tablespace schema quota. -SELECT diskquota.enable_hardlimit(); - enable_hardlimit ------------------- - -(1 row) - +\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null +\! gpstop -u > /dev/null -- start_ignore \! mkdir -p /tmp/ctas_schemaspc -- end_ignore @@ -82,9 +78,5 @@ RESET search_path; RESET default_tablespace; DROP SCHEMA hardlimit_s; DROP TABLESPACE ctas_schemaspc; -SELECT diskquota.disable_hardlimit(); - disable_hardlimit -------------------- - -(1 row) - +\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\! gpstop -u > /dev/null diff --git a/tests/regress/expected/test_drop_after_pause.out b/tests/regress/expected/test_drop_after_pause.out index 4ec538b00d7..26451fd799e 100644 --- a/tests/regress/expected/test_drop_after_pause.out +++ b/tests/regress/expected/test_drop_after_pause.out @@ -27,12 +27,8 @@ SELECT diskquota.wait_for_worker_new_epoch(); t (1 row) -SELECT diskquota.enable_hardlimit(); - enable_hardlimit ------------------- - -(1 row) - +\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null +\! 
gpstop -u > /dev/null CREATE SCHEMA SX; CREATE TABLE SX.a(i int); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. @@ -51,12 +47,8 @@ SELECT diskquota.wait_for_worker_new_epoch(); INSERT INTO SX.a SELECT generate_series(1,1000000); -- expect insert fail ERROR: schema's disk space quota exceeded with name:16933 (seg2 127.0.0.1:6004 pid=24622) -SELECT diskquota.disable_hardlimit(); - disable_hardlimit -------------------- - -(1 row) - +\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\! gpstop -u > /dev/null SELECT diskquota.pause(); pause ------- diff --git a/tests/regress/expected/test_show_status.out b/tests/regress/expected/test_show_status.out index 8d4fe819ae3..68997f9a775 100644 --- a/tests/regress/expected/test_show_status.out +++ b/tests/regress/expected/test_show_status.out @@ -5,10 +5,8 @@ select * from diskquota.status(); hard limits | disabled (2 rows) -select from diskquota.enable_hardlimit(); --- -(1 row) - +\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null +\! gpstop -u > /dev/null select * from diskquota.status(); name | status -------------+--------- @@ -16,10 +14,8 @@ select * from diskquota.status(); hard limits | enabled (2 rows) -select from diskquota.disable_hardlimit(); --- -(1 row) - +\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\! gpstop -u > /dev/null select * from diskquota.status(); name | status -------------+---------- @@ -38,10 +34,8 @@ select * from diskquota.status(); hard limits | disabled (2 rows) -select from diskquota.enable_hardlimit(); --- -(1 row) - +\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null +\! gpstop -u > /dev/null select * from diskquota.status(); name | status -------------+-------- @@ -49,10 +43,8 @@ select * from diskquota.status(); hard limits | paused (2 rows) -select from diskquota.disable_hardlimit(); --- -(1 row) - +\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\! gpstop -u > /dev/null select * from diskquota.status(); name | status -------------+---------- @@ -64,10 +56,8 @@ select from diskquota.resume(); -- (1 row) -select from diskquota.disable_hardlimit(); --- -(1 row) - +\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\! gpstop -u > /dev/null select * from diskquota.status(); name | status -------------+---------- diff --git a/tests/regress/sql/config.sql b/tests/regress/sql/config.sql index 64908e80269..2a14961f3c3 100644 --- a/tests/regress/sql/config.sql +++ b/tests/regress/sql/config.sql @@ -1,11 +1,12 @@ --start_ignore CREATE DATABASE diskquota; -\! gpconfig -c shared_preload_libraries -v diskquota +\! gpconfig -c shared_preload_libraries -v diskquota \! gpstop -raf -\! gpconfig -c diskquota.naptime -v 0 +\! gpconfig -c diskquota.naptime -v 0 \! gpconfig -c max_worker_processes -v 20 +\! gpconfig -c diskquota.hard_limit -v "off" \! gpstop -raf --end_ignore @@ -14,3 +15,4 @@ CREATE DATABASE diskquota; SHOW diskquota.naptime; SHOW diskquota.max_active_tables; SHOW diskquota.worker_timeout; +SHOW diskquota.hard_limit; diff --git a/tests/regress/sql/test_ctas_pause.sql b/tests/regress/sql/test_ctas_pause.sql index c7c1a78a41d..8980ce904c4 100644 --- a/tests/regress/sql/test_ctas_pause.sql +++ b/tests/regress/sql/test_ctas_pause.sql @@ -1,7 +1,8 @@ CREATE SCHEMA hardlimit_s; SET search_path TO hardlimit_s; -SELECT diskquota.enable_hardlimit(); +\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null +\! 
gpstop -u > /dev/null SELECT diskquota.set_schema_quota('hardlimit_s', '1 MB'); SELECT diskquota.wait_for_worker_new_epoch(); @@ -13,7 +14,8 @@ SELECT diskquota.pause(); CREATE TABLE t1 AS SELECT generate_series(1,1000000); -- expect succeed -- disable hardlimit and do some clean-ups. -SELECT diskquota.disable_hardlimit(); +\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\! gpstop -u > /dev/null SELECT diskquota.resume(); DROP SCHEMA hardlimit_s CASCADE; diff --git a/tests/regress/sql/test_ctas_role.sql b/tests/regress/sql/test_ctas_role.sql index 436fd2b97eb..19750be032e 100644 --- a/tests/regress/sql/test_ctas_role.sql +++ b/tests/regress/sql/test_ctas_role.sql @@ -1,5 +1,6 @@ -- Test that diskquota is able to cancel a running CTAS query by the role quota. -SELECT diskquota.enable_hardlimit(); +\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null +\! gpstop -u > /dev/null CREATE ROLE hardlimit_r; SELECT diskquota.set_role_quota('hardlimit_r', '1MB'); GRANT USAGE ON SCHEMA diskquota TO hardlimit_r; @@ -35,4 +36,5 @@ DROP TABLE IF EXISTS aocs_table; RESET ROLE; REVOKE USAGE ON SCHEMA diskquota FROM hardlimit_r; DROP ROLE hardlimit_r; -SELECT diskquota.disable_hardlimit(); +\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\! gpstop -u > /dev/null diff --git a/tests/regress/sql/test_ctas_schema.sql b/tests/regress/sql/test_ctas_schema.sql index e21375b8690..e4e4db3752c 100644 --- a/tests/regress/sql/test_ctas_schema.sql +++ b/tests/regress/sql/test_ctas_schema.sql @@ -1,5 +1,7 @@ -- Test that diskquota is able to cancel a running CTAS query by the schema quota. -SELECT diskquota.enable_hardlimit(); +\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null +\! gpstop -u > /dev/null + CREATE SCHEMA hardlimit_s; SELECT diskquota.set_schema_quota('hardlimit_s', '1 MB'); SET search_path TO hardlimit_s; @@ -23,7 +25,8 @@ CREATE TABLE aocs_table WITH (appendonly=true, orientation=column) SELECT diskquota.wait_for_worker_new_epoch(); -- disable hardlimit and do some clean-ups. -SELECT diskquota.disable_hardlimit(); +\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\! gpstop -u > /dev/null DROP TABLE IF EXISTS t1; DROP TABLE IF EXISTS toast_table; DROP TABLE IF EXISTS ao_table; diff --git a/tests/regress/sql/test_ctas_tablespace_role.sql b/tests/regress/sql/test_ctas_tablespace_role.sql index 4b6ded807d8..628ef94d191 100644 --- a/tests/regress/sql/test_ctas_tablespace_role.sql +++ b/tests/regress/sql/test_ctas_tablespace_role.sql @@ -1,5 +1,6 @@ -- Test that diskquota is able to cancel a running CTAS query by the tablespace role quota. -SELECT diskquota.enable_hardlimit(); +\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null +\! gpstop -u > /dev/null -- start_ignore \! mkdir -p /tmp/ctas_rolespc -- end_ignore @@ -43,4 +44,5 @@ RESET default_tablespace; DROP TABLESPACE ctas_rolespc; REVOKE USAGE ON SCHEMA diskquota FROM hardlimit_r; DROP ROLE hardlimit_r; -SELECT diskquota.disable_hardlimit(); +\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\! gpstop -u > /dev/null diff --git a/tests/regress/sql/test_ctas_tablespace_schema.sql b/tests/regress/sql/test_ctas_tablespace_schema.sql index 0caac946d12..57a3c73e497 100644 --- a/tests/regress/sql/test_ctas_tablespace_schema.sql +++ b/tests/regress/sql/test_ctas_tablespace_schema.sql @@ -1,5 +1,6 @@ -- Test that diskquota is able to cancel a running CTAS query by the tablespace schema quota. -SELECT diskquota.enable_hardlimit(); +\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null +\! 
gpstop -u > /dev/null -- start_ignore \! mkdir -p /tmp/ctas_schemaspc @@ -41,4 +42,5 @@ RESET search_path; RESET default_tablespace; DROP SCHEMA hardlimit_s; DROP TABLESPACE ctas_schemaspc; -SELECT diskquota.disable_hardlimit(); +\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\! gpstop -u > /dev/null diff --git a/tests/regress/sql/test_drop_after_pause.sql b/tests/regress/sql/test_drop_after_pause.sql index 003e2dd6d17..f79ce3c32fc 100644 --- a/tests/regress/sql/test_drop_after_pause.sql +++ b/tests/regress/sql/test_drop_after_pause.sql @@ -11,7 +11,8 @@ DROP EXTENSION diskquota; CREATE EXTENSION diskquota; SELECT diskquota.wait_for_worker_new_epoch(); -SELECT diskquota.enable_hardlimit(); +\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null +\! gpstop -u > /dev/null CREATE SCHEMA SX; CREATE TABLE SX.a(i int); @@ -19,7 +20,8 @@ SELECT diskquota.set_schema_quota('SX', '1MB'); SELECT diskquota.wait_for_worker_new_epoch(); INSERT INTO SX.a SELECT generate_series(1,1000000); -- expect insert fail -SELECT diskquota.disable_hardlimit(); +\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\! gpstop -u > /dev/null SELECT diskquota.pause(); SELECT diskquota.wait_for_worker_new_epoch(); DROP EXTENSION diskquota; diff --git a/tests/regress/sql/test_show_status.sql b/tests/regress/sql/test_show_status.sql index d532406fc71..b59af2c7b69 100644 --- a/tests/regress/sql/test_show_status.sql +++ b/tests/regress/sql/test_show_status.sql @@ -1,20 +1,25 @@ select * from diskquota.status(); -select from diskquota.enable_hardlimit(); +\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null +\! gpstop -u > /dev/null select * from diskquota.status(); -select from diskquota.disable_hardlimit(); +\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\! gpstop -u > /dev/null select * from diskquota.status(); select from diskquota.pause(); select * from diskquota.status(); -select from diskquota.enable_hardlimit(); +\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null +\! gpstop -u > /dev/null select * from diskquota.status(); -select from diskquota.disable_hardlimit(); +\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\! gpstop -u > /dev/null select * from diskquota.status(); select from diskquota.resume(); -select from diskquota.disable_hardlimit(); +\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\! 
gpstop -u > /dev/null select * from diskquota.status(); From 78d72e445aa49fbd205e62697c7c2026ad0eb8b7 Mon Sep 17 00:00:00 2001 From: Sasasu Date: Mon, 21 Feb 2022 15:52:18 +0800 Subject: [PATCH 124/330] be nice with scheduler when naptime = 0 (#120) --- diskquota.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/diskquota.c b/diskquota.c index 7edf71bb15c..7d4cbe271c8 100644 --- a/diskquota.c +++ b/diskquota.c @@ -41,6 +41,8 @@ PG_MODULE_MAGIC; #define DISKQUOTA_DB "diskquota" #define DISKQUOTA_APPLICATION_NAME "gp_reserved_gpdiskquota" +extern int usleep(useconds_t usec); // in <unistd.h> + /* flags set by signal handlers */ static volatile sig_atomic_t got_sighup = false; static volatile sig_atomic_t got_sigterm = false; @@ -335,6 +337,10 @@ disk_quota_worker_main(Datum main_arg) diskquota_naptime * 1000L); ResetLatch(&MyProc->procLatch); + // be nice to scheduler when naptime == 0 and diskquota_is_paused() == true + if (!diskquota_naptime) + usleep(1); + /* Emergency bailout if postmaster has died */ if (rc & WL_POSTMASTER_DEATH) proc_exit(1); @@ -378,6 +384,10 @@ disk_quota_worker_main(Datum main_arg) diskquota_naptime * 1000L); ResetLatch(&MyProc->procLatch); + // be nice to scheduler when naptime == 0 and diskquota_is_paused() == true + if (!diskquota_naptime) + usleep(1); + /* Emergency bailout if postmaster has died */ if (rc & WL_POSTMASTER_DEATH) proc_exit(1); @@ -481,6 +491,10 @@ disk_quota_launcher_main(Datum main_arg) diskquota_naptime * 1000L); ResetLatch(&MyProc->procLatch); + // wait at least one time slice, avoid 100% CPU usage + if (!diskquota_naptime) + usleep(1); + /* Emergency bailout if postmaster has died */ if (rc & WL_POSTMASTER_DEATH) proc_exit(1); From 0e2de7aede26b1ceceec2c0ad7ed7a324e692160 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Xuebin=20Su=20=28=E8=8B=8F=E5=AD=A6=E6=96=8C=29?= <12034000+xuebinsu@users.noreply.github.com> Date: Mon, 21 Feb 2022 16:00:39 +0800 Subject: [PATCH 125/330] Create worker entry before starting worker (#144) Currently, the Diskquota launcher first starts a worker, then creates the worker entry. However, after the worker starts, it cannot find the entry when trying to check the is_paused status. Also, after a GPDB restart, when the QD checks whether the worker is running by checking the epoch, it might also fail to find the entry. This patch fixes the issue by first creating the worker entry and then starting the bgworker process, as sketched below.
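In outline, the corrected launcher-side sequence is (a sketch only, error handling omitted; worker_create_entry() and worker_set_handle() are the helpers introduced in the diff below, the rest is the standard PostgreSQL bgworker API):

    worker_create_entry(dbid);                          /* shmem entry exists before the worker can look it up */
    RegisterDynamicBackgroundWorker(&worker, &handle);  /* only now start the worker */
    status = WaitForBackgroundWorkerStartup(handle, &pid);
    Assert(status == BGWH_STARTED);
    worker_set_handle(dbid, handle);                    /* record the handle for later liveness checks */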
--- diskquota.c | 70 +++++++++++++++++++++++++++++++++++++++-------------- diskquota.h | 1 - 2 files changed, 52 insertions(+), 19 deletions(-) diff --git a/diskquota.c b/diskquota.c index 7d4cbe271c8..060cce150e3 100644 --- a/diskquota.c +++ b/diskquota.c @@ -981,6 +981,53 @@ terminate_all_workers(void) LWLockRelease(diskquota_locks.worker_map_lock); } +static bool +worker_create_entry(Oid dbid) +{ + DiskQuotaWorkerEntry *workerentry = NULL; + bool found = false; + + LWLockAcquire(diskquota_locks.worker_map_lock, LW_EXCLUSIVE); + + workerentry = (DiskQuotaWorkerEntry *) hash_search(disk_quota_worker_map, + (void *) &dbid, + HASH_ENTER, &found); + if (!found) + { + workerentry->handle = NULL; + pg_atomic_write_u32(&(workerentry->epoch), 0); + workerentry->is_paused = false; + } + + LWLockRelease(diskquota_locks.worker_map_lock); + return found; +} + +static bool +worker_set_handle(Oid dbid, BackgroundWorkerHandle *handle) +{ + DiskQuotaWorkerEntry *workerentry = NULL; + bool found = false; + + LWLockAcquire(diskquota_locks.worker_map_lock, LW_EXCLUSIVE); + + workerentry = (DiskQuotaWorkerEntry *) hash_search(disk_quota_worker_map, + (void *) &dbid, + HASH_ENTER, &found); + if (found) + { + workerentry->handle = handle; + } + LWLockRelease(diskquota_locks.worker_map_lock); + if (!found) + { + ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("[diskquota] worker not found for database \"%s\"", + get_database_name(dbid)))); + } + return found; +} + /* * Dynamically launch an disk quota worker process. * This function is called when laucher process receive @@ -995,9 +1042,10 @@ start_worker_by_dboid(Oid dbid) MemoryContext old_ctx; char *dbname; pid_t pid; - bool found; bool ret; - DiskQuotaWorkerEntry *workerentry; + + /* Create entry first so that it can be checked by bgworker and QD. */ + worker_create_entry(dbid); memset(&worker, 0, sizeof(BackgroundWorker)); worker.bgw_flags = BGWORKER_SHMEM_ACCESS | @@ -1041,22 +1089,8 @@ start_worker_by_dboid(Oid dbid) Assert(status == BGWH_STARTED); - LWLockAcquire(diskquota_locks.worker_map_lock, LW_EXCLUSIVE); - - /* put the worker handle into the worker map */ - workerentry = (DiskQuotaWorkerEntry *) hash_search(disk_quota_worker_map, - (void *) &dbid, - HASH_ENTER, &found); - if (!found) - { - workerentry->handle = handle; - workerentry->pid = pid; - pg_atomic_write_u32(&(workerentry->epoch), 0); - workerentry->is_paused = false; - } - - LWLockRelease(diskquota_locks.worker_map_lock); - + /* Save the handle to the worker map to check the liveness. */ + worker_set_handle(dbid, handle); return true; } diff --git a/diskquota.h b/diskquota.h index 238369ab80f..298f1c6f5e4 100644 --- a/diskquota.h +++ b/diskquota.h @@ -113,7 +113,6 @@ typedef struct DiskQuotaWorkerEntry DiskQuotaWorkerEntry; struct DiskQuotaWorkerEntry { Oid dbid; - pid_t pid; /* worker pid */ pg_atomic_uint32 epoch; /* this counter will be increased after each worker loop */ bool is_paused; /* true if this worker is paused */ BackgroundWorkerHandle *handle; From e4a19d70db77053aca3226144052699ce72f406d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Xuebin=20Su=20=28=E8=8B=8F=E5=AD=A6=E6=96=8C=29?= <12034000+xuebinsu@users.noreply.github.com> Date: Mon, 21 Feb 2022 17:47:23 +0800 Subject: [PATCH 126/330] Extend fetch_table_stat() to update db cache (#138) The db cache stores which databases enable diskquota. Active tables will be recorded only if they are in those databases. Previously, we created a new UDF update_diskquota_db_list() to add the current db to the cache.
However, the UDF is installed in the wrong database. As a result, after the user upgrades from a previous version to 1.0.3, the bgworker does not find the UDF and can do nothing. This patch fixes the issue by removing update_diskquota_db_list() and using fetch_table_stat() to update the db cache. fetch_table_stat() has existed since version 1.0.0, so no new UDF is needed. This PR replaces PR #99 and depends on PR #130 to fix a race condition that occurs after CREATE EXTENSION. --- diskquota--1.0.sql | 5 --- diskquota--2.0.sql | 5 --- diskquota.c | 20 ++------- diskquota.h | 4 +- diskquota_utility.c | 38 ++++++++-------- gp_activetable.c | 29 ++++++++++--- gp_activetable.h | 1 + quotamodel.c | 15 ++++--- tests/regress/diskquota_schedule | 1 + .../regress/expected/test_update_db_cache.out | 43 +++++++++++++++++++ tests/regress/sql/test_update_db_cache.sql | 36 ++++++++++++++++ 11 files changed, 138 insertions(+), 59 deletions(-) create mode 100644 tests/regress/expected/test_update_db_cache.out create mode 100644 tests/regress/sql/test_update_db_cache.sql diff --git a/diskquota--1.0.sql b/diskquota--1.0.sql index af22a2ff76e..7ce807e6583 100644 --- a/diskquota--1.0.sql +++ b/diskquota--1.0.sql @@ -21,11 +21,6 @@ RETURNS void STRICT AS 'MODULE_PATHNAME' LANGUAGE C; -CREATE FUNCTION diskquota.update_diskquota_db_list(oid, int4) -RETURNS void STRICT -AS 'MODULE_PATHNAME' -LANGUAGE C; - CREATE TABLE diskquota.table_size (tableid oid, size bigint, PRIMARY KEY(tableid)); CREATE TABLE diskquota.state (state int, PRIMARY KEY(state)); diff --git a/diskquota--2.0.sql b/diskquota--2.0.sql index 710fc352dc3..057e9beaca5 100644 --- a/diskquota--2.0.sql +++ b/diskquota--2.0.sql @@ -43,11 +43,6 @@ RETURNS void STRICT AS 'MODULE_PATHNAME' LANGUAGE C; -CREATE FUNCTION diskquota.update_diskquota_db_list(oid, int4) -RETURNS void STRICT -AS 'MODULE_PATHNAME' -LANGUAGE C; - CREATE TYPE diskquota.blackmap_entry AS (target_oid oid, database_oid oid, tablespace_oid oid, target_type integer, seg_exceeded boolean); CREATE FUNCTION diskquota.refresh_blackmap(diskquota.blackmap_entry[], oid[]) diff --git a/diskquota.c b/diskquota.c index 060cce150e3..50df27e3b71 100644 --- a/diskquota.c +++ b/diskquota.c @@ -132,15 +132,15 @@ _PG_init(void) init_disk_quota_enforcement(); init_active_table_hook(); + /* Add dq_object_access_hook to handle drop extension event.
*/ - register_diskquota_object_access_hook(); - /* set up common data for diskquota launcher worker */ worker.bgw_flags = BGWORKER_SHMEM_ACCESS | BGWORKER_BACKEND_DATABASE_CONNECTION; @@ -548,10 +548,7 @@ create_monitor_db_table(void) bool ret = true; sql = "create schema if not exists diskquota_namespace;" - "create table if not exists diskquota_namespace.database_list(dbid oid not null unique);" - "create schema if not exists diskquota;" - "create or replace function diskquota.update_diskquota_db_list(oid, int4) returns void " - "strict as '$libdir/diskquota' language C;"; + "create table if not exists diskquota_namespace.database_list(dbid oid not null unique);"; StartTransactionCommand(); @@ -917,15 +914,6 @@ del_dbid_from_database_list(Oid dbid) ereport(ERROR, (errmsg("[diskquota launcher] SPI_execute sql:'%s', errno:%d", str.data, errno))); } pfree(str.data); - - /* clean the dbid from shared memory*/ - initStringInfo(&str); - appendStringInfo(&str, "select gp_segment_id, diskquota.update_diskquota_db_list(%u, 1)" - " from gp_dist_random('gp_id');", dbid); - ret = SPI_execute(str.data, true, 0); - if (ret != SPI_OK_SELECT) - ereport(ERROR, (errmsg("[diskquota launcher] SPI_execute sql:'%s', errno:%d", str.data, errno))); - pfree(str.data); } /* diff --git a/diskquota.h b/diskquota.h index 298f1c6f5e4..d9e596e7556 100644 --- a/diskquota.h +++ b/diskquota.h @@ -32,7 +32,9 @@ typedef enum typedef enum { FETCH_ACTIVE_OID, /* fetch active table list */ - FETCH_ACTIVE_SIZE /* fetch size for active tables */ + FETCH_ACTIVE_SIZE, /* fetch size for active tables */ + ADD_DB_TO_MONITOR, + REMOVE_DB_FROM_BEING_MONITORED, } FetchTableStatType; typedef enum diff --git a/diskquota_utility.c b/diskquota_utility.c index ba62e2c2c41..8f093cc9c53 100644 --- a/diskquota_utility.c +++ b/diskquota_utility.c @@ -56,7 +56,6 @@ PG_FUNCTION_INFO_V1(set_schema_quota); PG_FUNCTION_INFO_V1(set_role_quota); PG_FUNCTION_INFO_V1(set_schema_tablespace_quota); PG_FUNCTION_INFO_V1(set_role_tablespace_quota); -PG_FUNCTION_INFO_V1(update_diskquota_db_list); PG_FUNCTION_INFO_V1(set_per_segment_quota); PG_FUNCTION_INFO_V1(relation_size_local); @@ -500,6 +499,17 @@ dq_object_access_hook(ObjectAccessType access, Oid classId, if (oid != objectId) goto out; + /* + * Remove the current database from monitored db cache + * on all segments and on coordinator. + */ + update_diskquota_db_list(MyDatabaseId, HASH_REMOVE); + + if (!IS_QUERY_DISPATCHER()) + { + return; + } + /* * Lock on extension_ddl_lock to avoid multiple backend create diskquota * extension at the same time. 
@@ -1013,38 +1023,29 @@ get_size_in_mb(char *str) /* * Function to update the db list on each segment + * Will print a WARNING to the log if out of memory */ -Datum -update_diskquota_db_list(PG_FUNCTION_ARGS) +void +update_diskquota_db_list(Oid dbid, HASHACTION action) { - Oid dbid = PG_GETARG_OID(0); - int mode = PG_GETARG_INT32(1); bool found = false; - if (!superuser()) - { - ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("must be superuser to update db list"))); - } - /* add/remove the dbid to monitoring database cache to filter out table not under * monitoring in hook functions */ LWLockAcquire(diskquota_locks.monitoring_dbid_cache_lock, LW_EXCLUSIVE); - if (mode == 0) + if (action == HASH_ENTER) { Oid *entry = NULL; - entry = hash_search(monitoring_dbid_cache, &dbid, HASH_ENTER, &found); - elog(WARNING, "add dbid %u into SHM", dbid); - if (!found && entry == NULL) + entry = hash_search(monitoring_dbid_cache, &dbid, HASH_ENTER_NULL, &found); + if (entry == NULL) { ereport(WARNING, (errmsg("can't alloc memory on dbid cache, there ary too many databases to monitor"))); } } - else if (mode == 1) + else if (action == HASH_REMOVE) { hash_search(monitoring_dbid_cache, &dbid, HASH_REMOVE, &found); if (!found) @@ -1054,9 +1055,6 @@ update_diskquota_db_list(PG_FUNCTION_ARGS) } } LWLockRelease(diskquota_locks.monitoring_dbid_cache_lock); - - PG_RETURN_VOID(); - } /* diff --git a/gp_activetable.c b/gp_activetable.c index 352d998d7bd..cbeb772f55d 100644 --- a/gp_activetable.c +++ b/gp_activetable.c @@ -348,11 +348,22 @@ gp_fetch_active_tables(bool is_init) /* * Function to get the table size from each segments - * There are three mode: - * 1. gather active table oid from all the segments, since table may only - * be modified on a subset of the segments, we need to firstly gather the - * active table oid list from all the segments. - * 2. calculate the active table size based on the active table oid list. + * There are 4 modes: + * + * - FETCH_ACTIVE_OID: gather active table oid from all the segments, since + * table may only be modified on a subset of the segments, we need to firstly + * gather the active table oid list from all the segments. + * + * - FETCH_ACTIVE_SIZE: calculate the active table size based on the active + * table oid list. + * + * - ADD_DB_TO_MONITOR: add MyDatabaseId to the monitored db cache so that + * active tables in the current database will be recorded. This is used each + * time a worker starts. + * + * - REMOVE_DB_FROM_BEING_MONITORED: remove MyDatabaseId from the monitored + * db cache so that active tables in the current database will no longer be recorded. + * This is used when DROP EXTENSION. */ Datum diskquota_fetch_table_stat(PG_FUNCTION_ARGS) @@ -400,6 +411,12 @@ diskquota_fetch_table_stat(PG_FUNCTION_ARGS) case FETCH_ACTIVE_SIZE: localCacheTable = get_active_tables_stats(PG_GETARG_ARRAYTYPE_P(1)); break; + case ADD_DB_TO_MONITOR: + update_diskquota_db_list(MyDatabaseId, HASH_ENTER); + break; + case REMOVE_DB_FROM_BEING_MONITORED: + update_diskquota_db_list(MyDatabaseId, HASH_REMOVE); + break; default: ereport(ERROR, (errmsg("Unused mode number, transaction will be aborted"))); break; @@ -410,7 +427,7 @@ diskquota_fetch_table_stat(PG_FUNCTION_ARGS) * total number of active tables to be returned, each tuple contains * one active table stat */ - funcctx->max_calls = (uint32) hash_get_num_entries(localCacheTable); + funcctx->max_calls = localCacheTable ?
(uint32) hash_get_num_entries(localCacheTable) : 0; /* * prepare attribute metadata for next calls that generate the tuple diff --git a/gp_activetable.h b/gp_activetable.h index c2b0cfcea6e..66ccc2916e8 100644 --- a/gp_activetable.h +++ b/gp_activetable.h @@ -29,6 +29,7 @@ extern HTAB *gp_fetch_active_tables(bool force); extern void init_active_table_hook(void); extern void init_shm_worker_active_tables(void); extern void init_lock_active_tables(void); +extern void update_diskquota_db_list(Oid dbid, HASHACTION action); extern HTAB *active_tables_map; extern HTAB *monitoring_dbid_cache; diff --git a/quotamodel.c b/quotamodel.c index 2c0c360a726..5a2cf606312 100644 --- a/quotamodel.c +++ b/quotamodel.c @@ -612,15 +612,18 @@ do_check_diskquota_state_is_ready(void) int i; StringInfoData sql_command; - /* Add the dbid to watching list, so the hook can catch the table change*/ initStringInfo(&sql_command); - appendStringInfo(&sql_command, "select gp_segment_id, diskquota.update_diskquota_db_list(%u, 0) from gp_dist_random('gp_id') UNION ALL select -1, diskquota.update_diskquota_db_list(%u, 0);", - MyDatabaseId, MyDatabaseId); + /* Add current database to the monitored db cache on all segments */ + appendStringInfo(&sql_command, + "SELECT diskquota.diskquota_fetch_table_stat(%d, ARRAY[]::oid[]) " + "FROM gp_dist_random('gp_id');", ADD_DB_TO_MONITOR); ret = SPI_execute(sql_command.data, true, 0); - if (ret != SPI_OK_SELECT) - ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("[diskquota] check diskquota state SPI_execute failed: error code %d", ret))); + if (ret != SPI_OK_SELECT) + ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("[diskquota] check diskquota state SPI_execute failed: error code %d", ret))); pfree(sql_command.data); + /* Add current database to the monitored db cache on coordinator */ + update_diskquota_db_list(MyDatabaseId, HASH_ENTER); /* * check diskquota state from table diskquota.state errors will be catch * at upper level function. 
diff --git a/tests/regress/diskquota_schedule b/tests/regress/diskquota_schedule index 45b2c147ceb..bae728b4904 100644 --- a/tests/regress/diskquota_schedule +++ b/tests/regress/diskquota_schedule @@ -7,6 +7,7 @@ test: test_pause_and_resume test: test_pause_and_resume_multiple_db test: test_drop_after_pause test: test_show_status +test: test_update_db_cache # disable this tese due to GPDB behavior change # test: test_table_size test: test_fast_disk_check diff --git a/tests/regress/expected/test_update_db_cache.out b/tests/regress/expected/test_update_db_cache.out new file mode 100644 index 00000000000..a22374c48d6 --- /dev/null +++ b/tests/regress/expected/test_update_db_cache.out @@ -0,0 +1,43 @@ +--start_ignore +CREATE DATABASE test_db_cache; +--end_ignore +\c test_db_cache +CREATE EXTENSION diskquota; +CREATE TABLE t(i) AS SELECT generate_series(1, 100000) +DISTRIBUTED BY (i); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid +FROM diskquota.table_size +WHERE tableid = 't'::regclass +ORDER BY segid; + tableid | size | segid +---------+---------+------- + t | 3637248 | -1 + t | 1212416 | 0 + t | 1212416 | 1 + t | 1212416 | 2 +(4 rows) + +DROP EXTENSION diskquota; +-- Create table without extension +CREATE TABLE t_no_extension(i) AS SELECT generate_series(1, 100000) +DISTRIBUTED BY (i); +CREATE EXTENSION diskquota; +WARNING: database is not empty, please run `select diskquota.init_table_size_table()` to initialize table_size information for diskquota extension. Note that for large database, this function may take a long time. +-- Should find nothing since t_no_extension is not recorded. +SELECT diskquota.diskquota_fetch_table_stat(0, ARRAY[]::oid[]) +FROM gp_dist_random('gp_id'); + diskquota_fetch_table_stat +---------------------------- +(0 rows) + +DROP TABLE t; +DROP TABLE t_no_extension; +DROP EXTENSION diskquota; +\c contrib_regression +DROP DATABASE test_db_cache; diff --git a/tests/regress/sql/test_update_db_cache.sql b/tests/regress/sql/test_update_db_cache.sql new file mode 100644 index 00000000000..5da48e137bf --- /dev/null +++ b/tests/regress/sql/test_update_db_cache.sql @@ -0,0 +1,36 @@ +--start_ignore +CREATE DATABASE test_db_cache; +--end_ignore + +\c test_db_cache +CREATE EXTENSION diskquota; + +CREATE TABLE t(i) AS SELECT generate_series(1, 100000) +DISTRIBUTED BY (i); + +SELECT diskquota.wait_for_worker_new_epoch(); + +SELECT tableid::regclass, size, segid +FROM diskquota.table_size +WHERE tableid = 't'::regclass +ORDER BY segid; + +DROP EXTENSION diskquota; + +-- Create table without extension +CREATE TABLE t_no_extension(i) AS SELECT generate_series(1, 100000) +DISTRIBUTED BY (i); + +CREATE EXTENSION diskquota; + +-- Should find nothing since t_no_extension is not recorded. +SELECT diskquota.diskquota_fetch_table_stat(0, ARRAY[]::oid[]) +FROM gp_dist_random('gp_id'); + +DROP TABLE t; +DROP TABLE t_no_extension; + +DROP EXTENSION diskquota; + +\c contrib_regression +DROP DATABASE test_db_cache; \ No newline at end of file From 88991b364304e6f48d04810dbb42b6e50cf429ad Mon Sep 17 00:00:00 2001 From: Chen Mulong Date: Mon, 21 Feb 2022 18:58:34 +0800 Subject: [PATCH 127/330] Use ytt to create pipelines (#143) - Rewrite the pipelines by using ytt for code reuse. - Add `fly.sh` for easier manipulation of pipelines. - Create configs for pr, commit and dev pipelines. - Fix the test task for rhel8. - Use build/test images pair for all distros.
And modify the `build_diskquota.sh` so it won't need `Python.h` during the configure stage. - Fix the pipeline bug where a `ubuntu` test may use a `rhel6` diskquota binary to do the test. That is caused by the same name of the task output. --- concourse/fly.sh | 115 ++++++++++++++++++++++ concourse/pipeline/README.md | 65 +++++++++++++ concourse/pipeline/base.lib.yml | 34 +++++++ concourse/pipeline/commit.yml | 29 ++++++ concourse/pipeline/dev.yml | 26 +++++ concourse/pipeline/job_def.lib.yml | 100 +++++++++++++++++++ concourse/pipeline/pr.yml | 24 +++++ concourse/pipeline/res_def.yml | 129 +++++++++++++++++++++++++ concourse/pipeline/trigger_def.lib.yml | 56 +++++++++++ concourse/scripts/build_diskquota.sh | 37 +++---- concourse/scripts/test_diskquota.sh | 1 - 11 files changed, 590 insertions(+), 26 deletions(-) create mode 100755 concourse/fly.sh create mode 100644 concourse/pipeline/README.md create mode 100644 concourse/pipeline/base.lib.yml create mode 100644 concourse/pipeline/commit.yml create mode 100644 concourse/pipeline/dev.yml create mode 100644 concourse/pipeline/job_def.lib.yml create mode 100644 concourse/pipeline/pr.yml create mode 100644 concourse/pipeline/res_def.yml create mode 100644 concourse/pipeline/trigger_def.lib.yml diff --git a/concourse/fly.sh b/concourse/fly.sh new file mode 100755 index 00000000000..efa59701211 --- /dev/null +++ b/concourse/fly.sh @@ -0,0 +1,115 @@ +#!/bin/bash + +set -e + +workspace=${WORKSPACE:-"$HOME/workspace"} +fly=${FLY:-"fly"} +echo "'workspace' location: ${workspace}" +echo "'fly' command: ${fly}" +echo "" + +usage() { + echo "Usage: $0 -t <target> -c <pipeline_config> [-p <pipeline_name>] [-b branch]" 1>&2 + if [ -n "$1" ]; then + echo "$1" + fi + exit 1 +} + +# Parse command line options +while getopts ":c:t:p:b:" o; do + case "${o}" in + c) + # pipeline type/config. pr/commit/dev/release + pipeline_config=${OPTARG} + ;; + t) + # concourse target + target=${OPTARG} + ;; + p) + # pipeline name + pipeline_name=${OPTARG} + ;; + b) + # branch name + branch=${OPTARG} + ;; + *) + usage "" + ;; + esac +done +shift $((OPTIND-1)) + +if [ -z "${target}" ] || [ -z "${pipeline_config}" ]; then + usage "" +fi + +# Decide ytt options to generate pipeline +case ${pipeline_config} in + pr) + if [ -z "${pipeline_name}" ]; then + pipeline_name="PR:diskquota" + fi + config_file="pr.yml" + hook_res="diskquota_pr" + ;; + commit) + if [ -z "${pipeline_name}" ]; then + pipeline_name="COMMIT:diskquota:gpdb" + fi + # Default branch is 'gpdb' as it is our main branch + if [ -z "${branch}" ]; then + branch="gpdb" + fi + config_file="commit.yml" + hook_res="diskquota_commit" + ;; + dev) + if [ -z "${pipeline_name}" ]; then + usage "'-p' needs to be supplied to specify the pipeline name for flying a 'dev' pipeline." + fi + pipeline_name="DEV:${pipeline_name}" + config_file="dev.yml" + ;; + *) + usage "" + ;; +esac + +yml_path="/tmp/diskquota_pipeline.yml" +my_path=$(realpath "${BASH_SOURCE[0]}") +ytt_base=$(dirname "${my_path}")/pipeline + +ytt --data-values-file "${ytt_base}/res_def.yml" \ + -f "${ytt_base}/base.lib.yml" \ + -f "${ytt_base}/job_def.lib.yml" \ + -f "${ytt_base}/trigger_def.lib.yml" \ + -f "${ytt_base}/${config_file}" > "${yml_path}" +echo "Generated pipeline yaml '${yml_path}'." + +echo "" +echo "Fly the pipeline..."
+set -v +"${fly}" \ + -t "${target}" \ + sp \ + -p "${pipeline_name}" \ + -c "${yml_path}" \ + -l "${workspace}/gp-continuous-integration/secrets/gpdb_common-ci-secrets.yml" \ + -l "${workspace}/gp-continuous-integration/secrets/gp-extensions-common.yml" \ + -l "${workspace}/gp-continuous-integration/secrets/gpdb_common-ci-secrets.yml" \ + -v "diskquota-branch=${branch}" +set +v + +if [ "${pipeline_config}" == "dev" ]; then + exit 0 +fi + +echo "" +echo "================================================================================" +echo "Remember to set the webhook URL on GitHub:" +echo "https://extensions.ci.gpdb.pivotal.io/api/v1/teams/main/pipelines/${pipeline_name}/resources/${hook_res}/check/webhook?webhook_token=" +echo "You may need to change the base URL if a different concourse server is used." +echo "================================================================================" diff --git a/concourse/pipeline/README.md b/concourse/pipeline/README.md new file mode 100644 index 00000000000..5e623fa5e58 --- /dev/null +++ b/concourse/pipeline/README.md @@ -0,0 +1,65 @@ +# Pipelines + +## Naming Prefix Rule + +- `PR:` for pull-request pipelines +- `COMMIT:<repo>:<branch>` for branch pipelines. It will be executed when a commit is committed/merged into the branch. +- `DEV:<your_name>_[any_other_info]` for personal development usage. Put your name into the pipeline name so others can know who owns it. + +## Pipelines for daily work + +### PR Pipeline + +https://extensions.ci.gpdb.pivotal.io/teams/main/pipelines/PR:diskquota + +### Main Branch Pipeline + +The development happens on the `gpdb` branch. The commit pipeline for the `gpdb` branch: +https://extensions.ci.gpdb.pivotal.io/teams/main/pipelines/COMMIT:diskquota:gpdb + + +# Fly a pipeline + +## Prerequisite + +- Install [ytt](https://carvel.dev/ytt/). It's written in Go. So just download the executable for your platform from the [release page](https://github.com/vmware-tanzu/carvel-ytt/releases). +- Make the `fly` command available in the `PATH` or export its location to `FLY` env. +- Clone the `gp-continuous-integration` repo to `$HOME/workspace` or set its parent directory to `WORKSPACE` env. +- Login with the `fly` command. Assume we are using `extension` as the target name. + + ``` + fly -t extension login -c https://extensions.ci.gpdb.pivotal.io + ``` +- `cd` to the `concourse` directory. + +## Fly the PR pipeline + +``` +./fly.sh -t extension -c pr +``` + +## Fly the commit pipeline + +``` +./fly.sh -t extension -c commit +``` + +## Fly the release pipeline + +TBD + +## Fly the dev pipeline + +``` +./fly.sh -t extension -c dev -p <your_name>_diskquota -b <branch> +``` + +## Webhook + +By default, the PR and commit pipelines use a webhook instead of polling to trigger a build. The webhook URL will be printed when flying such a pipeline with `fly.sh`. The webhook needs to be set in the `github repository` -> `Settings` -> `Webhooks` with push notification enabled. + +To test if the webhook works, use `curl` to send a `POST` request to the hook URL with some random data. If it is the right URL, the relevant resource will be refreshed on the Concourse UI. The command line looks like: + +``` +curl <webhook_url> --data-raw "foo" +``` diff --git a/concourse/pipeline/base.lib.yml b/concourse/pipeline/base.lib.yml new file mode 100644 index 00000000000..3b71d4ca05b --- /dev/null +++ b/concourse/pipeline/base.lib.yml @@ -0,0 +1,34 @@ +#@ load("@ytt:data", "data") +#!
add_res_by_xxx is to solve the unused resources error for concourse +#@ def add_res_by_conf(res_map, job_conf): +#@ for key in job_conf: +#@ if key.startswith("res_"): +#@ res_name = job_conf[key] +#@ res_map[res_name] = True +#@ end +#@ end +#@ end +#@ +#@ def add_res_by_name(res_map, res_name): +#@ res_map[res_name] = True +#@ end +#@ +#@ def declare_res(res_type_map, res_map): +#@ for val in data.values.resources: +#@ res_name = val["name"] +#@ res_type = val["type"] +#@ if res_map.get(val["name"]): +#@ res_type_map[res_type] = True + - #@ val +#@ end +#@ end +#@ end +#@ +#@ def declare_res_type(res_type_map): +#@ for val in data.values.resource_types: +#@ type_name = val["name"] +#@ if res_type_map.get(type_name): + - #@ val +#@ end +#@ end +#@ end diff --git a/concourse/pipeline/commit.yml b/concourse/pipeline/commit.yml new file mode 100644 index 00000000000..d6875d1839e --- /dev/null +++ b/concourse/pipeline/commit.yml @@ -0,0 +1,29 @@ +#@ load("job_def.lib.yml", +#@ "build_test_job", +#@ "centos6_gpdb6_conf", +#@ "centos7_gpdb6_conf", +#@ "rhel8_gpdb6_conf", +#@ "ubuntu18_gpdb6_conf") +#@ load("trigger_def.lib.yml", +#@ "commit_trigger", +#@ ) +#@ +#@ load("base.lib.yml", "declare_res", "declare_res_type") +#@ res_map = {} +#@ res_type_map = {} +#@ job_param = { +#@ "res_map": res_map, +#@ "trigger": commit_trigger(res_map), +#@ "gpdb_src": "gpdb6_src", +#@ "confs": [ +#@ centos6_gpdb6_conf(), +#@ centos7_gpdb6_conf(), +#@ rhel8_gpdb6_conf(), +#@ ubuntu18_gpdb6_conf()] +#@ } +jobs: +- #@ build_test_job(job_param) + +resources: #@ declare_res(res_type_map, res_map) + +resource_types: #@ declare_res_type(res_type_map) diff --git a/concourse/pipeline/dev.yml b/concourse/pipeline/dev.yml new file mode 100644 index 00000000000..a1979284981 --- /dev/null +++ b/concourse/pipeline/dev.yml @@ -0,0 +1,26 @@ +#@ load("job_def.lib.yml", +#@ "build_test_job", +#@ "centos6_gpdb6_conf", +#@ "centos7_gpdb6_conf", +#@ "rhel8_gpdb6_conf", +#@ "ubuntu18_gpdb6_conf") +#@ load("trigger_def.lib.yml", +#@ "commit_dev_trigger", +#@ ) +#@ +#@ load("base.lib.yml", "declare_res", "declare_res_type") +#@ res_map = {} +#@ res_type_map = {} +#@ job_param = { +#@ "res_map": res_map, +#@ "trigger": commit_dev_trigger(res_map), +#@ "gpdb_src": "gpdb6_src", +#@ "confs": [ +#@ ubuntu18_gpdb6_conf()] +#@ } +jobs: +- #@ build_test_job(job_param) + +resources: #@ declare_res(res_type_map, res_map) + +resource_types: #@ declare_res_type(res_type_map) diff --git a/concourse/pipeline/job_def.lib.yml b/concourse/pipeline/job_def.lib.yml new file mode 100644 index 00000000000..ac1abad61a8 --- /dev/null +++ b/concourse/pipeline/job_def.lib.yml @@ -0,0 +1,100 @@ +#@ load("base.lib.yml", "add_res_by_conf", "add_res_by_name") + +#! Job config for centos7 +#@ def centos6_gpdb6_conf(): +res_build_image: centos6-gpdb6-image-build +res_test_image: centos6-gpdb6-image-test +res_gpdb_bin: bin_gpdb6_centos6 +diskquota_os: rhel6 +#@ end + +#! Job config for centos7 +#@ def centos7_gpdb6_conf(): +res_build_image: centos7-gpdb6-image-build +res_test_image: centos7-gpdb6-image-test +res_gpdb_bin: bin_gpdb6_centos7 +diskquota_os: rhel7 +#@ end + +#! Job config for rhel8 +#@ def rhel8_gpdb6_conf(): +res_build_image: rhel8-gpdb6-image-build +res_test_image: rhel8-gpdb6-image-test +res_gpdb_bin: bin_gpdb6_rhel8 +diskquota_os: rhel8 +#@ end + +#! 
Job config for ubuntu18 +#@ def ubuntu18_gpdb6_conf(): +res_build_image: ubuntu18-gpdb6-image-build +res_test_image: ubuntu18-gpdb6-image-test +res_gpdb_bin: bin_gpdb6_ubuntu18 +diskquota_os: ubuntu18.04 +#@ end + +#@ def _build_task(conf): +task: #@ "build_" + conf["diskquota_os"] +file: diskquota_src/concourse/tasks/build_diskquota.yml +image: #@ conf["res_build_image"] +input_mapping: + bin_gpdb: #@ conf["res_gpdb_bin"] + diskquota_artifacts: diskquota_artifacts +#! output_mapping is necessary. Otherwise we may use a wrong +#! diskquota_bin in the test task. +output_mapping: + "diskquota_artifacts": #@ "diskquota_artifacts_" + conf["diskquota_os"] +params: + DISKQUOTA_OS: #@ conf["diskquota_os"] +#@ end + +#@ def _test_task(conf): +task: #@ "test_" + conf["diskquota_os"] +timeout: 1h +file: diskquota_src/concourse/tasks/test_diskquota.yml +image: #@ conf["res_test_image"] +input_mapping: + bin_gpdb: #@ conf["res_gpdb_bin"] + bin_diskquota: #@ "diskquota_artifacts_" + conf["diskquota_os"] +params: + DISKQUOTA_OS: #@ conf["diskquota_os"] +#@ end + +#@ def build_test_job(param): +#@ res_map = param["res_map"] +#@ trigger = param["trigger"] +#@ confs = param["confs"] +#@ add_res_by_name(res_map, param["gpdb_src"]) +name: build_test +max_in_flight: 10 +on_success: #@ trigger["on_success"] +on_failure: #@ trigger["on_failure"] +on_error: #@ trigger["on_error"] +plan: +- #@ trigger["plan"] +- in_parallel: + - get: gpdb_src + resource: #@ param["gpdb_src"] +#@ for conf in confs: +#@ add_res_by_conf(res_map, conf) +#@ if conf["res_build_image"] == conf["res_test_image"]: + - get: #@ conf["res_build_image"] +#@ else: + - get: #@ conf["res_build_image"] + - get: #@ conf["res_test_image"] +#@ end + - get: #@ conf["res_gpdb_bin"] +#@ end +#@ if len(confs) == 1: +#@ conf = confs[0] +- #@ _build_task(conf) +- #@ _test_task(conf) +#@ else: +- in_parallel: + steps: +#@ for conf in confs: + - do: + - #@ _build_task(conf) + - #@ _test_task(conf) +#@ end +#@ end +#@ end diff --git a/concourse/pipeline/pr.yml b/concourse/pipeline/pr.yml new file mode 100644 index 00000000000..1ce668cc3f5 --- /dev/null +++ b/concourse/pipeline/pr.yml @@ -0,0 +1,24 @@ +#@ load("job_def.lib.yml", +#@ "build_test_job", +#@ "centos7_gpdb6_conf", +#@ ) +#@ load("trigger_def.lib.yml", +#@ "pr_trigger", +#@ ) +#@ load("base.lib.yml", +#@ "declare_res", +#@ "declare_res_type") +#@ res_map = {} +#@ res_type_map = {} +#@ job_param = { +#@ "res_map": res_map, +#@ "gpdb_src": "gpdb6_src", +#@ "trigger": pr_trigger(res_map), +#@ "confs": [centos7_gpdb6_conf()] +#@ } +jobs: +- #@ build_test_job(job_param) + +resources: #@ declare_res(res_type_map, res_map) + +resource_types: #@ declare_res_type(res_type_map) diff --git a/concourse/pipeline/res_def.yml b/concourse/pipeline/res_def.yml new file mode 100644 index 00000000000..d4375e4b070 --- /dev/null +++ b/concourse/pipeline/res_def.yml @@ -0,0 +1,129 @@ +resource_types: +- name: gcs + type: registry-image + check_every: 1h + source: + repository: frodenas/gcs-resource + +- name: pull-request + type: docker-image + check_every: 1h + source: + repository: teliaoss/github-pr-resource + +resources: +# Pull Request +- name: diskquota_pr + type: pull-request + # We should rely on the webhook. 
See README if webhook doesn't work + webhook_token: ((diskquota-webhook-token)) + check_every: 24h + source: + disable_forks: false + repository: greenplum-db/diskquota + access_token: ((github-access-token)) + base_branch: gpdb +# Commit trigger +- name: diskquota_commit + type: git + # We should rely on the webhook. See README if webhook doesn't work + webhook_token: ((diskquota-webhook-token)) + check_every: 24h + source: + branch: ((diskquota-branch)) + uri: https://github.com/greenplum-db/diskquota.git + username: ((github-access-token)) + password: x-oauth-basic +# Commit dev trigger. Not using webhook +- name: diskquota_commit_dev + type: git + check_every: 1m + source: + branch: ((diskquota-branch)) + uri: https://github.com/greenplum-db/diskquota.git + username: ((github-access-token)) + password: x-oauth-basic + + +# Greenplum sources +- name: gpdb6_src + type: git + source: + branch: 6X_STABLE + uri: https://github.com/greenplum-db/gpdb.git + +# Image Resources +# centos6 +- name: centos6-gpdb6-image-build + type: registry-image + source: + repository: gcr.io/data-gpdb-public-images/gpdb6-centos6-build + tag: latest +- name: centos6-gpdb6-image-test + type: registry-image + source: + repository: gcr.io/data-gpdb-public-images/gpdb6-centos6-test + tag: latest +# centos7 +- name: centos7-gpdb6-image-build + type: registry-image + source: + repository: gcr.io/data-gpdb-public-images/gpdb6-centos7-build + tag: latest +- name: centos7-gpdb6-image-test + type: registry-image + source: + repository: gcr.io/data-gpdb-public-images/gpdb6-centos7-test + tag: latest +# rhel8 +- name: rhel8-gpdb6-image-build + type: registry-image + source: + repository: gcr.io/data-gpdb-private-images/gpdb6-rhel8-build + tag: latest + username: _json_key + password: ((container-registry-readonly-service-account-key)) +- name: rhel8-gpdb6-image-test + type: registry-image + source: + repository: gcr.io/data-gpdb-private-images/gpdb6-rhel8-test + tag: latest + username: _json_key + password: ((container-registry-readonly-service-account-key)) +# Ubuntu18 +- name: ubuntu18-gpdb6-image-build + type: registry-image + source: + repository: gcr.io/data-gpdb-public-images/gpdb6-ubuntu18.04-build + tag: latest +- name: ubuntu18-gpdb6-image-test + type: registry-image + source: + repository: gcr.io/data-gpdb-public-images/gpdb6-ubuntu18.04-test + tag: latest + +#! 
gpdb binary on gcs is located as different folder for different version +- name: bin_gpdb6_centos6 + type: gcs + source: + bucket: ((gcs-bucket-intermediates)) + json_key: ((concourse-gcs-resources-service-account-key)) + versioned_file: 6X_STABLE/bin_gpdb_centos6/bin_gpdb.tar.gz +- name: bin_gpdb6_centos7 + type: gcs + source: + bucket: ((gcs-bucket-intermediates)) + json_key: ((concourse-gcs-resources-service-account-key)) + versioned_file: 6X_STABLE/bin_gpdb_centos7/bin_gpdb.tar.gz +- name: bin_gpdb6_ubuntu18 + type: gcs + source: + bucket: ((gcs-bucket-intermediates)) + json_key: ((concourse-gcs-resources-service-account-key)) + versioned_file: 6X_STABLE/bin_gpdb_ubuntu18.04/bin_gpdb.tar.gz +- name: bin_gpdb6_rhel8 + type: gcs + source: + bucket: ((gcs-bucket-intermediates)) + json_key: ((concourse-gcs-resources-service-account-key)) + versioned_file: 6X_STABLE/bin_gpdb_rhel8/bin_gpdb.tar.gz diff --git a/concourse/pipeline/trigger_def.lib.yml b/concourse/pipeline/trigger_def.lib.yml new file mode 100644 index 00000000000..ad1928be38b --- /dev/null +++ b/concourse/pipeline/trigger_def.lib.yml @@ -0,0 +1,56 @@ +#@ load("base.lib.yml", "add_res_by_name") + +#! PR trigger. For pull request pipelines +#@ def pr_trigger(res_map): +#@ add_res_by_name(res_map, "diskquota_pr") +plan: + get: diskquota_src + resource: diskquota_pr + params: + fetch_tags: true + trigger: true +on_failure: + put: diskquota_pr + params: + path: diskquota_src + status: failure +on_error: + put: diskquota_pr + params: + path: diskquota_src + status: failure +on_success: + put: diskquota_pr + params: + path: diskquota_src + status: success +#@ end + +#! Commit trigger. For master pipelines +#@ def commit_trigger(res_map): +#@ add_res_by_name(res_map, "diskquota_commit") +plan: + get: diskquota_src + resource: diskquota_commit + trigger: true +#! To set the github commit status, https://github.com/Pix4D/cogito is a good choice. +#! Unfortunately it doesn't work with Concourse 5. +on_success: +on_failure: +on_error: +#@ end + +#! Commit trigger. For dev pipelines. No webhook +#@ def commit_dev_trigger(res_map): +#@ add_res_by_name(res_map, "diskquota_commit_dev") +plan: + get: diskquota_src + resource: diskquota_commit_dev + trigger: true +#! To set the github commit status, https://github.com/Pix4D/cogito is a good choice. +#! Unfortunately it doesn't work with Concourse 5. 
+on_success: +on_failure: +on_error: +#@ end + diff --git a/concourse/scripts/build_diskquota.sh b/concourse/scripts/build_diskquota.sh index d074ab76420..15eb94eb800 100755 --- a/concourse/scripts/build_diskquota.sh +++ b/concourse/scripts/build_diskquota.sh @@ -20,43 +20,30 @@ function pkg() { pushd /usr/local/greenplum-db-devel/ echo 'cp -r lib share $GPHOME || exit 1'> install_gpdb_component chmod a+x install_gpdb_component + install_files=( \ + "lib/postgresql/diskquota.so" \ + "share/postgresql/extension/diskquota.control" \ + "share/postgresql/extension/diskquota--1.0.sql" \ + "share/postgresql/extension/diskquota--2.0.sql" \ + "share/postgresql/extension/diskquota--1.0--2.0.sql" \ + "share/postgresql/extension/diskquota--2.0--1.0.sql" \ + "install_gpdb_component") case "$DISKQUOTA_OS" in rhel6) tar -czf $TOP_DIR/diskquota_artifacts/diskquota-${DISKQUOTA_VERSION}-rhel6_x86_64.tar.gz \ - lib/postgresql/diskquota.so \ - share/postgresql/extension/diskquota.control \ - share/postgresql/extension/diskquota--1.0.sql \ - share/postgresql/extension/diskquota--2.0.sql \ - share/postgresql/extension/diskquota--1.0--2.0.sql \ - share/postgresql/extension/diskquota--2.0--1.0.sql \ - install_gpdb_component + "${install_files[@]}" ;; rhel7) tar -czf $TOP_DIR/diskquota_artifacts/diskquota-${DISKQUOTA_VERSION}-rhel7_x86_64.tar.gz \ - lib/postgresql/diskquota.so \ - share/postgresql/extension/diskquota.control \ - share/postgresql/extension/diskquota--1.0.sql \ - share/postgresql/extension/diskquota--2.0.sql \ - share/postgresql/extension/diskquota--1.0--2.0.sql \ - share/postgresql/extension/diskquota--2.0--1.0.sql \ - install_gpdb_component + "${install_files[@]}" ;; rhel8) tar -czf $TOP_DIR/diskquota_artifacts/diskquota-${DISKQUOTA_VERSION}-rhel8_x86_64.tar.gz \ - lib/postgresql/diskquota.so \ - share/postgresql/extension/diskquota.control \ - share/postgresql/extension/diskquota--1.0.sql \ - install_gpdb_component + "${install_files[@]}" ;; ubuntu18.04) tar -czf $TOP_DIR/diskquota_artifacts/diskquota-${DISKQUOTA_VERSION}-ubuntu18.04_x86_64.tar.gz \ - lib/postgresql/diskquota.so \ - share/postgresql/extension/diskquota.control \ - share/postgresql/extension/diskquota--1.0.sql \ - share/postgresql/extension/diskquota--2.0.sql \ - share/postgresql/extension/diskquota--1.0--2.0.sql \ - share/postgresql/extension/diskquota--2.0--1.0.sql \ - install_gpdb_component + "${install_files[@]}" ;; *) echo "Unknown OS: $DISKQUOTA_OS"; exit 1 ;; esac diff --git a/concourse/scripts/test_diskquota.sh b/concourse/scripts/test_diskquota.sh index fec377dc531..eba67ba9c90 100755 --- a/concourse/scripts/test_diskquota.sh +++ b/concourse/scripts/test_diskquota.sh @@ -17,7 +17,6 @@ source "${TOP_DIR}/diskquota_src/concourse/scripts/test_common.sh" function create_fake_gpdb_src() { pushd gpdb_src ./configure --prefix=/usr/local/greenplum-db-devel \ - --with-perl --with-python --with-libxml \ --without-zstd \ --disable-orca --disable-gpcloud --enable-debug-extensions popd From 9ca7194deae05b773e3cfa37f697c1b8329c36b1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Xuebin=20Su=20=28=E8=8B=8F=E5=AD=A6=E6=96=8C=29?= <12034000+xuebinsu@users.noreply.github.com> Date: Mon, 21 Feb 2022 19:29:23 +0800 Subject: [PATCH 128/330] Specify DISTRIBUTED BY when CREATE TABLE (#146) When the distribution policy is not specified explicitly, ORCA distributes data randomly when CREATE TABLE. This can make the size of the tables created different from time to time. 
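For example (an illustrative sketch; the table name is hypothetical, not taken verbatim from the test suite):

    CREATE TABLE t_example(i int);                     -- no policy given: ORCA may fall back to random distribution
    CREATE TABLE t_example(i int) DISTRIBUTED BY (i);  -- hash-distributed on i, so per-segment sizes are reproducible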
This patch fixes the issue by adding DISTRIBUTED BY for each CREATE TABLE to avoid random distribution. --- README.md | 8 ++++---- diskquota--1.0.sql | 4 ++-- diskquota--2.0.sql | 6 +++--- tests/isolation2/expected/test_blackmap.out | 18 +++++++++--------- .../isolation2/expected/test_relation_size.out | 4 ++-- tests/isolation2/expected/test_truncate.out | 2 +- tests/isolation2/expected/test_vacuum.out | 2 +- tests/isolation2/sql/test_blackmap.sql | 18 +++++++++--------- tests/isolation2/sql/test_relation_size.sql | 4 ++-- tests/isolation2/sql/test_truncate.sql | 2 +- tests/isolation2/sql/test_vacuum.sql | 2 +- tests/regress/expected/test_appendonly.out | 4 ++-- tests/regress/expected/test_blackmap.out | 10 +++++----- tests/regress/expected/test_column.out | 2 +- tests/regress/expected/test_copy.out | 2 +- tests/regress/expected/test_ctas_pause.out | 4 ++-- tests/regress/expected/test_ctas_role.out | 8 ++++---- tests/regress/expected/test_ctas_schema.out | 8 ++++---- .../expected/test_ctas_tablespace_role.out | 10 +++++----- .../expected/test_ctas_tablespace_schema.out | 10 +++++----- tests/regress/expected/test_delete_quota.out | 2 +- .../regress/expected/test_drop_after_pause.out | 2 +- tests/regress/expected/test_drop_table.out | 4 ++-- tests/regress/expected/test_extension.out | 18 +++++++++--------- .../regress/expected/test_fast_disk_check.out | 2 +- .../regress/expected/test_fetch_table_stat.out | 2 +- tests/regress/expected/test_index.out | 2 +- .../expected/test_insert_after_drop.out | 2 +- .../regress/expected/test_pause_and_resume.out | 2 +- .../test_pause_and_resume_multiple_db.out | 6 +++--- .../regress/expected/test_primary_failure.out | 2 +- tests/regress/expected/test_relation_cache.out | 8 ++++---- tests/regress/expected/test_relation_size.out | 6 +++--- tests/regress/expected/test_rename.out | 4 ++-- tests/regress/expected/test_reschema.out | 2 +- tests/regress/expected/test_role.out | 4 ++-- tests/regress/expected/test_schema.out | 6 +++--- tests/regress/expected/test_table_size.out | 4 ++-- .../regress/expected/test_tablespace_role.out | 4 ++-- .../expected/test_tablespace_role_perseg.out | 2 +- .../expected/test_tablespace_schema.out | 4 ++-- .../expected/test_tablespace_schema_perseg.out | 2 +- tests/regress/expected/test_temp_role.out | 2 +- tests/regress/expected/test_toast.out | 2 +- tests/regress/expected/test_truncate.out | 4 ++-- .../expected/test_uncommitted_table_size.out | 12 ++++++------ tests/regress/expected/test_update.out | 2 +- tests/regress/expected/test_vacuum.out | 4 ++-- tests/regress/sql/test_appendonly.sql | 4 ++-- tests/regress/sql/test_blackmap.sql | 10 +++++----- tests/regress/sql/test_column.sql | 2 +- tests/regress/sql/test_copy.sql | 2 +- tests/regress/sql/test_ctas_pause.sql | 4 ++-- tests/regress/sql/test_ctas_role.sql | 8 ++++---- tests/regress/sql/test_ctas_schema.sql | 8 ++++---- .../regress/sql/test_ctas_tablespace_role.sql | 10 +++++----- .../sql/test_ctas_tablespace_schema.sql | 10 +++++----- tests/regress/sql/test_delete_quota.sql | 2 +- tests/regress/sql/test_drop_after_pause.sql | 2 +- tests/regress/sql/test_drop_table.sql | 4 ++-- tests/regress/sql/test_extension.sql | 18 +++++++++--------- tests/regress/sql/test_fast_disk_check.sql | 2 +- tests/regress/sql/test_fetch_table_stat.sql | 2 +- tests/regress/sql/test_index.sql | 2 +- tests/regress/sql/test_insert_after_drop.sql | 2 +- tests/regress/sql/test_pause_and_resume.sql | 2 +- .../sql/test_pause_and_resume_multiple_db.sql | 6 +++--- tests/regress/sql/test_primary_failure.sql | 
2 +- tests/regress/sql/test_relation_cache.sql | 8 ++++---- tests/regress/sql/test_relation_size.sql | 6 +++--- tests/regress/sql/test_rename.sql | 4 ++-- tests/regress/sql/test_reschema.sql | 2 +- tests/regress/sql/test_role.sql | 4 ++-- tests/regress/sql/test_schema.sql | 6 +++--- tests/regress/sql/test_table_size.sql | 4 ++-- tests/regress/sql/test_tablespace_role.sql | 4 ++-- .../sql/test_tablespace_role_perseg.sql | 2 +- tests/regress/sql/test_tablespace_schema.sql | 4 ++-- .../sql/test_tablespace_schema_perseg.sql | 2 +- tests/regress/sql/test_temp_role.sql | 2 +- tests/regress/sql/test_toast.sql | 2 +- tests/regress/sql/test_truncate.sql | 4 ++-- .../sql/test_uncommitted_table_size.sql | 12 ++++++------ tests/regress/sql/test_update.sql | 2 +- tests/regress/sql/test_vacuum.sql | 4 ++-- upgrade_test/expected/prepare.out | 4 ++-- upgrade_test/expected/set_config.out | 4 ++-- upgrade_test/expected/test_delete_quota.out | 2 +- upgrade_test/expected/test_rename.out | 4 ++-- upgrade_test/expected/test_reschema.out | 2 +- upgrade_test/expected/test_role.out | 4 ++-- upgrade_test/expected/test_schema.out | 4 ++-- upgrade_test/expected/test_temp_role.out | 2 +- upgrade_test/sql/prepare.sql | 4 ++-- upgrade_test/sql/set_config.sql | 4 ++-- upgrade_test/sql/test_delete_quota.sql | 2 +- upgrade_test/sql/test_rename.sql | 4 ++-- upgrade_test/sql/test_reschema.sql | 2 +- upgrade_test/sql/test_role.sql | 4 ++-- upgrade_test/sql/test_schema.sql | 4 ++-- upgrade_test/sql/test_temp_role.sql | 2 +- 101 files changed, 235 insertions(+), 235 deletions(-) diff --git a/README.md b/README.md index 188c7276799..c99d7b1cced 100644 --- a/README.md +++ b/README.md @@ -126,7 +126,7 @@ create schema s1; select diskquota.set_schema_quota('s1', '1 MB'); set search_path to s1; -create table a(i int); +create table a(i int) DISTRIBUTED BY (i); # insert small data succeeded insert into a select generate_series(1,100); # insert large data failed @@ -145,7 +145,7 @@ reset search_path; 2. Set/update/delete role quota limit using diskquota.set_role_quota ``` create role u1 nologin; -create table b (i int); +create table b (i int) DISTRIBUTED BY (i); alter table b owner to u1; select diskquota.set_role_quota('u1', '1 MB'); @@ -214,9 +214,9 @@ and do enfocement accordingly in later queries. ``` # suppose quota of schema s1 is 1MB. 
set search_path to s1; -create table b; +create table b (i int) DISTRIBUTED BY (i); BEGIN; -create table a; +create table a (i int) DISTRIBUTED BY (i); # Issue: quota enforcement doesn't work on table a insert into a select generate_series(1,200000); # quota enforcement works on table b diff --git a/diskquota--1.0.sql b/diskquota--1.0.sql index 7ce807e6583..6d950849cd0 100644 --- a/diskquota--1.0.sql +++ b/diskquota--1.0.sql @@ -6,7 +6,7 @@ CREATE SCHEMA diskquota; -- Configuration table -CREATE TABLE diskquota.quota_config (targetOid oid, quotatype int, quotalimitMB int8, PRIMARY KEY(targetOid, quotatype)); +CREATE TABLE diskquota.quota_config (targetOid oid, quotatype int, quotalimitMB int8, PRIMARY KEY(targetOid, quotatype)) DISTRIBUTED BY (targetOid, quotatype); SELECT pg_catalog.pg_extension_config_dump('diskquota.quota_config', ''); SELECT gp_segment_id, pg_catalog.pg_extension_config_dump('diskquota.quota_config', '') from gp_dist_random('gp_id'); @@ -23,7 +23,7 @@ LANGUAGE C; CREATE TABLE diskquota.table_size (tableid oid, size bigint, PRIMARY KEY(tableid)); -CREATE TABLE diskquota.state (state int, PRIMARY KEY(state)); +CREATE TABLE diskquota.state (state int, PRIMARY KEY(state)) DISTRIBUTED BY (state); INSERT INTO diskquota.state SELECT (count(relname) = 0)::int FROM pg_class AS c, pg_namespace AS n WHERE c.oid > 16384 and relnamespace = n.oid and nspname != 'diskquota'; diff --git a/diskquota--2.0.sql b/diskquota--2.0.sql index 057e9beaca5..34a9ee811b1 100644 --- a/diskquota--2.0.sql +++ b/diskquota--2.0.sql @@ -6,7 +6,7 @@ CREATE SCHEMA diskquota; -- Configuration table -CREATE TABLE diskquota.quota_config (targetOid oid, quotatype int, quotalimitMB int8, segratio float4 DEFAULT -1, PRIMARY KEY(targetOid, quotatype)); +CREATE TABLE diskquota.quota_config (targetOid oid, quotatype int, quotalimitMB int8, segratio float4 DEFAULT -1, PRIMARY KEY(targetOid, quotatype)) DISTRIBUTED BY (targetOid, quotatype); CREATE TABLE diskquota.target ( quotatype int, --REFERENCES disquota.quota_config.quotatype, @@ -62,9 +62,9 @@ LANGUAGE C; CREATE VIEW diskquota.blackmap AS SELECT * FROM diskquota.show_blackmap() AS BM; -CREATE TABLE diskquota.table_size (tableid oid, size bigint, segid smallint, PRIMARY KEY(tableid, segid)); +CREATE TABLE diskquota.table_size (tableid oid, size bigint, segid smallint, PRIMARY KEY(tableid, segid)) DISTRIBUTED BY (tableid, segid); -CREATE TABLE diskquota.state (state int, PRIMARY KEY(state)); +CREATE TABLE diskquota.state (state int, PRIMARY KEY(state)) DISTRIBUTED BY (state); INSERT INTO diskquota.state SELECT (count(relname) = 0)::int FROM pg_class AS c, pg_namespace AS n WHERE c.oid > 16384 and relnamespace = n.oid and nspname != 'diskquota'; diff --git a/tests/isolation2/expected/test_blackmap.out b/tests/isolation2/expected/test_blackmap.out index 25101fca9d7..11d5afff176 100644 --- a/tests/isolation2/expected/test_blackmap.out +++ b/tests/isolation2/expected/test_blackmap.out @@ -278,7 +278,7 @@ CREATE -- 7. Test that we are able to block an ordinary relation on seg0 by its relnamespace. 1: BEGIN; BEGIN -1: CREATE TABLE blocked_t7(i int); +1: CREATE TABLE blocked_t7(i int) DISTRIBUTED BY (i); CREATE 1: SELECT dump_relation_cache_to_file('/tmp/test_blackmap.csv'); dump_relation_cache_to_file @@ -323,7 +323,7 @@ SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]: -- 8. Test that we are able to block an ordinary relation on seg0 by its relowner. 
1: BEGIN; BEGIN -1: CREATE TABLE blocked_t7(i int); +1: CREATE TABLE blocked_t7(i int) DISTRIBUTED BY (i); CREATE 1: SELECT dump_relation_cache_to_file('/tmp/test_blackmap.csv'); dump_relation_cache_to_file @@ -368,7 +368,7 @@ SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]: -- 9. Test that we are able to block an ordinary relation on seg0 by its relnamespace and reltablespace. 1: BEGIN; BEGIN -1: CREATE TABLE blocked_t7(i int); +1: CREATE TABLE blocked_t7(i int) DISTRIBUTED BY (i); CREATE 1: SELECT dump_relation_cache_to_file('/tmp/test_blackmap.csv'); dump_relation_cache_to_file @@ -413,7 +413,7 @@ SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]: -- 10. Test that we are able to block an ordinary relation on seg0 by its relowner and reltablespace. 1: BEGIN; BEGIN -1: CREATE TABLE blocked_t7(i int); +1: CREATE TABLE blocked_t7(i int) DISTRIBUTED BY (i); CREATE 1: SELECT dump_relation_cache_to_file('/tmp/test_blackmap.csv'); dump_relation_cache_to_file @@ -458,7 +458,7 @@ SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]: -- 11. Test that we are able to block an ordinary relation on seg0 by its relnamespace and reltablespace (segexceeded=true). 1: BEGIN; BEGIN -1: CREATE TABLE blocked_t7(i int); +1: CREATE TABLE blocked_t7(i int) DISTRIBUTED BY (i); CREATE 1: SELECT dump_relation_cache_to_file('/tmp/test_blackmap.csv'); dump_relation_cache_to_file @@ -503,7 +503,7 @@ SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]: -- 12. Test that we are able to block an ordinary relation on seg0 by its relowner and reltablespace (segexceeded=true). 1: BEGIN; BEGIN -1: CREATE TABLE blocked_t7(i int); +1: CREATE TABLE blocked_t7(i int) DISTRIBUTED BY (i); CREATE 1: SELECT dump_relation_cache_to_file('/tmp/test_blackmap.csv'); dump_relation_cache_to_file @@ -548,7 +548,7 @@ SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]: -- 13. Test that we are able to block a toast relation on seg0 by its namespace. 1: BEGIN; BEGIN -1: CREATE TABLE blocked_t7(i text); +1: CREATE TABLE blocked_t7(i text) DISTRIBUTED BY (i); CREATE 1: SELECT dump_relation_cache_to_file('/tmp/test_blackmap.csv'); dump_relation_cache_to_file @@ -595,7 +595,7 @@ SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]: -- 14. Test that we are able to block an appendonly relation on seg0 by its namespace. 1: BEGIN; BEGIN -1: CREATE TABLE blocked_t7(i int) WITH (appendonly=true); +1: CREATE TABLE blocked_t7(i int) WITH (appendonly=true) DISTRIBUTED BY (i); CREATE 1: SELECT dump_relation_cache_to_file('/tmp/test_blackmap.csv'); dump_relation_cache_to_file @@ -643,7 +643,7 @@ SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]: -- 15. Test that we are able to block an appendonly (column oriented) relation on seg0 by its namespace. 1: BEGIN; BEGIN -1: CREATE TABLE blocked_t7(i int) WITH (appendonly=true, orientation=column); +1: CREATE TABLE blocked_t7(i int) WITH (appendonly=true, orientation=column) DISTRIBUTED BY (i); CREATE 1: SELECT dump_relation_cache_to_file('/tmp/test_blackmap.csv'); dump_relation_cache_to_file diff --git a/tests/isolation2/expected/test_relation_size.out b/tests/isolation2/expected/test_relation_size.out index 387f86555ae..b1dc3401a8d 100644 --- a/tests/isolation2/expected/test_relation_size.out +++ b/tests/isolation2/expected/test_relation_size.out @@ -4,7 +4,7 @@ -- the error and returns 0. 
-- -CREATE TABLE t_dropped(i int); +CREATE TABLE t_dropped(i int) DISTRIBUTED BY (i); CREATE -- Insert a small amount of data to 't_dropped'. INSERT INTO t_dropped SELECT generate_series(1, 100); @@ -51,7 +51,7 @@ SELECT gp_inject_fault_infinite('diskquota_before_stat_relfilenode', 'reset', db -- Since no row is deleted, diskquota.relation_size() should be equal to -- pg_relation_size(). -CREATE TABLE t_ao(i int) WITH (appendonly=true); +CREATE TABLE t_ao(i int) WITH (appendonly=true) DISTRIBUTED BY (i); CREATE 1: BEGIN; BEGIN diff --git a/tests/isolation2/expected/test_truncate.out b/tests/isolation2/expected/test_truncate.out index dd195548485..d176b404eda 100644 --- a/tests/isolation2/expected/test_truncate.out +++ b/tests/isolation2/expected/test_truncate.out @@ -1,7 +1,7 @@ -- Test various race conditions for TRUNCATE. -- Case 1: Pulling active table before swapping relfilenode -CREATE TABLE dummy_t1(i int); +CREATE TABLE dummy_t1(i int) DISTRIBUTED BY (i); CREATE INSERT INTO dummy_t1 SELECT generate_series(1, 1000); INSERT 1000 diff --git a/tests/isolation2/expected/test_vacuum.out b/tests/isolation2/expected/test_vacuum.out index 978aaf2c446..47eb944d968 100644 --- a/tests/isolation2/expected/test_vacuum.out +++ b/tests/isolation2/expected/test_vacuum.out @@ -13,7 +13,7 @@ -- the old relation's size cannot be updated. We resolve it by making altered relations' oids -- constantly active so that the diskquota bgworker keeps updating the altered relation size -- during 'VACUUM FULL'. -CREATE TABLE dummy_t1(i int); +CREATE TABLE dummy_t1(i int) DISTRIBUTED BY (i); CREATE INSERT INTO dummy_t1 SELECT generate_series(1, 1000); INSERT 1000 diff --git a/tests/isolation2/sql/test_blackmap.sql b/tests/isolation2/sql/test_blackmap.sql index f95c5380237..1e7cd74b28e 100644 --- a/tests/isolation2/sql/test_blackmap.sql +++ b/tests/isolation2/sql/test_blackmap.sql @@ -283,7 +283,7 @@ LANGUAGE 'plpgsql'; -- 7. Test that we are able to block an ordinary relation on seg0 by its relnamespace. 1: BEGIN; -1: CREATE TABLE blocked_t7(i int); +1: CREATE TABLE blocked_t7(i int) DISTRIBUTED BY (i); 1: SELECT dump_relation_cache_to_file('/tmp/test_blackmap.csv'); -- Inject 'suspension' to check_blackmap_by_relfilenode on seg0. SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'suspend', dbid) @@ -308,7 +308,7 @@ SELECT diskquota.refresh_blackmap( -- 8. Test that we are able to block an ordinary relation on seg0 by its relowner. 1: BEGIN; -1: CREATE TABLE blocked_t7(i int); +1: CREATE TABLE blocked_t7(i int) DISTRIBUTED BY (i); 1: SELECT dump_relation_cache_to_file('/tmp/test_blackmap.csv'); -- Inject 'suspension' to check_blackmap_by_relfilenode on seg0. SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'suspend', dbid) @@ -333,7 +333,7 @@ SELECT diskquota.refresh_blackmap( -- 9. Test that we are able to block an ordinary relation on seg0 by its relnamespace and reltablespace. 1: BEGIN; -1: CREATE TABLE blocked_t7(i int); +1: CREATE TABLE blocked_t7(i int) DISTRIBUTED BY (i); 1: SELECT dump_relation_cache_to_file('/tmp/test_blackmap.csv'); -- Inject 'suspension' to check_blackmap_by_relfilenode on seg0. SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'suspend', dbid) @@ -358,7 +358,7 @@ SELECT diskquota.refresh_blackmap( -- 10. Test that we are able to block an ordinary relation on seg0 by its relowner and reltablespace. 
1: BEGIN; -1: CREATE TABLE blocked_t7(i int); +1: CREATE TABLE blocked_t7(i int) DISTRIBUTED BY (i); 1: SELECT dump_relation_cache_to_file('/tmp/test_blackmap.csv'); -- Inject 'suspension' to check_blackmap_by_relfilenode on seg0. SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'suspend', dbid) @@ -383,7 +383,7 @@ SELECT diskquota.refresh_blackmap( -- 11. Test that we are able to block an ordinary relation on seg0 by its relnamespace and reltablespace (segexceeded=true). 1: BEGIN; -1: CREATE TABLE blocked_t7(i int); +1: CREATE TABLE blocked_t7(i int) DISTRIBUTED BY (i); 1: SELECT dump_relation_cache_to_file('/tmp/test_blackmap.csv'); -- Inject 'suspension' to check_blackmap_by_relfilenode on seg0. SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'suspend', dbid) @@ -408,7 +408,7 @@ SELECT diskquota.refresh_blackmap( -- 12. Test that we are able to block an ordinary relation on seg0 by its relowner and reltablespace (segexceeded=true). 1: BEGIN; -1: CREATE TABLE blocked_t7(i int); +1: CREATE TABLE blocked_t7(i int) DISTRIBUTED BY (i); 1: SELECT dump_relation_cache_to_file('/tmp/test_blackmap.csv'); -- Inject 'suspension' to check_blackmap_by_relfilenode on seg0. SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'suspend', dbid) @@ -433,7 +433,7 @@ SELECT diskquota.refresh_blackmap( -- 13. Test that we are able to block a toast relation on seg0 by its namespace. 1: BEGIN; -1: CREATE TABLE blocked_t7(i text); +1: CREATE TABLE blocked_t7(i text) DISTRIBUTED BY (i); 1: SELECT dump_relation_cache_to_file('/tmp/test_blackmap.csv'); -- Inject 'suspension' to check_blackmap_by_relfilenode on seg0. SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'suspend', dbid) @@ -460,7 +460,7 @@ SELECT diskquota.refresh_blackmap( -- 14. Test that we are able to block an appendonly relation on seg0 by its namespace. 1: BEGIN; -1: CREATE TABLE blocked_t7(i int) WITH (appendonly=true); +1: CREATE TABLE blocked_t7(i int) WITH (appendonly=true) DISTRIBUTED BY (i); 1: SELECT dump_relation_cache_to_file('/tmp/test_blackmap.csv'); -- Inject 'suspension' to check_blackmap_by_relfilenode on seg0. SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'suspend', dbid) @@ -487,7 +487,7 @@ SELECT diskquota.refresh_blackmap( -- 15. Test that we are able to block an appendonly (column oriented) relation on seg0 by its namespace. 1: BEGIN; -1: CREATE TABLE blocked_t7(i int) WITH (appendonly=true, orientation=column); +1: CREATE TABLE blocked_t7(i int) WITH (appendonly=true, orientation=column) DISTRIBUTED BY (i); 1: SELECT dump_relation_cache_to_file('/tmp/test_blackmap.csv'); -- Inject 'suspension' to check_blackmap_by_relfilenode on seg0. SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'suspend', dbid) diff --git a/tests/isolation2/sql/test_relation_size.sql b/tests/isolation2/sql/test_relation_size.sql index d06cdfb4dfc..4ccf61b104a 100644 --- a/tests/isolation2/sql/test_relation_size.sql +++ b/tests/isolation2/sql/test_relation_size.sql @@ -4,7 +4,7 @@ -- the error and returns 0. -- -CREATE TABLE t_dropped(i int); +CREATE TABLE t_dropped(i int) DISTRIBUTED BY (i); -- Insert a small amount of data to 't_dropped'. INSERT INTO t_dropped SELECT generate_series(1, 100); -- Shows that the size of relfilenode is not zero. @@ -28,7 +28,7 @@ SELECT gp_inject_fault_infinite('diskquota_before_stat_relfilenode', 'reset', db -- Since no row is deleted, diskquota.relation_size() should be equal to -- pg_relation_size(). 
-CREATE TABLE t_ao(i int) WITH (appendonly=true); +CREATE TABLE t_ao(i int) WITH (appendonly=true) DISTRIBUTED BY (i); 1: BEGIN; 1: INSERT INTO t_ao SELECT generate_series(1, 10000); 2: BEGIN; diff --git a/tests/isolation2/sql/test_truncate.sql b/tests/isolation2/sql/test_truncate.sql index 957d554e8f6..5bce332053f 100644 --- a/tests/isolation2/sql/test_truncate.sql +++ b/tests/isolation2/sql/test_truncate.sql @@ -1,7 +1,7 @@ -- Test various race conditions for TRUNCATE. -- Case 1: Pulling active table before swapping relfilenode -CREATE TABLE dummy_t1(i int); +CREATE TABLE dummy_t1(i int) DISTRIBUTED BY (i); INSERT INTO dummy_t1 SELECT generate_series(1, 1000); -- Wait for the diskquota bgworker refreshing the size of 'dummy_t1'. SELECT diskquota.wait_for_worker_new_epoch(); diff --git a/tests/isolation2/sql/test_vacuum.sql b/tests/isolation2/sql/test_vacuum.sql index c724e50a8d8..cf46bb40ddf 100644 --- a/tests/isolation2/sql/test_vacuum.sql +++ b/tests/isolation2/sql/test_vacuum.sql @@ -13,7 +13,7 @@ -- the old relation's size cannot be updated. We resolve it by making altered relations' oids -- constantly active so that the diskquota bgworker keeps updating the altered relation size -- during 'VACUUM FULL'. -CREATE TABLE dummy_t1(i int); +CREATE TABLE dummy_t1(i int) DISTRIBUTED BY (i); INSERT INTO dummy_t1 SELECT generate_series(1, 1000); DELETE FROM dummy_t1; -- Wait for the diskquota bgworker refreshing the size of 'dummy_t1'. diff --git a/tests/regress/expected/test_appendonly.out b/tests/regress/expected/test_appendonly.out index 5775a0de4fe..b2802aa9b5c 100644 --- a/tests/regress/expected/test_appendonly.out +++ b/tests/regress/expected/test_appendonly.out @@ -1,10 +1,10 @@ -- Create new schema for running tests. CREATE SCHEMA s_appendonly; SET search_path TO s_appendonly; -CREATE TABLE t_ao(i int) WITH (appendonly=true); +CREATE TABLE t_ao(i int) WITH (appendonly=true) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. -CREATE TABLE t_aoco(i int) WITH (appendonly=true, orientation=column); +CREATE TABLE t_aoco(i int) WITH (appendonly=true, orientation=column) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. -- Create an index on t_ao so that there will be pg_aoblkdir_XXX relations. diff --git a/tests/regress/expected/test_blackmap.out b/tests/regress/expected/test_blackmap.out index eae4fe65634..9c9470bb079 100644 --- a/tests/regress/expected/test_blackmap.out +++ b/tests/regress/expected/test_blackmap.out @@ -56,7 +56,7 @@ LANGUAGE 'plpgsql'; -- 1. Create an ordinary table and add its oid to blackmap on seg0. -- Check that it's relfilenode is blocked on seg0 by variouts conditions. -- -CREATE TABLE blocked_t1(i int); +CREATE TABLE blocked_t1(i int) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
-- Insert an entry for blocked_t1 to blackmap on seg0. @@ -139,7 +139,7 @@ SELECT rel.relname, be.target_type, -- 2. Test that the relfilenodes of toast relation together with its -- index are blocked on seg0. -- -CREATE TABLE blocked_t2(i text); +CREATE TABLE blocked_t2(i text) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. -- Insert an entry for blocked_t2 to blackmap on seg0. @@ -169,7 +169,7 @@ SELECT replace_oid_with_relname(rel.relname), -- 3. Test that the relfilenodes of appendonly relation (row oriented) together with its -- auxiliary relations are blocked on seg0. -- -CREATE TABLE blocked_t3(i int) WITH (appendonly=true); +CREATE TABLE blocked_t3(i int) WITH (appendonly=true) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. CREATE INDEX blocked_t3_index ON blocked_t3(i); @@ -203,7 +203,7 @@ SELECT replace_oid_with_relname(rel.relname), -- 4. Test that the relfilenodes of appendonly relation (column oriented) together with its -- auxiliary relations are blocked on seg0. -- -CREATE TABLE blocked_t4(i int) WITH (appendonly=true, orientation=column); +CREATE TABLE blocked_t4(i int) WITH (appendonly=true, orientation=column) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. CREATE INDEX blocked_t4_index ON blocked_t4(i); @@ -237,7 +237,7 @@ SELECT replace_oid_with_relname(rel.relname), -- 5. Test that the relfilenodes of toast appendonly relation (row oriented) together with its -- auxiliary relations are blocked on seg0. -- -CREATE TABLE blocked_t5(i text) WITH (appendonly=true, orientation=column); +CREATE TABLE blocked_t5(i text) WITH (appendonly=true, orientation=column) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. CREATE INDEX blocked_t5_index ON blocked_t5(i); diff --git a/tests/regress/expected/test_column.out b/tests/regress/expected/test_column.out index 79e4450ffdb..dd07de93e88 100644 --- a/tests/regress/expected/test_column.out +++ b/tests/regress/expected/test_column.out @@ -13,7 +13,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); t (1 row) -CREATE TABLE a2(i INT); +CREATE TABLE a2(i INT) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
-- expect fail diff --git a/tests/regress/expected/test_copy.out b/tests/regress/expected/test_copy.out index bebe959dbef..b0cde72fe9a 100644 --- a/tests/regress/expected/test_copy.out +++ b/tests/regress/expected/test_copy.out @@ -8,7 +8,7 @@ SELECT diskquota.set_schema_quota('s3', '1 MB'); SET search_path TO s3; \! seq 100 > /tmp/csmall.txt -CREATE TABLE c (i int); +CREATE TABLE c (i int) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. COPY c FROM '/tmp/csmall.txt'; diff --git a/tests/regress/expected/test_ctas_pause.out b/tests/regress/expected/test_ctas_pause.out index ae68b31acf9..e3edc4395e9 100644 --- a/tests/regress/expected/test_ctas_pause.out +++ b/tests/regress/expected/test_ctas_pause.out @@ -15,7 +15,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); (1 row) -- heap table -CREATE TABLE t1 AS SELECT generate_series(1,1000000); -- expect fail +CREATE TABLE t1 (i) AS SELECT generate_series(1,1000000) DISTRIBUTED BY (i); -- expect fail NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. ERROR: schema's disk space quota exceeded with name:110528 (seg1 127.0.0.1:6003 pid=73892) @@ -25,7 +25,7 @@ SELECT diskquota.pause(); (1 row) -CREATE TABLE t1 AS SELECT generate_series(1,1000000); -- expect succeed +CREATE TABLE t1 (i) AS SELECT generate_series(1,1000000) DISTRIBUTED BY (i); -- expect succeed NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. -- disable hardlimit and do some clean-ups. diff --git a/tests/regress/expected/test_ctas_role.out b/tests/regress/expected/test_ctas_role.out index 07c79dc6eae..db688a3fd73 100644 --- a/tests/regress/expected/test_ctas_role.out +++ b/tests/regress/expected/test_ctas_role.out @@ -12,7 +12,7 @@ SELECT diskquota.set_role_quota('hardlimit_r', '1MB'); GRANT USAGE ON SCHEMA diskquota TO hardlimit_r; SET ROLE hardlimit_r; -- heap table -CREATE TABLE t1 AS SELECT generate_series(1, 10000000); +CREATE TABLE t1 (i) AS SELECT generate_series(1, 10000000) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. [hardlimit] role's disk space quota exceeded @@ -23,7 +23,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); (1 row) -- temp table -CREATE TEMP TABLE t2 AS SELECT generate_series(1, 100000000); +CREATE TEMP TABLE t2 (i) AS SELECT generate_series(1, 100000000) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data.
Make sure column(s) chosen are the optimal data distribution key to minimize skew. [hardlimit] role's disk space quota exceeded @@ -34,7 +34,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); (1 row) -- toast table -CREATE TABLE toast_table AS SELECT ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000); +CREATE TABLE toast_table (i) AS SELECT ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'array' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. [hardlimit] role's disk space quota exceeded @@ -45,7 +45,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); (1 row) -- ao table -CREATE TABLE ao_table WITH (appendonly=true) AS SELECT generate_series(1, 100000000); +CREATE TABLE ao_table (i) WITH (appendonly=true) AS SELECT generate_series(1, 100000000) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. [hardlimit] role's disk space quota exceeded diff --git a/tests/regress/expected/test_ctas_schema.out b/tests/regress/expected/test_ctas_schema.out index 81a1fb613ed..157ec6181ac 100644 --- a/tests/regress/expected/test_ctas_schema.out +++ b/tests/regress/expected/test_ctas_schema.out @@ -10,7 +10,7 @@ SELECT diskquota.set_schema_quota('hardlimit_s', '1 MB'); SET search_path TO hardlimit_s; -- heap table -CREATE TABLE t1 AS SELECT generate_series(1, 100000000); +CREATE TABLE t1 (i) AS SELECT generate_series(1, 100000000) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. [hardlimit] schema's disk space quota exceeded @@ -21,8 +21,8 @@ SELECT diskquota.wait_for_worker_new_epoch(); (1 row) -- toast table -CREATE TABLE toast_table - AS SELECT ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000); +CREATE TABLE toast_table (i) + AS SELECT ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'array' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. [hardlimit] schema's disk space quota exceeded @@ -33,7 +33,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); (1 row) -- ao table -CREATE TABLE ao_table WITH (appendonly=true) AS SELECT generate_series(1, 100000000); +CREATE TABLE ao_table (i) WITH (appendonly=true) AS SELECT generate_series(1, 100000000) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. 
Make sure column(s) chosen are the optimal data distribution key to minimize skew. [hardlimit] schema's disk space quota exceeded diff --git a/tests/regress/expected/test_ctas_tablespace_role.out b/tests/regress/expected/test_ctas_tablespace_role.out index bb551201223..6443c3bd585 100644 --- a/tests/regress/expected/test_ctas_tablespace_role.out +++ b/tests/regress/expected/test_ctas_tablespace_role.out @@ -21,7 +21,7 @@ SELECT diskquota.set_role_tablespace_quota('hardlimit_r', 'ctas_rolespc', '1 MB' SET default_tablespace = ctas_rolespc; SET ROLE hardlimit_r; -- heap table -CREATE TABLE t1 AS SELECT generate_series(1, 100000000); +CREATE TABLE t1 (i) AS SELECT generate_series(1, 100000000) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. [hardlimit] tablespace-role's disk space quota exceeded @@ -32,8 +32,8 @@ SELECT diskquota.wait_for_worker_new_epoch(); (1 row) -- toast table -CREATE TABLE toast_table - AS SELECT ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000); +CREATE TABLE toast_table (i) + AS SELECT ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'array' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. [hardlimit] tablespace-role's disk space quota exceeded @@ -44,7 +44,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); (1 row) -- ao table -CREATE TABLE ao_table WITH (appendonly=true) AS SELECT generate_series(1, 100000000); +CREATE TABLE ao_table (i) WITH (appendonly=true) AS SELECT generate_series(1, 100000000) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. [hardlimit] tablespace-role's disk space quota exceeded @@ -56,7 +56,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- aocs table CREATE TABLE aocs_table WITH (appendonly=true, orientation=column) - AS SELECT i, ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000) AS i; + AS SELECT i, ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000) AS i DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
[hardlimit] tablespace-role's disk space quota exceeded diff --git a/tests/regress/expected/test_ctas_tablespace_schema.out b/tests/regress/expected/test_ctas_tablespace_schema.out index 5d62898953b..58fdaac36bb 100644 --- a/tests/regress/expected/test_ctas_tablespace_schema.out +++ b/tests/regress/expected/test_ctas_tablespace_schema.out @@ -18,7 +18,7 @@ SELECT diskquota.set_schema_tablespace_quota('hardlimit_s', 'ctas_schemaspc', '1 SET search_path TO hardlimit_s; SET default_tablespace = ctas_schemaspc; -- heap table -CREATE TABLE t1 AS SELECT generate_series(1, 100000000); +CREATE TABLE t1 (i) AS SELECT generate_series(1, 100000000) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. [hardlimit] tablespace-schema's disk space quota exceeded @@ -29,8 +29,8 @@ SELECT diskquota.wait_for_worker_new_epoch(); (1 row) -- toast table -CREATE TABLE toast_table - AS SELECT ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000); +CREATE TABLE toast_table (i) + AS SELECT ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'array' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. [hardlimit] tablespace-schema's disk space quota exceeded @@ -41,7 +41,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); (1 row) -- ao table -CREATE TABLE ao_table WITH (appendonly=true) AS SELECT generate_series(1, 100000000); +CREATE TABLE ao_table (i) WITH (appendonly=true) AS SELECT generate_series(1, 100000000) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. [hardlimit] tablespace-schema's disk space quota exceeded @@ -53,7 +53,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- aocs table CREATE TABLE aocs_table WITH (appendonly=true, orientation=column) - AS SELECT i, ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000) AS i; + AS SELECT i, ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000) AS i DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
[hardlimit] tablespace-schema's disk space quota exceeded diff --git a/tests/regress/expected/test_delete_quota.out b/tests/regress/expected/test_delete_quota.out index aca3418e315..76ae43d429e 100644 --- a/tests/regress/expected/test_delete_quota.out +++ b/tests/regress/expected/test_delete_quota.out @@ -7,7 +7,7 @@ SELECT diskquota.set_schema_quota('deleteschema', '1 MB'); (1 row) SET search_path TO deleteschema; -CREATE TABLE c (i INT); +CREATE TABLE c (i INT) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. -- expect failed diff --git a/tests/regress/expected/test_drop_after_pause.out b/tests/regress/expected/test_drop_after_pause.out index 26451fd799e..71c49c20494 100644 --- a/tests/regress/expected/test_drop_after_pause.out +++ b/tests/regress/expected/test_drop_after_pause.out @@ -30,7 +30,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); \! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null \! gpstop -u > /dev/null CREATE SCHEMA SX; -CREATE TABLE SX.a(i int); +CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. SELECT diskquota.set_schema_quota('SX', '1MB'); diff --git a/tests/regress/expected/test_drop_table.out b/tests/regress/expected/test_drop_table.out index 31ac1879184..a91d6d520d6 100644 --- a/tests/regress/expected/test_drop_table.out +++ b/tests/regress/expected/test_drop_table.out @@ -7,10 +7,10 @@ SELECT diskquota.set_schema_quota('sdrtbl', '1 MB'); (1 row) SET search_path TO sdrtbl; -CREATE TABLE a(i INT); +CREATE TABLE a(i INT) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. -CREATE TABLE a2(i INT); +CREATE TABLE a2(i INT) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT INTO a SELECT generate_series(1,100); diff --git a/tests/regress/expected/test_extension.out b/tests/regress/expected/test_extension.out index 8f56ce29db7..01353a86882 100644 --- a/tests/regress/expected/test_extension.out +++ b/tests/regress/expected/test_extension.out @@ -32,7 +32,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); (1 row) CREATE SCHEMA SX; -CREATE TABLE SX.a(i int); +CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
SELECT diskquota.set_schema_quota('SX', '1MB'); @@ -53,7 +53,7 @@ ERROR: schema's disk space quota exceeded with name:sx DROP TABLE SX.a; \c dbx1 CREATE SCHEMA SX; -CREATE TABLE SX.a(i int); +CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT INTO SX.a values(generate_series(0, 100000)); @@ -93,7 +93,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); \! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 5 CREATE SCHEMA SX; -CREATE TABLE SX.a(i int); +CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. SELECT diskquota.set_schema_quota('SX', '1MB'); @@ -123,7 +123,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); (1 row) CREATE SCHEMA SX; -CREATE TABLE SX.a(i int); +CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. SELECT diskquota.set_schema_quota('SX', '1MB'); @@ -153,7 +153,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); (1 row) CREATE SCHEMA SX; -CREATE TABLE SX.a(i int); +CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. SELECT diskquota.set_schema_quota('SX', '1MB'); @@ -183,7 +183,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); (1 row) CREATE SCHEMA SX; -CREATE TABLE SX.a(i int); +CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. SELECT diskquota.set_schema_quota('SX', '1MB'); @@ -213,7 +213,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); (1 row) CREATE SCHEMA SX; -CREATE TABLE SX.a(i int); +CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. SELECT diskquota.set_schema_quota('SX', '1MB'); @@ -243,7 +243,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); (1 row) CREATE SCHEMA SX; -CREATE TABLE SX.a(i int); +CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. 
HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. SELECT diskquota.set_schema_quota('SX', '1MB'); @@ -273,7 +273,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); (1 row) CREATE SCHEMA SX; -CREATE TABLE SX.a(i int); +CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. SELECT diskquota.set_schema_quota('SX', '1MB'); diff --git a/tests/regress/expected/test_fast_disk_check.out b/tests/regress/expected/test_fast_disk_check.out index 5178157235c..d309df39467 100644 --- a/tests/regress/expected/test_fast_disk_check.out +++ b/tests/regress/expected/test_fast_disk_check.out @@ -1,7 +1,7 @@ -- Test SCHEMA CREATE SCHEMA s1; SET search_path to s1; -CREATE TABLE a(i int); +CREATE TABLE a(i int) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT INTO a SELECT generate_series(1,200000); diff --git a/tests/regress/expected/test_fetch_table_stat.out b/tests/regress/expected/test_fetch_table_stat.out index 45f1a7e97ef..47d6bf313a3 100644 --- a/tests/regress/expected/test_fetch_table_stat.out +++ b/tests/regress/expected/test_fetch_table_stat.out @@ -2,7 +2,7 @@ -- 1. Test that when an error occurs in diskquota_fetch_table_stat -- the error message is preserved for us to debug. -- -CREATE TABLE t_error_handling (i int); +CREATE TABLE t_error_handling (i int) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. -- Inject an error to a segment server, since this UDF is only called on segments. diff --git a/tests/regress/expected/test_index.out b/tests/regress/expected/test_index.out index dd66f0921c1..f1077c9643b 100644 --- a/tests/regress/expected/test_index.out +++ b/tests/regress/expected/test_index.out @@ -7,7 +7,7 @@ DROP TABLESPACE IF EXISTS indexspc; NOTICE: tablespace "indexspc" does not exist, skipping CREATE TABLESPACE indexspc LOCATION '/tmp/indexspc'; SET search_path TO indexschema1; -CREATE TABLE test_index_a(i int) TABLESPACE indexspc; +CREATE TABLE test_index_a(i int) TABLESPACE indexspc DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
INSERT INTO test_index_a SELECT generate_series(1,20000); diff --git a/tests/regress/expected/test_insert_after_drop.out b/tests/regress/expected/test_insert_after_drop.out index 49440f46f0d..1a1fe8f6970 100644 --- a/tests/regress/expected/test_insert_after_drop.out +++ b/tests/regress/expected/test_insert_after_drop.out @@ -10,7 +10,7 @@ SELECT diskquota.set_schema_quota('sdrtbl', '1 MB'); (1 row) SET search_path TO sdrtbl; -CREATE TABLE a(i int); +CREATE TABLE a(i int) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT INTO a SELECT generate_series(1,100); diff --git a/tests/regress/expected/test_pause_and_resume.out b/tests/regress/expected/test_pause_and_resume.out index a2ecfd94c26..986f59e4c8a 100644 --- a/tests/regress/expected/test_pause_and_resume.out +++ b/tests/regress/expected/test_pause_and_resume.out @@ -1,7 +1,7 @@ -- Test pause and resume. CREATE SCHEMA s1; SET search_path TO s1; -CREATE TABLE a(i int); +CREATE TABLE a(i int) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. -- expect insert succeed diff --git a/tests/regress/expected/test_pause_and_resume_multiple_db.out b/tests/regress/expected/test_pause_and_resume_multiple_db.out index d7ce4f66c8d..f501c91181a 100644 --- a/tests/regress/expected/test_pause_and_resume_multiple_db.out +++ b/tests/regress/expected/test_pause_and_resume_multiple_db.out @@ -14,12 +14,12 @@ SELECT diskquota.wait_for_worker_new_epoch(); (1 row) \c contrib_regression -CREATE TABLE s1.a(i int); +CREATE TABLE s1.a(i int) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT INTO s1.a SELECT generate_series(1,100000); -- expect insert succeed \c test_pause_and_resume -CREATE TABLE s1.a(i int); +CREATE TABLE s1.a(i int) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT INTO s1.a SELECT generate_series(1,100000); -- expect insert succeed @@ -110,7 +110,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- new database should be active a t (1 row) -CREATE TABLE s1.a(i int); +CREATE TABLE s1.a(i int) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
INSERT INTO s1.a SELECT generate_series(1,100000); -- expect insert succeed diff --git a/tests/regress/expected/test_primary_failure.out b/tests/regress/expected/test_primary_failure.out index f9b7c779a59..930148fc108 100644 --- a/tests/regress/expected/test_primary_failure.out +++ b/tests/regress/expected/test_primary_failure.out @@ -30,7 +30,7 @@ returns text as $$ cmd = 'gprecoverseg -%s -d %s; exit 0; ' % (command, datadir) return subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).replace('.', '') $$ language plpythonu; -CREATE TABLE a(i int); +CREATE TABLE a(i int) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT INTO a SELECT generate_series(1,100); diff --git a/tests/regress/expected/test_relation_cache.out b/tests/regress/expected/test_relation_cache.out index 862cf512aa8..52a3efb45ff 100644 --- a/tests/regress/expected/test_relation_cache.out +++ b/tests/regress/expected/test_relation_cache.out @@ -12,7 +12,7 @@ end; $$ LANGUAGE plpgsql; -- heap table begin; -create table t(i int); +create table t(i int) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. insert into t select generate_series(1, 100000); @@ -38,7 +38,7 @@ select count(*) from diskquota.show_relation_cache_all_seg(); drop table t; -- toast table begin; -create table t(t text); +create table t(t text) DISTRIBUTED BY (t); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 't' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. insert into t select array(select * from generate_series(1,1000)) from generate_series(1, 1000); @@ -70,7 +70,7 @@ select count(*) from diskquota.show_relation_cache_all_seg(); drop table t; -- AO table begin; -create table t(a int, b text) with(appendonly=true); +create table t(a int, b text) with(appendonly=true) DISTRIBUTED BY (a); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. insert into t select generate_series(1,1000) as a, repeat('a', 1000) as b; @@ -102,7 +102,7 @@ select count(*) from diskquota.show_relation_cache_all_seg(); drop table t; -- AOCS table begin; -create table t(a int, b text) with(appendonly=true, orientation=column); +create table t(a int, b text) with(appendonly=true, orientation=column) DISTRIBUTED BY (a); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
insert into t select generate_series(1,1000) as a, repeat('a', 1000) as b; diff --git a/tests/regress/expected/test_relation_size.out b/tests/regress/expected/test_relation_size.out index 49292c9ccf1..69aa64a79f7 100644 --- a/tests/regress/expected/test_relation_size.out +++ b/tests/regress/expected/test_relation_size.out @@ -14,7 +14,7 @@ SELECT pg_table_size('t1'); 688128 (1 row) -CREATE TABLE t2(i int); +CREATE TABLE t2(i int) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT INTO t2 SELECT generate_series(1, 10000); @@ -66,7 +66,7 @@ SELECT pg_table_size('t2'); DROP TABLE t1, t2; DROP TABLESPACE test_spc; -CREATE TABLE ao (i int) WITH (appendonly=true); +CREATE TABLE ao (i int) WITH (appendonly=true) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT INTO ao SELECT generate_series(1, 10000); @@ -83,7 +83,7 @@ SELECT pg_relation_size('ao'); (1 row) DROP TABLE ao; -CREATE TABLE aocs (i int, t text) WITH (appendonly=true, orientation=column); +CREATE TABLE aocs (i int, t text) WITH (appendonly=true, orientation=column) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT INTO aocs SELECT i, repeat('a', 1000) FROM generate_series(1, 10000) AS i; diff --git a/tests/regress/expected/test_rename.out b/tests/regress/expected/test_rename.out index 53e69a03873..e8a2fcc6e89 100644 --- a/tests/regress/expected/test_rename.out +++ b/tests/regress/expected/test_rename.out @@ -7,7 +7,7 @@ SELECT diskquota.set_schema_quota('srs1', '1 MB'); (1 row) set search_path to srs1; -CREATE TABLE a(i int); +CREATE TABLE a(i int) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. -- expect insert fail @@ -45,7 +45,7 @@ SELECT diskquota.set_role_quota('srerole', '1MB'); (1 row) SET search_path TO srr1; -CREATE TABLE a(i int); +CREATE TABLE a(i int) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
ALTER TABLE a OWNER TO srerole; diff --git a/tests/regress/expected/test_reschema.out b/tests/regress/expected/test_reschema.out index 3fe38be06d5..7d1557715ba 100644 --- a/tests/regress/expected/test_reschema.out +++ b/tests/regress/expected/test_reschema.out @@ -7,7 +7,7 @@ SELECT diskquota.set_schema_quota('srE', '1 MB'); (1 row) SET search_path TO srE; -CREATE TABLE a(i int); +CREATE TABLE a(i int) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. -- expect insert fail diff --git a/tests/regress/expected/test_role.out b/tests/regress/expected/test_role.out index 69f89ed33ff..9507912e9b6 100644 --- a/tests/regress/expected/test_role.out +++ b/tests/regress/expected/test_role.out @@ -5,11 +5,11 @@ CREATE ROLE u1 NOLOGIN; NOTICE: resource queue required -- using default resource queue "pg_default" CREATE ROLE u2 NOLOGIN; NOTICE: resource queue required -- using default resource queue "pg_default" -CREATE TABLE b (t TEXT); +CREATE TABLE b (t TEXT) DISTRIBUTED BY (t); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 't' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. ALTER TABLE b OWNER TO u1; -CREATE TABLE b2 (t TEXT); +CREATE TABLE b2 (t TEXT) DISTRIBUTED BY (t); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 't' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. ALTER TABLE b2 OWNER TO u1; diff --git a/tests/regress/expected/test_schema.out b/tests/regress/expected/test_schema.out index 030b78a539b..a9ca3e887b4 100644 --- a/tests/regress/expected/test_schema.out +++ b/tests/regress/expected/test_schema.out @@ -1,7 +1,7 @@ -- Test schema CREATE SCHEMA s1; SET search_path TO s1; -CREATE TABLE a(i int); +CREATE TABLE a(i int) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT INTO a SELECT generate_series(1,100); @@ -22,7 +22,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail INSERT INTO a SELECT generate_series(1,100); ERROR: schema's disk space quota exceeded with name:s1 -CREATE TABLE a2(i int); +CREATE TABLE a2(i int) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
-- expect insert fail @@ -47,7 +47,7 @@ DROP ROLE IF EXISTS testbody; NOTICE: role "testbody" does not exist, skipping CREATE ROLE testbody; NOTICE: resource queue required -- using default resource queue "pg_default" -CREATE TABLE badquota.t1(i INT); +CREATE TABLE badquota.t1(i INT) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. ALTER TABLE badquota.t1 OWNER TO testbody; diff --git a/tests/regress/expected/test_table_size.out b/tests/regress/expected/test_table_size.out index 4c96b5dc4bc..e953b445d1e 100644 --- a/tests/regress/expected/test_table_size.out +++ b/tests/regress/expected/test_table_size.out @@ -1,5 +1,5 @@ -- Test tablesize table -create table a(i text); +create table a(i text) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. insert into a select * from generate_series(1,10000); @@ -9,7 +9,7 @@ select pg_sleep(2); (1 row) -create table buffer(oid oid, relname name, size bigint); +create table buffer(oid oid, relname name, size bigint) DISTRIBUTED BY (oid); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'oid' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. insert into buffer select oid, relname, sum(pg_table_size(oid)) from gp_dist_random('pg_class') where oid > 16384 and (relkind='r' or relkind='m') and relname = 'a' group by oid, relname; diff --git a/tests/regress/expected/test_tablespace_role.out b/tests/regress/expected/test_tablespace_role.out index 6420f47b91e..a289f94ec9e 100644 --- a/tests/regress/expected/test_tablespace_role.out +++ b/tests/regress/expected/test_tablespace_role.out @@ -15,10 +15,10 @@ CREATE ROLE rolespcu1 NOLOGIN; NOTICE: resource queue required -- using default resource queue "pg_default" CREATE ROLE rolespcu2 NOLOGIN; NOTICE: resource queue required -- using default resource queue "pg_default" -CREATE TABLE b (t TEXT) TABLESPACE rolespc; +CREATE TABLE b (t TEXT) TABLESPACE rolespc DISTRIBUTED BY (t); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 't' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. -CREATE TABLE b2 (t TEXT) TABLESPACE rolespc; +CREATE TABLE b2 (t TEXT) TABLESPACE rolespc DISTRIBUTED BY (t); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 't' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
ALTER TABLE b2 OWNER TO rolespcu1; diff --git a/tests/regress/expected/test_tablespace_role_perseg.out b/tests/regress/expected/test_tablespace_role_perseg.out index 709aa0f3c6f..d092a7eb3a1 100644 --- a/tests/regress/expected/test_tablespace_role_perseg.out +++ b/tests/regress/expected/test_tablespace_role_perseg.out @@ -15,7 +15,7 @@ CREATE ROLE rolespc_persegu1 NOLOGIN; NOTICE: resource queue required -- using default resource queue "pg_default" CREATE ROLE rolespc_persegu2 NOLOGIN; NOTICE: resource queue required -- using default resource queue "pg_default" -CREATE TABLE b (t TEXT) TABLESPACE rolespc_perseg; +CREATE TABLE b (t TEXT) TABLESPACE rolespc_perseg DISTRIBUTED BY (t); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 't' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. ALTER TABLE b OWNER TO rolespc_persegu1; diff --git a/tests/regress/expected/test_tablespace_schema.out b/tests/regress/expected/test_tablespace_schema.out index f8e96a922fd..00600fa2502 100644 --- a/tests/regress/expected/test_tablespace_schema.out +++ b/tests/regress/expected/test_tablespace_schema.out @@ -7,7 +7,7 @@ DROP TABLESPACE IF EXISTS schemaspc; NOTICE: tablespace "schemaspc" does not exist, skipping CREATE TABLESPACE schemaspc LOCATION '/tmp/schemaspc'; SET search_path TO spcs1; -CREATE TABLE a(i int) TABLESPACE schemaspc; +CREATE TABLE a(i int) TABLESPACE schemaspc DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT INTO a SELECT generate_series(1,100); @@ -28,7 +28,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail INSERT INTO a SELECT generate_series(1,100); ERROR: tablespace:schemaspc schema:spcs1 diskquota exceeded -CREATE TABLE a2(i int) TABLESPACE schemaspc; +CREATE TABLE a2(i int) TABLESPACE schemaspc DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. -- expect insert fail diff --git a/tests/regress/expected/test_tablespace_schema_perseg.out b/tests/regress/expected/test_tablespace_schema_perseg.out index 2a0c73daa4d..f2e3f3d508b 100644 --- a/tests/regress/expected/test_tablespace_schema_perseg.out +++ b/tests/regress/expected/test_tablespace_schema_perseg.out @@ -14,7 +14,7 @@ SELECT diskquota.set_schema_tablespace_quota('spcs1_perseg', 'schemaspc_perseg', (1 row) SET search_path TO spcs1_perseg; -CREATE TABLE a(i int) TABLESPACE schemaspc_perseg; +CREATE TABLE a(i int) TABLESPACE schemaspc_perseg DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
INSERT INTO a SELECT generate_series(1,100); diff --git a/tests/regress/expected/test_temp_role.out b/tests/regress/expected/test_temp_role.out index 0bf1813f8a6..35d1a140378 100644 --- a/tests/regress/expected/test_temp_role.out +++ b/tests/regress/expected/test_temp_role.out @@ -9,7 +9,7 @@ SELECT diskquota.set_role_quota('u3temp', '1MB'); (1 row) -CREATE TABLE a(i int); +CREATE TABLE a(i int) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. ALTER TABLE a OWNER TO u3temp; diff --git a/tests/regress/expected/test_toast.out b/tests/regress/expected/test_toast.out index 7b0e7fbf8ab..d30ce8f7bd7 100644 --- a/tests/regress/expected/test_toast.out +++ b/tests/regress/expected/test_toast.out @@ -7,7 +7,7 @@ SELECT diskquota.set_schema_quota('s5', '1 MB'); (1 row) SET search_path TO s5; -CREATE TABLE a5 (message text); +CREATE TABLE a5 (t text) DISTRIBUTED BY (t); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'message' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT INTO a5 diff --git a/tests/regress/expected/test_truncate.out b/tests/regress/expected/test_truncate.out index a863b0bf238..59a51e98fb6 100644 --- a/tests/regress/expected/test_truncate.out +++ b/tests/regress/expected/test_truncate.out @@ -7,10 +7,10 @@ SELECT diskquota.set_schema_quota('s7', '1 MB'); (1 row) SET search_path TO s7; -CREATE TABLE a (i int); +CREATE TABLE a (i int) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. -CREATE TABLE b (i int); +CREATE TABLE b (i int) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT INTO a SELECT generate_series(1,100000); diff --git a/tests/regress/expected/test_uncommitted_table_size.out b/tests/regress/expected/test_uncommitted_table_size.out index 4fa4993f623..8cbc401931f 100644 --- a/tests/regress/expected/test_uncommitted_table_size.out +++ b/tests/regress/expected/test_uncommitted_table_size.out @@ -26,7 +26,7 @@ commit; DROP table t1; -- heap table begin; -CREATE TABLE t2(i int); +CREATE TABLE t2(i int) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
INSERT INTO t2 SELECT generate_series(1, 100000); @@ -74,7 +74,7 @@ commit; DROP table t2; -- toast table begin; -CREATE TABLE t3(t text); +CREATE TABLE t3(t text) DISTRIBUTED BY (t); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 't' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT INTO t3 SELECT repeat('a', 10000) FROM generate_series(1, 1000); @@ -100,7 +100,7 @@ commit; DROP table t3; -- AO table begin; -CREATE TABLE ao (i int) WITH (appendonly=true); +CREATE TABLE ao (i int) WITH (appendonly=true) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT INTO ao SELECT generate_series(1, 100000); @@ -155,7 +155,7 @@ commit; DROP TABLE ao; -- AO table CTAS begin; -CREATE TABLE ao WITH(appendonly=true) AS SELECT generate_series(1, 10000); +CREATE TABLE ao (i) WITH(appendonly=true) AS SELECT generate_series(1, 10000) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. SELECT diskquota.wait_for_worker_new_epoch(); @@ -175,7 +175,7 @@ commit; DROP TABLE ao; -- AOCS table begin; -CREATE TABLE aocs (i int, t text) WITH (appendonly=true, orientation=column); +CREATE TABLE aocs (i int, t text) WITH (appendonly=true, orientation=column) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT INTO aocs SELECT i, repeat('a', 1000) FROM generate_series(1, 10000) AS i; @@ -223,7 +223,7 @@ commit; DROP TABLE aocs; -- AOCS table CTAS begin; -CREATE TABLE aocs WITH(appendonly=true, orientation=column) AS SELECT i, array(select * from generate_series(1,1000)) FROM generate_series(1, 100) AS i; +CREATE TABLE aocs WITH(appendonly=true, orientation=column) AS SELECT i, array(select * from generate_series(1,1000)) FROM generate_series(1, 100) AS i DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
SELECT diskquota.wait_for_worker_new_epoch(); diff --git a/tests/regress/expected/test_update.out b/tests/regress/expected/test_update.out index 1dedcd1b130..58d2534d5e4 100644 --- a/tests/regress/expected/test_update.out +++ b/tests/regress/expected/test_update.out @@ -7,7 +7,7 @@ SELECT diskquota.set_schema_quota('s4', '1 MB'); (1 row) SET search_path TO s4; -CREATE TABLE a(i int); +CREATE TABLE a(i int) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT INTO a SELECT generate_series(1,100000); diff --git a/tests/regress/expected/test_vacuum.out b/tests/regress/expected/test_vacuum.out index d8319d2b5bb..5099a1cada2 100644 --- a/tests/regress/expected/test_vacuum.out +++ b/tests/regress/expected/test_vacuum.out @@ -7,10 +7,10 @@ SELECT diskquota.set_schema_quota('s6', '1 MB'); (1 row) SET search_path TO s6; -CREATE TABLE a (i int); +CREATE TABLE a (i int) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. -CREATE TABLE b (i int); +CREATE TABLE b (i int) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT INTO a SELECT generate_series(1,100000); diff --git a/tests/regress/sql/test_appendonly.sql b/tests/regress/sql/test_appendonly.sql index 2d42f8df959..8852934769c 100644 --- a/tests/regress/sql/test_appendonly.sql +++ b/tests/regress/sql/test_appendonly.sql @@ -2,8 +2,8 @@ CREATE SCHEMA s_appendonly; SET search_path TO s_appendonly; -CREATE TABLE t_ao(i int) WITH (appendonly=true); -CREATE TABLE t_aoco(i int) WITH (appendonly=true, orientation=column); +CREATE TABLE t_ao(i int) WITH (appendonly=true) DISTRIBUTED BY (i); +CREATE TABLE t_aoco(i int) WITH (appendonly=true, orientation=column) DISTRIBUTED BY (i); -- Create an index on t_ao so that there will be pg_aoblkdir_XXX relations. CREATE INDEX index_t ON t_ao(i); CREATE INDEX index_t2 ON t_aoco(i); diff --git a/tests/regress/sql/test_blackmap.sql b/tests/regress/sql/test_blackmap.sql index 2efc0478acc..47b6b783185 100644 --- a/tests/regress/sql/test_blackmap.sql +++ b/tests/regress/sql/test_blackmap.sql @@ -60,7 +60,7 @@ LANGUAGE 'plpgsql'; -- 1. Create an ordinary table and add its oid to blackmap on seg0. -- Check that its relfilenode is blocked on seg0 by various conditions. -- -CREATE TABLE blocked_t1(i int); +CREATE TABLE blocked_t1(i int) DISTRIBUTED BY (i); -- Insert an entry for blocked_t1 to blackmap on seg0. SELECT block_relation_on_seg0('blocked_t1'::regclass, 'NAMESPACE'::text); @@ -111,7 +111,7 @@ SELECT rel.relname, be.target_type, -- 2. Test that the relfilenodes of toast relation together with its -- index are blocked on seg0. -- -CREATE TABLE blocked_t2(i text); +CREATE TABLE blocked_t2(i text) DISTRIBUTED BY (i); -- Insert an entry for blocked_t2 to blackmap on seg0.
SELECT block_relation_on_seg0('blocked_t2'::regclass, 'NAMESPACE'::text); @@ -129,7 +129,7 @@ SELECT replace_oid_with_relname(rel.relname), -- 3. Test that the relfilenodes of appendonly relation (row oriented) together with its -- auxiliary relations are blocked on seg0. -- -CREATE TABLE blocked_t3(i int) WITH (appendonly=true); +CREATE TABLE blocked_t3(i int) WITH (appendonly=true) DISTRIBUTED BY (i); CREATE INDEX blocked_t3_index ON blocked_t3(i); -- Insert an entry for blocked_t3 to blackmap on seg0. SELECT block_relation_on_seg0('blocked_t3'::regclass, 'NAMESPACE'::text); @@ -148,7 +148,7 @@ SELECT replace_oid_with_relname(rel.relname), -- 4. Test that the relfilenodes of appendonly relation (column oriented) together with its -- auxiliary relations are blocked on seg0. -- -CREATE TABLE blocked_t4(i int) WITH (appendonly=true, orientation=column); +CREATE TABLE blocked_t4(i int) WITH (appendonly=true, orientation=column) DISTRIBUTED BY (i); CREATE INDEX blocked_t4_index ON blocked_t4(i); -- Insert an entry for blocked_t4 to blackmap on seg0. SELECT block_relation_on_seg0('blocked_t4'::regclass, 'NAMESPACE'::text); @@ -167,7 +167,7 @@ SELECT replace_oid_with_relname(rel.relname), -- 5. Test that the relfilenodes of toast appendonly relation (column oriented) together with its -- auxiliary relations are blocked on seg0. -- -CREATE TABLE blocked_t5(i text) WITH (appendonly=true, orientation=column); +CREATE TABLE blocked_t5(i text) WITH (appendonly=true, orientation=column) DISTRIBUTED BY (i); CREATE INDEX blocked_t5_index ON blocked_t5(i); -- Insert an entry for blocked_t5 to blackmap on seg0. SELECT block_relation_on_seg0('blocked_t5'::regclass, 'NAMESPACE'::text); diff --git a/tests/regress/sql/test_column.sql b/tests/regress/sql/test_column.sql index b8698244f52..125940ed9da 100644 --- a/tests/regress/sql/test_column.sql +++ b/tests/regress/sql/test_column.sql @@ -4,7 +4,7 @@ SELECT diskquota.set_schema_quota('scolumn', '1 MB'); SET search_path TO scolumn; SELECT diskquota.wait_for_worker_new_epoch(); -CREATE TABLE a2(i INT); +CREATE TABLE a2(i INT) DISTRIBUTED BY (i); -- expect fail INSERT INTO a2 SELECT generate_series(1,100000); SELECT diskquota.wait_for_worker_new_epoch(); diff --git a/tests/regress/sql/test_copy.sql b/tests/regress/sql/test_copy.sql index aca811e0b25..92003562370 100644 --- a/tests/regress/sql/test_copy.sql +++ b/tests/regress/sql/test_copy.sql @@ -5,7 +5,7 @@ SET search_path TO s3; \! seq 100 > /tmp/csmall.txt -CREATE TABLE c (i int); +CREATE TABLE c (i int) DISTRIBUTED BY (i); COPY c FROM '/tmp/csmall.txt'; -- expect failed INSERT INTO c SELECT generate_series(1,100000); diff --git a/tests/regress/sql/test_ctas_pause.sql b/tests/regress/sql/test_ctas_pause.sql index 8980ce904c4..4e5e8df4bd5 100644 --- a/tests/regress/sql/test_ctas_pause.sql +++ b/tests/regress/sql/test_ctas_pause.sql @@ -7,11 +7,11 @@ SELECT diskquota.set_schema_quota('hardlimit_s', '1 MB'); SELECT diskquota.wait_for_worker_new_epoch(); -- heap table -CREATE TABLE t1 AS SELECT generate_series(1,1000000); -- expect fail +CREATE TABLE t1 (i) AS SELECT generate_series(1,1000000) DISTRIBUTED BY (i); -- expect fail SELECT diskquota.pause(); -CREATE TABLE t1 AS SELECT generate_series(1,1000000); -- expect succeed +CREATE TABLE t1 (i) AS SELECT generate_series(1,1000000) DISTRIBUTED BY (i); -- expect succeed -- disable hardlimit and do some clean-ups. \!
gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null diff --git a/tests/regress/sql/test_ctas_role.sql b/tests/regress/sql/test_ctas_role.sql index 19750be032e..ef554bb9331 100644 --- a/tests/regress/sql/test_ctas_role.sql +++ b/tests/regress/sql/test_ctas_role.sql @@ -7,19 +7,19 @@ GRANT USAGE ON SCHEMA diskquota TO hardlimit_r; SET ROLE hardlimit_r; -- heap table -CREATE TABLE t1 AS SELECT generate_series(1, 10000000); +CREATE TABLE t1 (i) AS SELECT generate_series(1, 10000000) DISTRIBUTED BY (i); SELECT diskquota.wait_for_worker_new_epoch(); -- temp table -CREATE TEMP TABLE t2 AS SELECT generate_series(1, 100000000); +CREATE TEMP TABLE t2 (i) AS SELECT generate_series(1, 100000000); SELECT diskquota.wait_for_worker_new_epoch(); -- toast table -CREATE TABLE toast_table AS SELECT ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000); +CREATE TABLE toast_table (i) AS SELECT ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000) DISTRIBUTED BY (i); SELECT diskquota.wait_for_worker_new_epoch(); -- ao table -CREATE TABLE ao_table WITH (appendonly=true) AS SELECT generate_series(1, 100000000); +CREATE TABLE ao_table (i) WITH (appendonly=true) AS SELECT generate_series(1, 100000000) DISTRIBUTED BY (i); SELECT diskquota.wait_for_worker_new_epoch(); -- aocs table diff --git a/tests/regress/sql/test_ctas_schema.sql b/tests/regress/sql/test_ctas_schema.sql index e4e4db3752c..06b11592176 100644 --- a/tests/regress/sql/test_ctas_schema.sql +++ b/tests/regress/sql/test_ctas_schema.sql @@ -7,16 +7,16 @@ SELECT diskquota.set_schema_quota('hardlimit_s', '1 MB'); SET search_path TO hardlimit_s; -- heap table -CREATE TABLE t1 AS SELECT generate_series(1, 100000000); +CREATE TABLE t1 (i) AS SELECT generate_series(1, 100000000) DISTRIBUTED BY (i); SELECT diskquota.wait_for_worker_new_epoch(); -- toast table -CREATE TABLE toast_table - AS SELECT ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000); +CREATE TABLE toast_table (i) + AS SELECT ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000) DISTRIBUTED BY (i); SELECT diskquota.wait_for_worker_new_epoch(); -- ao table -CREATE TABLE ao_table WITH (appendonly=true) AS SELECT generate_series(1, 100000000); +CREATE TABLE ao_table (i) WITH (appendonly=true) AS SELECT generate_series(1, 100000000) DISTRIBUTED BY (i); SELECT diskquota.wait_for_worker_new_epoch(); -- aocs table diff --git a/tests/regress/sql/test_ctas_tablespace_role.sql b/tests/regress/sql/test_ctas_tablespace_role.sql index 628ef94d191..35a236fcdb3 100644 --- a/tests/regress/sql/test_ctas_tablespace_role.sql +++ b/tests/regress/sql/test_ctas_tablespace_role.sql @@ -16,21 +16,21 @@ SET default_tablespace = ctas_rolespc; SET ROLE hardlimit_r; -- heap table -CREATE TABLE t1 AS SELECT generate_series(1, 100000000); +CREATE TABLE t1 (i) AS SELECT generate_series(1, 100000000) DISTRIBUTED BY (i); SELECT diskquota.wait_for_worker_new_epoch(); -- toast table -CREATE TABLE toast_table - AS SELECT ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000); +CREATE TABLE toast_table (i) + AS SELECT ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000) DISTRIBUTED BY (i); SELECT diskquota.wait_for_worker_new_epoch(); -- ao table -CREATE TABLE ao_table WITH (appendonly=true) AS SELECT generate_series(1, 100000000); +CREATE TABLE ao_table (i) WITH (appendonly=true) AS SELECT generate_series(1, 100000000) DISTRIBUTED BY (i); SELECT diskquota.wait_for_worker_new_epoch(); -- aocs table CREATE TABLE aocs_table WITH 
(appendonly=true, orientation=column) - AS SELECT i, ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000) AS i; + AS SELECT i, ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000) AS i DISTRIBUTED BY (i); SELECT diskquota.wait_for_worker_new_epoch(); -- disable hardlimit and do some clean-ups. diff --git a/tests/regress/sql/test_ctas_tablespace_schema.sql b/tests/regress/sql/test_ctas_tablespace_schema.sql index 57a3c73e497..b467566e1b2 100644 --- a/tests/regress/sql/test_ctas_tablespace_schema.sql +++ b/tests/regress/sql/test_ctas_tablespace_schema.sql @@ -15,21 +15,21 @@ SET search_path TO hardlimit_s; SET default_tablespace = ctas_schemaspc; -- heap table -CREATE TABLE t1 AS SELECT generate_series(1, 100000000); +CREATE TABLE t1 (i) AS SELECT generate_series(1, 100000000) DISTRIBUTED BY (i); SELECT diskquota.wait_for_worker_new_epoch(); -- toast table -CREATE TABLE toast_table - AS SELECT ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000); +CREATE TABLE toast_table (i) + AS SELECT ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000) DISTRIBUTED BY (i); SELECT diskquota.wait_for_worker_new_epoch(); -- ao table -CREATE TABLE ao_table WITH (appendonly=true) AS SELECT generate_series(1, 100000000); +CREATE TABLE ao_table (i) WITH (appendonly=true) AS SELECT generate_series(1, 100000000) DISTRIBUTED BY (i); SELECT diskquota.wait_for_worker_new_epoch(); -- aocs table CREATE TABLE aocs_table WITH (appendonly=true, orientation=column) - AS SELECT i, ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000) AS i; + AS SELECT i, ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000) AS i DISTRIBUTED BY (i); SELECT diskquota.wait_for_worker_new_epoch(); -- disable hardlimit and do some clean-ups diff --git a/tests/regress/sql/test_delete_quota.sql b/tests/regress/sql/test_delete_quota.sql index b4c6572c940..3658b5ac314 100644 --- a/tests/regress/sql/test_delete_quota.sql +++ b/tests/regress/sql/test_delete_quota.sql @@ -3,7 +3,7 @@ CREATE SCHEMA deleteschema; SELECT diskquota.set_schema_quota('deleteschema', '1 MB'); SET search_path TO deleteschema; -CREATE TABLE c (i INT); +CREATE TABLE c (i INT) DISTRIBUTED BY (i); -- expect failed INSERT INTO c SELECT generate_series(1,100000); SELECT diskquota.wait_for_worker_new_epoch(); diff --git a/tests/regress/sql/test_drop_after_pause.sql b/tests/regress/sql/test_drop_after_pause.sql index f79ce3c32fc..b24e2ceb67a 100644 --- a/tests/regress/sql/test_drop_after_pause.sql +++ b/tests/regress/sql/test_drop_after_pause.sql @@ -15,7 +15,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); \! 
gpstop -u > /dev/null CREATE SCHEMA SX; -CREATE TABLE SX.a(i int); +CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); SELECT diskquota.set_schema_quota('SX', '1MB'); SELECT diskquota.wait_for_worker_new_epoch(); INSERT INTO SX.a SELECT generate_series(1,1000000); -- expect insert fail diff --git a/tests/regress/sql/test_drop_table.sql b/tests/regress/sql/test_drop_table.sql index f5e22b61f3f..7c0cd86ec54 100644 --- a/tests/regress/sql/test_drop_table.sql +++ b/tests/regress/sql/test_drop_table.sql @@ -2,8 +2,8 @@ CREATE SCHEMA sdrtbl; SELECT diskquota.set_schema_quota('sdrtbl', '1 MB'); SET search_path TO sdrtbl; -CREATE TABLE a(i INT); -CREATE TABLE a2(i INT); +CREATE TABLE a(i INT) DISTRIBUTED BY (i); +CREATE TABLE a2(i INT) DISTRIBUTED BY (i); INSERT INTO a SELECT generate_series(1,100); -- expect insert fail INSERT INTO a SELECT generate_series(1,100000); diff --git a/tests/regress/sql/test_extension.sql b/tests/regress/sql/test_extension.sql index e07577c6bdd..40d3f09e51d 100644 --- a/tests/regress/sql/test_extension.sql +++ b/tests/regress/sql/test_extension.sql @@ -24,7 +24,7 @@ CREATE EXTENSION diskquota; \! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l SELECT diskquota.wait_for_worker_new_epoch(); CREATE SCHEMA SX; -CREATE TABLE SX.a(i int); +CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); SELECT diskquota.set_schema_quota('SX', '1MB'); INSERT INTO SX.a values(generate_series(0, 100000)); SELECT diskquota.wait_for_worker_new_epoch(); @@ -33,7 +33,7 @@ DROP TABLE SX.a; \c dbx1 CREATE SCHEMA SX; -CREATE TABLE SX.a(i int); +CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); INSERT INTO SX.a values(generate_series(0, 100000)); CREATE EXTENSION diskquota; SELECT diskquota.init_table_size_table(); @@ -48,7 +48,7 @@ CREATE EXTENSION diskquota; SELECT diskquota.wait_for_worker_new_epoch(); \! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l CREATE SCHEMA SX; -CREATE TABLE SX.a(i int); +CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); SELECT diskquota.set_schema_quota('SX', '1MB'); INSERT INTO SX.a values(generate_series(0, 100000)); SELECT diskquota.wait_for_worker_new_epoch(); @@ -60,7 +60,7 @@ CREATE EXTENSION diskquota; \! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l SELECT diskquota.wait_for_worker_new_epoch(); CREATE SCHEMA SX; -CREATE TABLE SX.a(i int); +CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); SELECT diskquota.set_schema_quota('SX', '1MB'); INSERT INTO SX.a values(generate_series(0, 100000)); SELECT diskquota.wait_for_worker_new_epoch(); @@ -72,7 +72,7 @@ CREATE EXTENSION diskquota; \! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l SELECT diskquota.wait_for_worker_new_epoch(); CREATE SCHEMA SX; -CREATE TABLE SX.a(i int); +CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); SELECT diskquota.set_schema_quota('SX', '1MB'); INSERT INTO SX.a values(generate_series(0, 100000)); SELECT diskquota.wait_for_worker_new_epoch(); @@ -84,7 +84,7 @@ CREATE EXTENSION diskquota; \! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l SELECT diskquota.wait_for_worker_new_epoch(); CREATE SCHEMA SX; -CREATE TABLE SX.a(i int); +CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); SELECT diskquota.set_schema_quota('SX', '1MB'); INSERT INTO SX.a values(generate_series(0, 100000)); SELECT diskquota.wait_for_worker_new_epoch(); @@ -96,7 +96,7 @@ CREATE EXTENSION diskquota; \! 
sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l SELECT diskquota.wait_for_worker_new_epoch(); CREATE SCHEMA SX; -CREATE TABLE SX.a(i int); +CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); SELECT diskquota.set_schema_quota('SX', '1MB'); INSERT INTO SX.a values(generate_series(0, 100000)); SELECT diskquota.wait_for_worker_new_epoch(); @@ -108,7 +108,7 @@ CREATE EXTENSION diskquota; \! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l SELECT diskquota.wait_for_worker_new_epoch(); CREATE SCHEMA SX; -CREATE TABLE SX.a(i int); +CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); SELECT diskquota.set_schema_quota('SX', '1MB'); INSERT INTO SX.a values(generate_series(0, 100000)); SELECT diskquota.wait_for_worker_new_epoch(); @@ -120,7 +120,7 @@ CREATE EXTENSION diskquota; \! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l SELECT diskquota.wait_for_worker_new_epoch(); CREATE SCHEMA SX; -CREATE TABLE SX.a(i int); +CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); SELECT diskquota.set_schema_quota('SX', '1MB'); INSERT INTO SX.a values(generate_series(0, 100000)); SELECT diskquota.wait_for_worker_new_epoch(); diff --git a/tests/regress/sql/test_fast_disk_check.sql b/tests/regress/sql/test_fast_disk_check.sql index 4b92a5bcb77..c15e1bfed4f 100644 --- a/tests/regress/sql/test_fast_disk_check.sql +++ b/tests/regress/sql/test_fast_disk_check.sql @@ -2,7 +2,7 @@ CREATE SCHEMA s1; SET search_path to s1; -CREATE TABLE a(i int); +CREATE TABLE a(i int) DISTRIBUTED BY (i); INSERT INTO a SELECT generate_series(1,200000); SELECT diskquota.wait_for_worker_new_epoch(); SELECT (pg_database_size(oid)-dbsize)/dbsize < 0.1 FROM pg_database, diskquota.show_fast_database_size_view WHERE datname='contrib_regression'; diff --git a/tests/regress/sql/test_fetch_table_stat.sql b/tests/regress/sql/test_fetch_table_stat.sql index 65cbfeb56dd..0eabbdaf536 100644 --- a/tests/regress/sql/test_fetch_table_stat.sql +++ b/tests/regress/sql/test_fetch_table_stat.sql @@ -3,7 +3,7 @@ -- the error message is preserved for us to debug. -- -CREATE TABLE t_error_handling (i int); +CREATE TABLE t_error_handling (i int) DISTRIBUTED BY (i); -- Inject an error to a segment server, since this UDF is only called on segments. 
SELECT gp_inject_fault_infinite('diskquota_fetch_table_stat', 'error', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; diff --git a/tests/regress/sql/test_index.sql b/tests/regress/sql/test_index.sql index 5b884b1e0af..c2d5f3983d4 100644 --- a/tests/regress/sql/test_index.sql +++ b/tests/regress/sql/test_index.sql @@ -7,7 +7,7 @@ DROP TABLESPACE IF EXISTS indexspc; CREATE TABLESPACE indexspc LOCATION '/tmp/indexspc'; SET search_path TO indexschema1; -CREATE TABLE test_index_a(i int) TABLESPACE indexspc; +CREATE TABLE test_index_a(i int) TABLESPACE indexspc DISTRIBUTED BY (i); INSERT INTO test_index_a SELECT generate_series(1,20000); SELECT diskquota.set_schema_tablespace_quota('indexschema1', 'indexspc','2 MB'); SELECT diskquota.wait_for_worker_new_epoch(); diff --git a/tests/regress/sql/test_insert_after_drop.sql b/tests/regress/sql/test_insert_after_drop.sql index d811dfc7d22..c0ae2928a8a 100644 --- a/tests/regress/sql/test_insert_after_drop.sql +++ b/tests/regress/sql/test_insert_after_drop.sql @@ -6,7 +6,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); CREATE SCHEMA sdrtbl; SELECT diskquota.set_schema_quota('sdrtbl', '1 MB'); SET search_path TO sdrtbl; -CREATE TABLE a(i int); +CREATE TABLE a(i int) DISTRIBUTED BY (i); INSERT INTO a SELECT generate_series(1,100); -- expect insert fail INSERT INTO a SELECT generate_series(1,100000); diff --git a/tests/regress/sql/test_pause_and_resume.sql b/tests/regress/sql/test_pause_and_resume.sql index 00bfafb1466..b5ab0748491 100644 --- a/tests/regress/sql/test_pause_and_resume.sql +++ b/tests/regress/sql/test_pause_and_resume.sql @@ -2,7 +2,7 @@ CREATE SCHEMA s1; SET search_path TO s1; -CREATE TABLE a(i int); +CREATE TABLE a(i int) DISTRIBUTED BY (i); -- expect insert succeed INSERT INTO a SELECT generate_series(1,100000); diff --git a/tests/regress/sql/test_pause_and_resume_multiple_db.sql b/tests/regress/sql/test_pause_and_resume_multiple_db.sql index 8b61c39cab7..10ff08e3bb4 100644 --- a/tests/regress/sql/test_pause_and_resume_multiple_db.sql +++ b/tests/regress/sql/test_pause_and_resume_multiple_db.sql @@ -12,11 +12,11 @@ CREATE EXTENSION diskquota; SELECT diskquota.wait_for_worker_new_epoch(); \c contrib_regression -CREATE TABLE s1.a(i int); +CREATE TABLE s1.a(i int) DISTRIBUTED BY (i); INSERT INTO s1.a SELECT generate_series(1,100000); -- expect insert succeed \c test_pause_and_resume -CREATE TABLE s1.a(i int); +CREATE TABLE s1.a(i int) DISTRIBUTED BY (i); INSERT INTO s1.a SELECT generate_series(1,100000); -- expect insert succeed \c contrib_regression @@ -47,7 +47,7 @@ INSERT INTO s1.a SELECT generate_series(1,100); -- expect insert succeed CREATE SCHEMA s1; CREATE EXTENSION diskquota; SELECT diskquota.wait_for_worker_new_epoch(); -- new database should be active although other database is paused -CREATE TABLE s1.a(i int); +CREATE TABLE s1.a(i int) DISTRIBUTED BY (i); INSERT INTO s1.a SELECT generate_series(1,100000); -- expect insert succeed SELECT diskquota.set_schema_quota('s1', '1 MB'); SELECT diskquota.wait_for_worker_new_epoch(); diff --git a/tests/regress/sql/test_primary_failure.sql b/tests/regress/sql/test_primary_failure.sql index ffe44466a56..14556741726 100644 --- a/tests/regress/sql/test_primary_failure.sql +++ b/tests/regress/sql/test_primary_failure.sql @@ -27,7 +27,7 @@ returns text as $$ return subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).replace('.', '') $$ language plpythonu; -CREATE TABLE a(i int); +CREATE TABLE a(i int) DISTRIBUTED BY (i); INSERT INTO a SELECT 
generate_series(1,100); INSERT INTO a SELECT generate_series(1,100000); SELECT diskquota.wait_for_worker_new_epoch(); diff --git a/tests/regress/sql/test_relation_cache.sql b/tests/regress/sql/test_relation_cache.sql index 3661ed3edfa..b9739d9a8c0 100644 --- a/tests/regress/sql/test_relation_cache.sql +++ b/tests/regress/sql/test_relation_cache.sql @@ -13,7 +13,7 @@ $$ LANGUAGE plpgsql; -- heap table begin; -create table t(i int); +create table t(i int) DISTRIBUTED BY (i); insert into t select generate_series(1, 100000); select count(*) from diskquota.show_relation_cache_all_seg(); @@ -25,7 +25,7 @@ drop table t; -- toast table begin; -create table t(t text); +create table t(t text) DISTRIBUTED BY (t); insert into t select array(select * from generate_series(1,1000)) from generate_series(1, 1000); select count(*) from diskquota.show_relation_cache_all_seg(); @@ -39,7 +39,7 @@ drop table t; -- AO table begin; -create table t(a int, b text) with(appendonly=true); +create table t(a int, b text) with(appendonly=true) DISTRIBUTED BY (a); insert into t select generate_series(1,1000) as a, repeat('a', 1000) as b; select count(*) from diskquota.show_relation_cache_all_seg(); @@ -53,7 +53,7 @@ drop table t; -- AOCS table begin; -create table t(a int, b text) with(appendonly=true, orientation=column); +create table t(a int, b text) with(appendonly=true, orientation=column) DISTRIBUTED BY (a); insert into t select generate_series(1,1000) as a, repeat('a', 1000) as b; select count(*) from diskquota.show_relation_cache_all_seg(); diff --git a/tests/regress/sql/test_relation_size.sql b/tests/regress/sql/test_relation_size.sql index ffe102a2642..0f04d9c9037 100644 --- a/tests/regress/sql/test_relation_size.sql +++ b/tests/regress/sql/test_relation_size.sql @@ -3,7 +3,7 @@ INSERT INTO t1 SELECT generate_series(1, 10000); SELECT diskquota.relation_size('t1'); SELECT pg_table_size('t1'); -CREATE TABLE t2(i int); +CREATE TABLE t2(i int) DISTRIBUTED BY (i); INSERT INTO t2 SELECT generate_series(1, 10000); SELECT diskquota.relation_size('t2'); SELECT pg_table_size('t2'); @@ -27,13 +27,13 @@ SELECT pg_table_size('t2'); DROP TABLE t1, t2; DROP TABLESPACE test_spc; -CREATE TABLE ao (i int) WITH (appendonly=true); +CREATE TABLE ao (i int) WITH (appendonly=true) DISTRIBUTED BY (i); INSERT INTO ao SELECT generate_series(1, 10000); SELECT diskquota.relation_size('ao'); SELECT pg_relation_size('ao'); DROP TABLE ao; -CREATE TABLE aocs (i int, t text) WITH (appendonly=true, orientation=column); +CREATE TABLE aocs (i int, t text) WITH (appendonly=true, orientation=column) DISTRIBUTED BY (i); INSERT INTO aocs SELECT i, repeat('a', 1000) FROM generate_series(1, 10000) AS i; SELECT diskquota.relation_size('aocs'); SELECT pg_relation_size('aocs'); diff --git a/tests/regress/sql/test_rename.sql b/tests/regress/sql/test_rename.sql index b6b4390f8ec..d6440c621eb 100644 --- a/tests/regress/sql/test_rename.sql +++ b/tests/regress/sql/test_rename.sql @@ -2,7 +2,7 @@ CREATE SCHEMA srs1; SELECT diskquota.set_schema_quota('srs1', '1 MB'); set search_path to srs1; -CREATE TABLE a(i int); +CREATE TABLE a(i int) DISTRIBUTED BY (i); -- expect insert fail INSERT INTO a SELECT generate_series(1,100000); SELECT diskquota.wait_for_worker_new_epoch(); @@ -27,7 +27,7 @@ CREATE SCHEMA srr1; CREATE ROLE srerole NOLOGIN; SELECT diskquota.set_role_quota('srerole', '1MB'); SET search_path TO srr1; -CREATE TABLE a(i int); +CREATE TABLE a(i int) DISTRIBUTED BY (i); ALTER TABLE a OWNER TO srerole; -- expect insert fail diff --git 
a/tests/regress/sql/test_reschema.sql b/tests/regress/sql/test_reschema.sql index 3814c8fbac0..feb61a05d1f 100644 --- a/tests/regress/sql/test_reschema.sql +++ b/tests/regress/sql/test_reschema.sql @@ -2,7 +2,7 @@ CREATE SCHEMA srE; SELECT diskquota.set_schema_quota('srE', '1 MB'); SET search_path TO srE; -CREATE TABLE a(i int); +CREATE TABLE a(i int) DISTRIBUTED BY (i); -- expect insert fail INSERT INTO a SELECT generate_series(1,100000); SELECT diskquota.wait_for_worker_new_epoch(); diff --git a/tests/regress/sql/test_role.sql b/tests/regress/sql/test_role.sql index d78604a6e5e..b2f4fdadcad 100644 --- a/tests/regress/sql/test_role.sql +++ b/tests/regress/sql/test_role.sql @@ -5,9 +5,9 @@ SET search_path TO srole; CREATE ROLE u1 NOLOGIN; CREATE ROLE u2 NOLOGIN; -CREATE TABLE b (t TEXT); +CREATE TABLE b (t TEXT) DISTRIBUTED BY (t); ALTER TABLE b OWNER TO u1; -CREATE TABLE b2 (t TEXT); +CREATE TABLE b2 (t TEXT) DISTRIBUTED BY (t); ALTER TABLE b2 OWNER TO u1; SELECT diskquota.set_role_quota('u1', '1 MB'); diff --git a/tests/regress/sql/test_schema.sql b/tests/regress/sql/test_schema.sql index a8f568dd442..ea70b270655 100644 --- a/tests/regress/sql/test_schema.sql +++ b/tests/regress/sql/test_schema.sql @@ -2,7 +2,7 @@ CREATE SCHEMA s1; SET search_path TO s1; -CREATE TABLE a(i int); +CREATE TABLE a(i int) DISTRIBUTED BY (i); INSERT INTO a SELECT generate_series(1,100); -- expect insert success INSERT INTO a SELECT generate_series(1,100000); @@ -11,7 +11,7 @@ SELECT diskquota.set_schema_quota('s1', '1 MB'); SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail INSERT INTO a SELECT generate_series(1,100); -CREATE TABLE a2(i int); +CREATE TABLE a2(i int) DISTRIBUTED BY (i); -- expect insert fail INSERT INTO a2 SELECT generate_series(1,100); @@ -28,7 +28,7 @@ INSERT INTO s2.a SELECT generate_series(1,200); CREATE SCHEMA badquota; DROP ROLE IF EXISTS testbody; CREATE ROLE testbody; -CREATE TABLE badquota.t1(i INT); +CREATE TABLE badquota.t1(i INT) DISTRIBUTED BY (i); ALTER TABLE badquota.t1 OWNER TO testbody; INSERT INTO badquota.t1 SELECT generate_series(0, 100000); SELECT diskquota.init_table_size_table(); diff --git a/tests/regress/sql/test_table_size.sql b/tests/regress/sql/test_table_size.sql index eb8c54bacbf..3db880bf27d 100644 --- a/tests/regress/sql/test_table_size.sql +++ b/tests/regress/sql/test_table_size.sql @@ -1,11 +1,11 @@ -- Test tablesize table -create table a(i text); +create table a(i text) DISTRIBUTED BY (i); insert into a select * from generate_series(1,10000); select pg_sleep(2); -create table buffer(oid oid, relname name, size bigint); +create table buffer(oid oid, relname name, size bigint) DISTRIBUTED BY (oid); insert into buffer select oid, relname, sum(pg_table_size(oid)) from gp_dist_random('pg_class') where oid > 16384 and (relkind='r' or relkind='m') and relname = 'a' group by oid, relname; diff --git a/tests/regress/sql/test_tablespace_role.sql b/tests/regress/sql/test_tablespace_role.sql index c1755457a2e..94afbeed69d 100644 --- a/tests/regress/sql/test_tablespace_role.sql +++ b/tests/regress/sql/test_tablespace_role.sql @@ -11,8 +11,8 @@ DROP ROLE IF EXISTS rolespcu1; DROP ROLE IF EXISTS rolespcu2; CREATE ROLE rolespcu1 NOLOGIN; CREATE ROLE rolespcu2 NOLOGIN; -CREATE TABLE b (t TEXT) TABLESPACE rolespc; -CREATE TABLE b2 (t TEXT) TABLESPACE rolespc; +CREATE TABLE b (t TEXT) TABLESPACE rolespc DISTRIBUTED BY (t); +CREATE TABLE b2 (t TEXT) TABLESPACE rolespc DISTRIBUTED BY (t); ALTER TABLE b2 OWNER TO rolespcu1; INSERT INTO b SELECT 
generate_series(1,100); diff --git a/tests/regress/sql/test_tablespace_role_perseg.sql b/tests/regress/sql/test_tablespace_role_perseg.sql index 4242da5c4dd..aa11f749e2a 100644 --- a/tests/regress/sql/test_tablespace_role_perseg.sql +++ b/tests/regress/sql/test_tablespace_role_perseg.sql @@ -11,7 +11,7 @@ DROP ROLE IF EXISTS rolespc_persegu1; DROP ROLE IF EXISTS rolespc_persegu2; CREATE ROLE rolespc_persegu1 NOLOGIN; CREATE ROLE rolespc_persegu2 NOLOGIN; -CREATE TABLE b (t TEXT) TABLESPACE rolespc_perseg; +CREATE TABLE b (t TEXT) TABLESPACE rolespc_perseg DISTRIBUTED BY (t); ALTER TABLE b OWNER TO rolespc_persegu1; SELECT diskquota.set_role_tablespace_quota('rolespc_persegu1', 'rolespc_perseg', '1 MB'); diff --git a/tests/regress/sql/test_tablespace_schema.sql b/tests/regress/sql/test_tablespace_schema.sql index 1fb85cd3a78..be3e6fe56fb 100644 --- a/tests/regress/sql/test_tablespace_schema.sql +++ b/tests/regress/sql/test_tablespace_schema.sql @@ -7,7 +7,7 @@ DROP TABLESPACE IF EXISTS schemaspc; CREATE TABLESPACE schemaspc LOCATION '/tmp/schemaspc'; SET search_path TO spcs1; -CREATE TABLE a(i int) TABLESPACE schemaspc; +CREATE TABLE a(i int) TABLESPACE schemaspc DISTRIBUTED BY (i); INSERT INTO a SELECT generate_series(1,100); -- expect insert fail INSERT INTO a SELECT generate_series(1,100000); @@ -16,7 +16,7 @@ SELECT diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc','1 MB'); SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail INSERT INTO a SELECT generate_series(1,100); -CREATE TABLE a2(i int) TABLESPACE schemaspc; +CREATE TABLE a2(i int) TABLESPACE schemaspc DISTRIBUTED BY (i); -- expect insert fail INSERT INTO a2 SELECT generate_series(1,100); diff --git a/tests/regress/sql/test_tablespace_schema_perseg.sql b/tests/regress/sql/test_tablespace_schema_perseg.sql index 56e7421052c..a5fb13eb71f 100644 --- a/tests/regress/sql/test_tablespace_schema_perseg.sql +++ b/tests/regress/sql/test_tablespace_schema_perseg.sql @@ -9,7 +9,7 @@ CREATE TABLESPACE schemaspc_perseg LOCATION '/tmp/schemaspc_perseg'; SELECT diskquota.set_schema_tablespace_quota('spcs1_perseg', 'schemaspc_perseg','1 MB'); SET search_path TO spcs1_perseg; -CREATE TABLE a(i int) TABLESPACE schemaspc_perseg; +CREATE TABLE a(i int) TABLESPACE schemaspc_perseg DISTRIBUTED BY (i); INSERT INTO a SELECT generate_series(1,100); -- expect insert success INSERT INTO a SELECT generate_series(1,100000); diff --git a/tests/regress/sql/test_temp_role.sql b/tests/regress/sql/test_temp_role.sql index b863098f500..856a48e8dd8 100644 --- a/tests/regress/sql/test_temp_role.sql +++ b/tests/regress/sql/test_temp_role.sql @@ -4,7 +4,7 @@ CREATE ROLE u3temp NOLOGIN; SET search_path TO strole; SELECT diskquota.set_role_quota('u3temp', '1MB'); -CREATE TABLE a(i int); +CREATE TABLE a(i int) DISTRIBUTED BY (i); ALTER TABLE a OWNER TO u3temp; CREATE TEMP TABLE ta(i int); ALTER TABLE ta OWNER TO u3temp; diff --git a/tests/regress/sql/test_toast.sql b/tests/regress/sql/test_toast.sql index 98ab45e5926..e96a595a729 100644 --- a/tests/regress/sql/test_toast.sql +++ b/tests/regress/sql/test_toast.sql @@ -2,7 +2,7 @@ CREATE SCHEMA s5; SELECT diskquota.set_schema_quota('s5', '1 MB'); SET search_path TO s5; -CREATE TABLE a5 (message text); +CREATE TABLE a5 (t text) DISTRIBUTED BY (t); INSERT INTO a5 SELECT (SELECT string_agg(chr(floor(random() * 26)::int + 65), '') diff --git a/tests/regress/sql/test_truncate.sql b/tests/regress/sql/test_truncate.sql index 057d6aeb155..2dafcb3126c 100644 --- a/tests/regress/sql/test_truncate.sql +++ 
b/tests/regress/sql/test_truncate.sql @@ -2,8 +2,8 @@ CREATE SCHEMA s7; SELECT diskquota.set_schema_quota('s7', '1 MB'); SET search_path TO s7; -CREATE TABLE a (i int); -CREATE TABLE b (i int); +CREATE TABLE a (i int) DISTRIBUTED BY (i); +CREATE TABLE b (i int) DISTRIBUTED BY (i); INSERT INTO a SELECT generate_series(1,100000); SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail diff --git a/tests/regress/sql/test_uncommitted_table_size.sql b/tests/regress/sql/test_uncommitted_table_size.sql index 0c3804de928..ee1c1e9b080 100644 --- a/tests/regress/sql/test_uncommitted_table_size.sql +++ b/tests/regress/sql/test_uncommitted_table_size.sql @@ -11,7 +11,7 @@ DROP table t1; -- heap table begin; -CREATE TABLE t2(i int); +CREATE TABLE t2(i int) DISTRIBUTED BY (i); INSERT INTO t2 SELECT generate_series(1, 100000); SELECT diskquota.wait_for_worker_new_epoch(); SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 't2'::regclass and segid = -1; @@ -30,7 +30,7 @@ DROP table t2; -- toast table begin; -CREATE TABLE t3(t text); +CREATE TABLE t3(t text) DISTRIBUTED BY (t); INSERT INTO t3 SELECT repeat('a', 10000) FROM generate_series(1, 1000); SELECT diskquota.wait_for_worker_new_epoch(); SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 't3'::regclass and segid = -1; @@ -41,7 +41,7 @@ DROP table t3; -- AO table begin; -CREATE TABLE ao (i int) WITH (appendonly=true); +CREATE TABLE ao (i int) WITH (appendonly=true) DISTRIBUTED BY (i); INSERT INTO ao SELECT generate_series(1, 100000); SELECT diskquota.wait_for_worker_new_epoch(); SELECT (SELECT size FROM diskquota.table_size WHERE tableid = 'ao'::regclass and segid = -1)= @@ -62,7 +62,7 @@ DROP TABLE ao; -- AO table CTAS begin; -CREATE TABLE ao WITH(appendonly=true) AS SELECT generate_series(1, 10000); +CREATE TABLE ao (i) WITH(appendonly=true) AS SELECT generate_series(1, 10000) DISTRIBUTED BY (i); SELECT diskquota.wait_for_worker_new_epoch(); SELECT (SELECT size FROM diskquota.table_size WHERE tableid = 'ao'::regclass and segid = -1)= (SELECT pg_table_size('ao')); @@ -71,7 +71,7 @@ DROP TABLE ao; -- AOCS table begin; -CREATE TABLE aocs (i int, t text) WITH (appendonly=true, orientation=column); +CREATE TABLE aocs (i int, t text) WITH (appendonly=true, orientation=column) DISTRIBUTED BY (i); INSERT INTO aocs SELECT i, repeat('a', 1000) FROM generate_series(1, 10000) AS i; SELECT diskquota.wait_for_worker_new_epoch(); SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'aocs'::regclass and segid = -1; @@ -90,7 +90,7 @@ DROP TABLE aocs; -- AOCS table CTAS begin; -CREATE TABLE aocs WITH(appendonly=true, orientation=column) AS SELECT i, array(select * from generate_series(1,1000)) FROM generate_series(1, 100) AS i; +CREATE TABLE aocs WITH(appendonly=true, orientation=column) AS SELECT i, array(select * from generate_series(1,1000)) FROM generate_series(1, 100) AS i DISTRIBUTED BY (i); SELECT diskquota.wait_for_worker_new_epoch(); SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'aocs'::regclass and segid = -1; SELECT pg_table_size('aocs'); diff --git a/tests/regress/sql/test_update.sql b/tests/regress/sql/test_update.sql index 5745f262151..75fb6ee8783 100644 --- a/tests/regress/sql/test_update.sql +++ b/tests/regress/sql/test_update.sql @@ -2,7 +2,7 @@ CREATE SCHEMA s4; SELECT diskquota.set_schema_quota('s4', '1 MB'); SET search_path TO s4; -CREATE TABLE a(i int); +CREATE TABLE a(i int) DISTRIBUTED BY (i); INSERT INTO a SELECT 
generate_series(1,100000); SELECT diskquota.wait_for_worker_new_epoch(); -- expect update fail. diff --git a/tests/regress/sql/test_vacuum.sql b/tests/regress/sql/test_vacuum.sql index 2ddc35ed29b..3483db34469 100644 --- a/tests/regress/sql/test_vacuum.sql +++ b/tests/regress/sql/test_vacuum.sql @@ -2,8 +2,8 @@ CREATE SCHEMA s6; SELECT diskquota.set_schema_quota('s6', '1 MB'); SET search_path TO s6; -CREATE TABLE a (i int); -CREATE TABLE b (i int); +CREATE TABLE a (i int) DISTRIBUTED BY (i); +CREATE TABLE b (i int) DISTRIBUTED BY (i); INSERT INTO a SELECT generate_series(1,100000); SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail diff --git a/upgrade_test/expected/prepare.out b/upgrade_test/expected/prepare.out index 25b295b46f6..84afcfabf60 100644 --- a/upgrade_test/expected/prepare.out +++ b/upgrade_test/expected/prepare.out @@ -32,7 +32,7 @@ DROP ROLE IF EXISTS testbody; NOTICE: role "testbody" does not exist, skipping CREATE ROLE testbody; NOTICE: resource queue required -- using default resource queue "pg_default" -CREATE TABLE badquota.t1(i INT); +CREATE TABLE badquota.t1(i INT) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. ALTER TABLE badquota.t1 OWNER TO testbody; @@ -60,7 +60,7 @@ SELECT diskquota.set_role_quota('badbody', '2 MB'); (1 row) -CREATE TABLE badbody_schema.t2(i INT); +CREATE TABLE badbody_schema.t2(i INT) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. ALTER TABLE badbody_schema.t2 OWNER TO badbody; diff --git a/upgrade_test/expected/set_config.out b/upgrade_test/expected/set_config.out index 221aaf76518..da44d8fb407 100644 --- a/upgrade_test/expected/set_config.out +++ b/upgrade_test/expected/set_config.out @@ -51,11 +51,11 @@ CREATE ROLE u1 NOLOGIN; NOTICE: resource queue required -- using default resource queue "pg_default" CREATE ROLE u2 NOLOGIN; NOTICE: resource queue required -- using default resource queue "pg_default" -CREATE TABLE b (t TEXT); +CREATE TABLE b (t TEXT) DISTRIBUTED BY (t); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 't' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. ALTER TABLE b OWNER TO u1; -CREATE TABLE b2 (t TEXT); +CREATE TABLE b2 (t TEXT) DISTRIBUTED BY (t); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 't' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
ALTER TABLE b2 OWNER TO u1; diff --git a/upgrade_test/expected/test_delete_quota.out b/upgrade_test/expected/test_delete_quota.out index cbc3928ed9a..cfa97bab5b0 100644 --- a/upgrade_test/expected/test_delete_quota.out +++ b/upgrade_test/expected/test_delete_quota.out @@ -2,7 +2,7 @@ -- CREATE SCHEMA deleteschema; -- SELECT diskquota.set_schema_quota('deleteschema', '1 MB'); SET search_path TO deleteschema; -CREATE TABLE c (i INT); +CREATE TABLE c (i INT) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. -- expect failed diff --git a/upgrade_test/expected/test_rename.out b/upgrade_test/expected/test_rename.out index 198d5609c63..c91de17882b 100644 --- a/upgrade_test/expected/test_rename.out +++ b/upgrade_test/expected/test_rename.out @@ -2,7 +2,7 @@ -- CREATE SCHEMA srs1; -- SELECT diskquota.set_schema_quota('srs1', '1 MB'); set search_path to srs1; -CREATE TABLE a(i int); +CREATE TABLE a(i int) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. -- expect insert fail @@ -34,7 +34,7 @@ DROP SCHEMA srs2; -- CREATE ROLE srerole NOLOGIN; -- SELECT diskquota.set_role_quota('srerole', '1MB'); SET search_path TO srr1; -CREATE TABLE a(i int); +CREATE TABLE a(i int) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. ALTER TABLE a OWNER TO srerole; diff --git a/upgrade_test/expected/test_reschema.out b/upgrade_test/expected/test_reschema.out index 0bbe07c3881..10d280dd0ee 100644 --- a/upgrade_test/expected/test_reschema.out +++ b/upgrade_test/expected/test_reschema.out @@ -2,7 +2,7 @@ -- CREATE SCHEMA srE; -- SELECT diskquota.set_schema_quota('srE', '1 MB'); SET search_path TO srE; -CREATE TABLE a(i int); +CREATE TABLE a(i int) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
-- expect insert fail diff --git a/upgrade_test/expected/test_role.out b/upgrade_test/expected/test_role.out index beee72011a3..df883066aa0 100644 --- a/upgrade_test/expected/test_role.out +++ b/upgrade_test/expected/test_role.out @@ -4,9 +4,9 @@ -- -- CREATE ROLE u1 NOLOGIN; -- CREATE ROLE u2 NOLOGIN; --- CREATE TABLE b (t TEXT); +-- CREATE TABLE b (t TEXT) DISTRIBUTED BY (t); -- ALTER TABLE b OWNER TO u1; --- CREATE TABLE b2 (t TEXT); +-- CREATE TABLE b2 (t TEXT) DISTRIBUTED BY (t); -- ALTER TABLE b2 OWNER TO u1; -- -- SELECT diskquota.set_role_quota('u1', '1 MB'); diff --git a/upgrade_test/expected/test_schema.out b/upgrade_test/expected/test_schema.out index aa2011dba28..ab4864ad472 100644 --- a/upgrade_test/expected/test_schema.out +++ b/upgrade_test/expected/test_schema.out @@ -2,7 +2,7 @@ -- CREATE SCHEMA s1; -- SELECT diskquota.set_schema_quota('s1', '1 MB'); SET search_path TO s1; -CREATE TABLE a(i int); +CREATE TABLE a(i int) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT INTO a SELECT generate_series(1,100); @@ -17,7 +17,7 @@ SELECT pg_sleep(5); -- expect insert fail INSERT INTO a SELECT generate_series(1,100); ERROR: schema's disk space quota exceeded with name:s1 -CREATE TABLE a2(i int); +CREATE TABLE a2(i int) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. -- expect insert fail diff --git a/upgrade_test/expected/test_temp_role.out b/upgrade_test/expected/test_temp_role.out index f867127339f..f3a415a54b4 100644 --- a/upgrade_test/expected/test_temp_role.out +++ b/upgrade_test/expected/test_temp_role.out @@ -3,7 +3,7 @@ -- CREATE ROLE u3temp NOLOGIN; SET search_path TO strole; -- SELECT diskquota.set_role_quota('u3temp', '1MB'); -CREATE TABLE a(i int); +CREATE TABLE a(i int) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
ALTER TABLE a OWNER TO u3temp; diff --git a/upgrade_test/sql/prepare.sql b/upgrade_test/sql/prepare.sql index 0782a7e261e..f3de240d784 100644 --- a/upgrade_test/sql/prepare.sql +++ b/upgrade_test/sql/prepare.sql @@ -11,7 +11,7 @@ CREATE SCHEMA badquota; SELECT diskquota.set_schema_quota('badquota', '1 MB'); DROP ROLE IF EXISTS testbody; CREATE ROLE testbody; -CREATE TABLE badquota.t1(i INT); +CREATE TABLE badquota.t1(i INT) DISTRIBUTED BY (i); ALTER TABLE badquota.t1 OWNER TO testbody; INSERT INTO badquota.t1 SELECT generate_series(0, 100000); SELECT pg_sleep(10); @@ -24,7 +24,7 @@ CREATE SCHEMA badbody_schema; DROP ROLE IF EXISTS badbody; CREATE ROLE badbody; SELECT diskquota.set_role_quota('badbody', '2 MB'); -CREATE TABLE badbody_schema.t2(i INT); +CREATE TABLE badbody_schema.t2(i INT) DISTRIBUTED BY (i); ALTER TABLE badbody_schema.t2 OWNER TO badbody; INSERT INTO badbody_schema.t2 SELECT generate_series(0, 100000); SELECT pg_sleep(10); diff --git a/upgrade_test/sql/set_config.sql b/upgrade_test/sql/set_config.sql index 316dcc913ca..56d171e8f18 100644 --- a/upgrade_test/sql/set_config.sql +++ b/upgrade_test/sql/set_config.sql @@ -22,9 +22,9 @@ DROP ROLE IF EXISTS u1; DROP ROLE IF EXISTS u2; CREATE ROLE u1 NOLOGIN; CREATE ROLE u2 NOLOGIN; -CREATE TABLE b (t TEXT); +CREATE TABLE b (t TEXT) DISTRIBUTED BY (t); ALTER TABLE b OWNER TO u1; -CREATE TABLE b2 (t TEXT); +CREATE TABLE b2 (t TEXT) DISTRIBUTED BY (t); ALTER TABLE b2 OWNER TO u1; SELECT diskquota.set_role_quota('u1', '1 MB'); diff --git a/upgrade_test/sql/test_delete_quota.sql b/upgrade_test/sql/test_delete_quota.sql index 5f5abfc99f1..19151824e8c 100644 --- a/upgrade_test/sql/test_delete_quota.sql +++ b/upgrade_test/sql/test_delete_quota.sql @@ -3,7 +3,7 @@ -- SELECT diskquota.set_schema_quota('deleteschema', '1 MB'); SET search_path TO deleteschema; -CREATE TABLE c (i INT); +CREATE TABLE c (i INT) DISTRIBUTED BY (i); -- expect failed INSERT INTO c SELECT generate_series(1,100000); SELECT pg_sleep(10); diff --git a/upgrade_test/sql/test_rename.sql b/upgrade_test/sql/test_rename.sql index 394592322a1..5c2ece9df30 100644 --- a/upgrade_test/sql/test_rename.sql +++ b/upgrade_test/sql/test_rename.sql @@ -2,7 +2,7 @@ -- CREATE SCHEMA srs1; -- SELECT diskquota.set_schema_quota('srs1', '1 MB'); set search_path to srs1; -CREATE TABLE a(i int); +CREATE TABLE a(i int) DISTRIBUTED BY (i); -- expect insert fail INSERT INTO a SELECT generate_series(1,100000); SELECT pg_sleep(5); @@ -27,7 +27,7 @@ DROP SCHEMA srs2; -- CREATE ROLE srerole NOLOGIN; -- SELECT diskquota.set_role_quota('srerole', '1MB'); SET search_path TO srr1; -CREATE TABLE a(i int); +CREATE TABLE a(i int) DISTRIBUTED BY (i); ALTER TABLE a OWNER TO srerole; -- expect insert fail diff --git a/upgrade_test/sql/test_reschema.sql b/upgrade_test/sql/test_reschema.sql index 0c5dca3e1e0..73909297e2f 100644 --- a/upgrade_test/sql/test_reschema.sql +++ b/upgrade_test/sql/test_reschema.sql @@ -2,7 +2,7 @@ -- CREATE SCHEMA srE; -- SELECT diskquota.set_schema_quota('srE', '1 MB'); SET search_path TO srE; -CREATE TABLE a(i int); +CREATE TABLE a(i int) DISTRIBUTED BY (i); -- expect insert fail INSERT INTO a SELECT generate_series(1,100000); SELECT pg_sleep(5); diff --git a/upgrade_test/sql/test_role.sql b/upgrade_test/sql/test_role.sql index 39881adbf31..08d19423486 100644 --- a/upgrade_test/sql/test_role.sql +++ b/upgrade_test/sql/test_role.sql @@ -5,9 +5,9 @@ -- -- CREATE ROLE u1 NOLOGIN; -- CREATE ROLE u2 NOLOGIN; --- CREATE TABLE b (t TEXT); +-- CREATE TABLE b (t TEXT) DISTRIBUTED BY (t); --
ALTER TABLE b OWNER TO u1; --- CREATE TABLE b2 (t TEXT); +-- CREATE TABLE b2 (t TEXT) DISTRIBUTED BY (t); -- ALTER TABLE b2 OWNER TO u1; -- -- SELECT diskquota.set_role_quota('u1', '1 MB'); diff --git a/upgrade_test/sql/test_schema.sql b/upgrade_test/sql/test_schema.sql index ace95c36736..b56ad59ef05 100644 --- a/upgrade_test/sql/test_schema.sql +++ b/upgrade_test/sql/test_schema.sql @@ -3,14 +3,14 @@ -- SELECT diskquota.set_schema_quota('s1', '1 MB'); SET search_path TO s1; -CREATE TABLE a(i int); +CREATE TABLE a(i int) DISTRIBUTED BY (i); INSERT INTO a SELECT generate_series(1,100); -- expect insert fail INSERT INTO a SELECT generate_series(1,100000); SELECT pg_sleep(5); -- expect insert fail INSERT INTO a SELECT generate_series(1,100); -CREATE TABLE a2(i int); +CREATE TABLE a2(i int) DISTRIBUTED BY (i); -- expect insert fail INSERT INTO a2 SELECT generate_series(1,100); diff --git a/upgrade_test/sql/test_temp_role.sql b/upgrade_test/sql/test_temp_role.sql index 9af1d9723d5..6f4b52298e8 100644 --- a/upgrade_test/sql/test_temp_role.sql +++ b/upgrade_test/sql/test_temp_role.sql @@ -4,7 +4,7 @@ SET search_path TO strole; -- SELECT diskquota.set_role_quota('u3temp', '1MB'); -CREATE TABLE a(i int); +CREATE TABLE a(i int) DISTRIBUTED BY (i); ALTER TABLE a OWNER TO u3temp; CREATE TEMP TABLE ta(i int); ALTER TABLE ta OWNER TO u3temp; From 0b00d79ccfc7a719bf6746f89ba1f30f8194ac8e Mon Sep 17 00:00:00 2001 From: Chen Mulong Date: Mon, 21 Feb 2022 22:25:03 +0800 Subject: [PATCH 129/330] Simplify the build script (#148) No need to have the switch case. --- concourse/scripts/build_diskquota.sh | 23 ++--------------------- 1 file changed, 2 insertions(+), 21 deletions(-) diff --git a/concourse/scripts/build_diskquota.sh b/concourse/scripts/build_diskquota.sh index 15eb94eb800..66638baebf8 100755 --- a/concourse/scripts/build_diskquota.sh +++ b/concourse/scripts/build_diskquota.sh @@ -20,33 +20,14 @@ function pkg() { pushd /usr/local/greenplum-db-devel/ echo 'cp -r lib share $GPHOME || exit 1'> install_gpdb_component chmod a+x install_gpdb_component - install_files=( \ + tar -czf "$TOP_DIR/diskquota_artifacts/diskquota-${DISKQUOTA_VERSION}-${DISKQUOTA_OS}_x86_64.tar.gz" \ "lib/postgresql/diskquota.so" \ "share/postgresql/extension/diskquota.control" \ "share/postgresql/extension/diskquota--1.0.sql" \ "share/postgresql/extension/diskquota--2.0.sql" \ "share/postgresql/extension/diskquota--1.0--2.0.sql" \ "share/postgresql/extension/diskquota--2.0--1.0.sql" \ - "install_gpdb_component") - case "$DISKQUOTA_OS" in - rhel6) - tar -czf $TOP_DIR/diskquota_artifacts/diskquota-${DISKQUOTA_VERSION}-rhel6_x86_64.tar.gz \ - "${install_files[@]}" - ;; - rhel7) - tar -czf $TOP_DIR/diskquota_artifacts/diskquota-${DISKQUOTA_VERSION}-rhel7_x86_64.tar.gz \ - "${install_files[@]}" - ;; - rhel8) - tar -czf $TOP_DIR/diskquota_artifacts/diskquota-${DISKQUOTA_VERSION}-rhel8_x86_64.tar.gz \ - "${install_files[@]}" - ;; - ubuntu18.04) - tar -czf $TOP_DIR/diskquota_artifacts/diskquota-${DISKQUOTA_VERSION}-ubuntu18.04_x86_64.tar.gz \ - "${install_files[@]}" - ;; - *) echo "Unknown OS: $DISKQUOTA_OS"; exit 1 ;; - esac + "install_gpdb_component" popd } From 8861006dcc3290523d9e6a6da902d83718a844ac Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Xuebin=20Su=20=28=E8=8B=8F=E5=AD=A6=E6=96=8C=29?= <12034000+xuebinsu@users.noreply.github.com> Date: Tue, 22 Feb 2022 10:23:29 +0800 Subject: [PATCH 130/330] Revert greenplum-db/diskquota#138 (#150) Revert "Extend fetch_table_stat() to update db cache (#138)" This reverts commit
e4a19d70db77053aca3226144052699ce72f406d. --- diskquota--1.0.sql | 5 +++ diskquota--2.0.sql | 5 +++ diskquota.c | 20 +++++++-- diskquota.h | 4 +- diskquota_utility.c | 38 ++++++++-------- gp_activetable.c | 29 +++---------- gp_activetable.h | 1 - quotamodel.c | 15 +++---- tests/regress/diskquota_schedule | 1 - .../regress/expected/test_update_db_cache.out | 43 ------------------- tests/regress/sql/test_update_db_cache.sql | 36 ---------------- 11 files changed, 59 insertions(+), 138 deletions(-) delete mode 100644 tests/regress/expected/test_update_db_cache.out delete mode 100644 tests/regress/sql/test_update_db_cache.sql diff --git a/diskquota--1.0.sql b/diskquota--1.0.sql index 6d950849cd0..282758a8a14 100644 --- a/diskquota--1.0.sql +++ b/diskquota--1.0.sql @@ -21,6 +21,11 @@ RETURNS void STRICT AS 'MODULE_PATHNAME' LANGUAGE C; +CREATE FUNCTION diskquota.update_diskquota_db_list(oid, int4) +RETURNS void STRICT +AS 'MODULE_PATHNAME' +LANGUAGE C; + CREATE TABLE diskquota.table_size (tableid oid, size bigint, PRIMARY KEY(tableid)); CREATE TABLE diskquota.state (state int, PRIMARY KEY(state)) DISTRIBUTED BY (state); diff --git a/diskquota--2.0.sql b/diskquota--2.0.sql index 34a9ee811b1..c5a1c8ee6df 100644 --- a/diskquota--2.0.sql +++ b/diskquota--2.0.sql @@ -43,6 +43,11 @@ RETURNS void STRICT AS 'MODULE_PATHNAME' LANGUAGE C; +CREATE FUNCTION diskquota.update_diskquota_db_list(oid, int4) +RETURNS void STRICT +AS 'MODULE_PATHNAME' +LANGUAGE C; + CREATE TYPE diskquota.blackmap_entry AS (target_oid oid, database_oid oid, tablespace_oid oid, target_type integer, seg_exceeded boolean); CREATE FUNCTION diskquota.refresh_blackmap(diskquota.blackmap_entry[], oid[]) diff --git a/diskquota.c b/diskquota.c index 50df27e3b71..060cce150e3 100644 --- a/diskquota.c +++ b/diskquota.c @@ -132,15 +132,15 @@ _PG_init(void) init_disk_quota_enforcement(); init_active_table_hook(); - /* Add dq_object_access_hook to handle drop extension event. */ - register_diskquota_object_access_hook(); - /* start disk quota launcher only on master */ if (!IS_QUERY_DISPATCHER()) { return; } + /* Add dq_object_access_hook to handle drop extension event. 
*/ + register_diskquota_object_access_hook(); + /* set up common data for diskquota launcher worker */ worker.bgw_flags = BGWORKER_SHMEM_ACCESS | BGWORKER_BACKEND_DATABASE_CONNECTION; @@ -548,7 +548,10 @@ create_monitor_db_table(void) bool ret = true; sql = "create schema if not exists diskquota_namespace;" - "create table if not exists diskquota_namespace.database_list(dbid oid not null unique);"; + "create table if not exists diskquota_namespace.database_list(dbid oid not null unique);" + "create schema if not exists diskquota;" + "create or replace function diskquota.update_diskquota_db_list(oid, int4) returns void " + "strict as '$libdir/diskquota' language C;"; StartTransactionCommand(); @@ -914,6 +917,15 @@ del_dbid_from_database_list(Oid dbid) ereport(ERROR, (errmsg("[diskquota launcher] SPI_execute sql:'%s', errno:%d", str.data, errno))); } pfree(str.data); + + /* clean the dbid from shared memory*/ + initStringInfo(&str); + appendStringInfo(&str, "select gp_segment_id, diskquota.update_diskquota_db_list(%u, 1)" + " from gp_dist_random('gp_id');", dbid); + ret = SPI_execute(str.data, true, 0); + if (ret != SPI_OK_SELECT) + ereport(ERROR, (errmsg("[diskquota launcher] SPI_execute sql:'%s', errno:%d", str.data, errno))); + pfree(str.data); } /* diff --git a/diskquota.h b/diskquota.h index d9e596e7556..298f1c6f5e4 100644 --- a/diskquota.h +++ b/diskquota.h @@ -32,9 +32,7 @@ typedef enum typedef enum { FETCH_ACTIVE_OID, /* fetch active table list */ - FETCH_ACTIVE_SIZE, /* fetch size for active tables */ - ADD_DB_TO_MONITOR, - REMOVE_DB_FROM_BEING_MONITORED, + FETCH_ACTIVE_SIZE /* fetch size for active tables */ } FetchTableStatType; typedef enum diff --git a/diskquota_utility.c b/diskquota_utility.c index 8f093cc9c53..ba62e2c2c41 100644 --- a/diskquota_utility.c +++ b/diskquota_utility.c @@ -56,6 +56,7 @@ PG_FUNCTION_INFO_V1(set_schema_quota); PG_FUNCTION_INFO_V1(set_role_quota); PG_FUNCTION_INFO_V1(set_schema_tablespace_quota); PG_FUNCTION_INFO_V1(set_role_tablespace_quota); +PG_FUNCTION_INFO_V1(update_diskquota_db_list); PG_FUNCTION_INFO_V1(set_per_segment_quota); PG_FUNCTION_INFO_V1(relation_size_local); @@ -499,17 +500,6 @@ dq_object_access_hook(ObjectAccessType access, Oid classId, if (oid != objectId) goto out; - /* - * Remove the current database from monitored db cache - * on all segments and on coordinator. - */ - update_diskquota_db_list(MyDatabaseId, HASH_REMOVE); - - if (!IS_QUERY_DISPATCHER()) - { - return; - } - /* * Lock on extension_ddl_lock to avoid multiple backend create diskquota * extension at the same time. 
@@ -1023,29 +1013,38 @@ get_size_in_mb(char *str) /* * Function to update the db list on each segment - * Will print a WARNING to log if out of memory */ -void -update_diskquota_db_list(Oid dbid, HASHACTION action) +Datum +update_diskquota_db_list(PG_FUNCTION_ARGS) { + Oid dbid = PG_GETARG_OID(0); + int mode = PG_GETARG_INT32(1); bool found = false; + if (!superuser()) + { + ereport(ERROR, + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + errmsg("must be superuser to update db list"))); + } + /* add/remove the dbid to monitoring database cache to filter out table not under * monitoring in hook functions */ LWLockAcquire(diskquota_locks.monitoring_dbid_cache_lock, LW_EXCLUSIVE); - if (action == HASH_ENTER) + if (mode == 0) { Oid *entry = NULL; - entry = hash_search(monitoring_dbid_cache, &dbid, HASH_ENTER_NULL, &found); - if (entry == NULL) + entry = hash_search(monitoring_dbid_cache, &dbid, HASH_ENTER, &found); + elog(WARNING, "add dbid %u into SHM", dbid); + if (!found && entry == NULL) { ereport(WARNING, (errmsg("can't alloc memory on dbid cache, there ary too many databases to monitor"))); } } - else if (action == HASH_REMOVE) + else if (mode == 1) { hash_search(monitoring_dbid_cache, &dbid, HASH_REMOVE, &found); if (!found) @@ -1055,6 +1054,9 @@ update_diskquota_db_list(Oid dbid, HASHACTION action) } } LWLockRelease(diskquota_locks.monitoring_dbid_cache_lock); + + PG_RETURN_VOID(); + } /* diff --git a/gp_activetable.c b/gp_activetable.c index cbeb772f55d..352d998d7bd 100644 --- a/gp_activetable.c +++ b/gp_activetable.c @@ -348,22 +348,11 @@ gp_fetch_active_tables(bool is_init) /* * Function to get the table size from each segments - * There are 4 modes: - * - * - FETCH_ACTIVE_OID: gather active table oid from all the segments, since - * table may only be modified on a subset of the segments, we need to firstly - * gather the active table oid list from all the segments. - * - * - FETCH_ACTIVE_SIZE: calculate the active table size based on the active - * table oid list. - * - * - ADD_DB_TO_MONITOR: add MyDatabaseId to the monitored db cache so that - * active tables in the current database will be recorded. This is used each - * time a worker starts. - * - * - REMOVE_DB_FROM_BEING_MONITORED: remove MyDatabaseId from the monitored - * db cache so that active tables in the current database will be recorded. - * This is used when DROP EXTENSION. + * There are three mode: + * 1. gather active table oid from all the segments, since table may only + * be modified on a subset of the segments, we need to firstly gather the + * active table oid list from all the segments. + * 2. calculate the active table size based on the active table oid list. */ Datum diskquota_fetch_table_stat(PG_FUNCTION_ARGS) @@ -411,12 +400,6 @@ diskquota_fetch_table_stat(PG_FUNCTION_ARGS) case FETCH_ACTIVE_SIZE: localCacheTable = get_active_tables_stats(PG_GETARG_ARRAYTYPE_P(1)); break; - case ADD_DB_TO_MONITOR: - update_diskquota_db_list(MyDatabaseId, HASH_ENTER); - break; - case REMOVE_DB_FROM_BEING_MONITORED: - update_diskquota_db_list(MyDatabaseId, HASH_REMOVE); - break; default: ereport(ERROR, (errmsg("Unused mode number, transaction will be aborted"))); break; @@ -427,7 +410,7 @@ diskquota_fetch_table_stat(PG_FUNCTION_ARGS) * total number of active tables to be returned, each tuple contains * one active table stat */ - funcctx->max_calls = localCacheTable ? 
(uint32) hash_get_num_entries(localCacheTable) : 0; + funcctx->max_calls = (uint32) hash_get_num_entries(localCacheTable); /* * prepare attribute metadata for next calls that generate the tuple diff --git a/gp_activetable.h b/gp_activetable.h index 66ccc2916e8..c2b0cfcea6e 100644 --- a/gp_activetable.h +++ b/gp_activetable.h @@ -29,7 +29,6 @@ extern HTAB *gp_fetch_active_tables(bool force); extern void init_active_table_hook(void); extern void init_shm_worker_active_tables(void); extern void init_lock_active_tables(void); -extern void update_diskquota_db_list(Oid dbid, HASHACTION action); extern HTAB *active_tables_map; extern HTAB *monitoring_dbid_cache; diff --git a/quotamodel.c b/quotamodel.c index 5a2cf606312..2c0c360a726 100644 --- a/quotamodel.c +++ b/quotamodel.c @@ -612,18 +612,15 @@ do_check_diskquota_state_is_ready(void) int i; StringInfoData sql_command; + /* Add the dbid to watching list, so the hook can catch the table change*/ initStringInfo(&sql_command); - /* Add current database to the monitored db cache on all segments */ - appendStringInfo(&sql_command, - "SELECT diskquota.diskquota_fetch_table_stat(%d, ARRAY[]::oid[]) " - "FROM gp_dist_random('gp_id');", ADD_DB_TO_MONITOR); + appendStringInfo(&sql_command, "select gp_segment_id, diskquota.update_diskquota_db_list(%u, 0) from gp_dist_random('gp_id') UNION ALL select -1, diskquota.update_diskquota_db_list(%u, 0);", + MyDatabaseId, MyDatabaseId); ret = SPI_execute(sql_command.data, true, 0); - if (ret != SPI_OK_SELECT) - ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("[diskquota] check diskquota state SPI_execute failed: error code %d", ret))); + if (ret != SPI_OK_SELECT) + ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("[diskquota] check diskquota state SPI_execute failed: error code %d", ret))); pfree(sql_command.data); - /* Add current database to the monitored db cache on coordinator */ - update_diskquota_db_list(MyDatabaseId, HASH_ENTER); /* * check diskquota state from table diskquota.state errors will be catch * at upper level function. 
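For reference, this revert restores the SPI-dispatched registration path shown in the do_check_diskquota_state_is_ready() hunk above. A minimal sketch of the statement the worker ends up running, assuming a hypothetical database oid of 16384 (mode 0 adds the db to the per-segment cache, mode 1 removes it):

-- executed on the QD; gp_dist_random('gp_id') fans the call out to every segment,
-- while the UNION ALL branch applies the same update on the coordinator itself
SELECT gp_segment_id, diskquota.update_diskquota_db_list(16384, 0)
FROM gp_dist_random('gp_id')
UNION ALL
SELECT -1, diskquota.update_diskquota_db_list(16384, 0);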
diff --git a/tests/regress/diskquota_schedule b/tests/regress/diskquota_schedule index bae728b4904..45b2c147ceb 100644 --- a/tests/regress/diskquota_schedule +++ b/tests/regress/diskquota_schedule @@ -7,7 +7,6 @@ test: test_pause_and_resume test: test_pause_and_resume_multiple_db test: test_drop_after_pause test: test_show_status -test: test_update_db_cache # disable this tese due to GPDB behavior change # test: test_table_size test: test_fast_disk_check diff --git a/tests/regress/expected/test_update_db_cache.out b/tests/regress/expected/test_update_db_cache.out deleted file mode 100644 index a22374c48d6..00000000000 --- a/tests/regress/expected/test_update_db_cache.out +++ /dev/null @@ -1,43 +0,0 @@ ---start_ignore -CREATE DATABASE test_db_cache; ---end_ignore -\c test_db_cache -CREATE EXTENSION diskquota; -CREATE TABLE t(i) AS SELECT generate_series(1, 100000) -DISTRIBUTED BY (i); -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -SELECT tableid::regclass, size, segid -FROM diskquota.table_size -WHERE tableid = 't'::regclass -ORDER BY segid; - tableid | size | segid ----------+---------+------- - t | 3637248 | -1 - t | 1212416 | 0 - t | 1212416 | 1 - t | 1212416 | 2 -(4 rows) - -DROP EXTENSION diskquota; --- Create table without extension -CREATE TABLE t_no_extension(i) AS SELECT generate_series(1, 100000) -DISTRIBUTED BY (i); -CREATE EXTENSION diskquota; -WARNING: database is not empty, please run `select diskquota.init_table_size_table()` to initialize table_size information for diskquota extension. Note that for large database, this function may take a long time. --- Should find nothing since t_no_extension is not recorded. -SELECT diskquota.diskquota_fetch_table_stat(0, ARRAY[]::oid[]) -FROM gp_dist_random('gp_id'); - diskquota_fetch_table_stat ----------------------------- -(0 rows) - -DROP TABLE t; -DROP TABLE t_no_extension; -DROP EXTENSION diskquota; -\c contrib_regression -DROP DATABASE test_db_cache; diff --git a/tests/regress/sql/test_update_db_cache.sql b/tests/regress/sql/test_update_db_cache.sql deleted file mode 100644 index 5da48e137bf..00000000000 --- a/tests/regress/sql/test_update_db_cache.sql +++ /dev/null @@ -1,36 +0,0 @@ ---start_ignore -CREATE DATABASE test_db_cache; ---end_ignore - -\c test_db_cache -CREATE EXTENSION diskquota; - -CREATE TABLE t(i) AS SELECT generate_series(1, 100000) -DISTRIBUTED BY (i); - -SELECT diskquota.wait_for_worker_new_epoch(); - -SELECT tableid::regclass, size, segid -FROM diskquota.table_size -WHERE tableid = 't'::regclass -ORDER BY segid; - -DROP EXTENSION diskquota; - --- Create table without extension -CREATE TABLE t_no_extension(i) AS SELECT generate_series(1, 100000) -DISTRIBUTED BY (i); - -CREATE EXTENSION diskquota; - --- Should find nothing since t_no_extension is not recorded. 
-SELECT diskquota.diskquota_fetch_table_stat(0, ARRAY[]::oid[]) -FROM gp_dist_random('gp_id'); - -DROP TABLE t; -DROP TABLE t_no_extension; - -DROP EXTENSION diskquota; - -\c contrib_regression -DROP DATABASE test_db_cache; \ No newline at end of file From 725d0929ea2941fe17a099b6987d9a17ab0acd6d Mon Sep 17 00:00:00 2001 From: Chen Mulong Date: Tue, 22 Feb 2022 10:50:14 +0800 Subject: [PATCH 131/330] Add clang-format and editorconfig (#136) - clang-format is from https://groups.google.com/a/greenplum.org/g/gpdb-dev/c/rHb4DjSd1iI/m/FKPbqwYcBAAJ - See editorconfig at https://editorconfig.org/ --- .clang-format | 40 ++++++++++++++++++++++++++++++++++++++++ .editorconfig | 22 ++++++++++++++++++++++ 2 files changed, 62 insertions(+) create mode 100644 .clang-format create mode 100644 .editorconfig diff --git a/.clang-format b/.clang-format new file mode 100644 index 00000000000..72ce535cdc0 --- /dev/null +++ b/.clang-format @@ -0,0 +1,40 @@ +--- +BasedOnStyle: Google + +# How much whitespace? +UseTab: ForIndentation +TabWidth: 4 +IndentWidth: 4 +ContinuationIndentWidth: 8 + +SpacesBeforeTrailingComments: 1 + +# Line things up +AccessModifierOffset: -4 # outdent `public:`, etc + +DerivePointerAlignment: false +PointerAlignment: Right # char *foo, char &bar + +AlignConsecutiveAssignments: true +AlignConsecutiveDeclarations: true + +# Braces +AlwaysBreakAfterReturnType: TopLevelDefinitions +AllowShortFunctionsOnASingleLine: Inline +BreakBeforeBraces: Custom +BraceWrapping: + AfterStruct: true + AfterClass: true + AfterEnum: true + AfterFunction: true + AfterControlStatement: true + AfterNamespace: false + AfterExternBlock: false + BeforeCatch: true + SplitEmptyFunction: false + SplitEmptyRecord: false + +# Put "postgres.h" and "postgres_undefs.h" first in a group of includes. +IncludeCategories: +- Regex: '"postgres(_undefs)?.h"' + Priority: 1 diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 00000000000..ba9b81bbb1f --- /dev/null +++ b/.editorconfig @@ -0,0 +1,22 @@ +root = true + +[*.{c,cpp,h,y}] +indent_style = tab +indent_size = 4 + +[{GNUmakefile,Makefile}*] +indent_style = tab +indent_size = 4 + +[*.mk] +indent_style = tab +indent_size = 4 + +[*.py] +indent_style = space +indent_size = 4 + +[*.{dxl,mdp}] +indent_style = space +indent_size = 2 + From 9684b6350deb0a7d3849cf4cd202cd97c9b14882 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Xuebin=20Su=20=28=E8=8B=8F=E5=AD=A6=E6=96=8C=29?= <12034000+xuebinsu@users.noreply.github.com> Date: Tue, 22 Feb 2022 12:20:03 +0800 Subject: [PATCH 132/330] Extend fetch_table_stat() to update db cache v2 (#151) The db cache stores which databases enable diskquota. Active tables will be recorded only if they are in those databases. Previously, we created a new UDF update_diskquota_db_list() to add the current db to the cache. However, the UDF was installed in the wrong database. As a result, after the user upgrades from a previous version to 1.0.3, the bgworker does not find the UDF and can do nothing. This patch fixes the issue by removing update_diskquota_db_list() and using fetch_table_stat() to update the db cache. fetch_table_stat() has existed since version 1.0.0, so no new UDF is needed. This PR is a revision of PR #138, and depends on #130 to fix a race condition that occurs after CREATE EXTENSION.
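As a rough sketch of the new mechanism, the worker now piggybacks the db-cache update on the pre-existing UDF; assuming the FetchTableStatType ordering shown in the diskquota.h hunk below (ADD_DB_TO_MONITOR = 2, REMOVE_DB_FROM_BEING_MONITORED = 3), the dispatched statements look like:

-- register the current database in the monitored db cache on all segments
SELECT diskquota.diskquota_fetch_table_stat(2, ARRAY[]::oid[])
FROM gp_dist_random('gp_id');
-- remove it from the cache again, e.g. on DROP EXTENSION
SELECT diskquota.diskquota_fetch_table_stat(3, ARRAY[]::oid[])
FROM gp_dist_random('gp_id');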
Co-authored-by: Chen Mulong --- diskquota--2.0.sql | 5 -- diskquota.c | 20 ++----- diskquota.h | 4 +- diskquota_utility.c | 38 +++++++------ gp_activetable.c | 29 +++++++--- gp_activetable.h | 1 + quotamodel.c | 17 +++--- tests/regress/diskquota_schedule | 1 + .../regress/expected/test_update_db_cache.out | 54 +++++++++++++++++++ tests/regress/sql/test_update_db_cache.sql | 43 +++++++++++++++ 10 files changed, 158 insertions(+), 54 deletions(-) create mode 100644 tests/regress/expected/test_update_db_cache.out create mode 100644 tests/regress/sql/test_update_db_cache.sql diff --git a/diskquota--2.0.sql b/diskquota--2.0.sql index c5a1c8ee6df..34a9ee811b1 100644 --- a/diskquota--2.0.sql +++ b/diskquota--2.0.sql @@ -43,11 +43,6 @@ RETURNS void STRICT AS 'MODULE_PATHNAME' LANGUAGE C; -CREATE FUNCTION diskquota.update_diskquota_db_list(oid, int4) -RETURNS void STRICT -AS 'MODULE_PATHNAME' -LANGUAGE C; - CREATE TYPE diskquota.blackmap_entry AS (target_oid oid, database_oid oid, tablespace_oid oid, target_type integer, seg_exceeded boolean); CREATE FUNCTION diskquota.refresh_blackmap(diskquota.blackmap_entry[], oid[]) diff --git a/diskquota.c b/diskquota.c index 060cce150e3..50df27e3b71 100644 --- a/diskquota.c +++ b/diskquota.c @@ -132,15 +132,15 @@ _PG_init(void) init_disk_quota_enforcement(); init_active_table_hook(); + /* Add dq_object_access_hook to handle drop extension event. */ + register_diskquota_object_access_hook(); + /* start disk quota launcher only on master */ if (!IS_QUERY_DISPATCHER()) { return; } - /* Add dq_object_access_hook to handle drop extension event. */ - register_diskquota_object_access_hook(); - /* set up common data for diskquota launcher worker */ worker.bgw_flags = BGWORKER_SHMEM_ACCESS | BGWORKER_BACKEND_DATABASE_CONNECTION; @@ -548,10 +548,7 @@ create_monitor_db_table(void) bool ret = true; sql = "create schema if not exists diskquota_namespace;" - "create table if not exists diskquota_namespace.database_list(dbid oid not null unique);" - "create schema if not exists diskquota;" - "create or replace function diskquota.update_diskquota_db_list(oid, int4) returns void " - "strict as '$libdir/diskquota' language C;"; + "create table if not exists diskquota_namespace.database_list(dbid oid not null unique);"; StartTransactionCommand(); @@ -917,15 +914,6 @@ del_dbid_from_database_list(Oid dbid) ereport(ERROR, (errmsg("[diskquota launcher] SPI_execute sql:'%s', errno:%d", str.data, errno))); } pfree(str.data); - - /* clean the dbid from shared memory*/ - initStringInfo(&str); - appendStringInfo(&str, "select gp_segment_id, diskquota.update_diskquota_db_list(%u, 1)" - " from gp_dist_random('gp_id');", dbid); - ret = SPI_execute(str.data, true, 0); - if (ret != SPI_OK_SELECT) - ereport(ERROR, (errmsg("[diskquota launcher] SPI_execute sql:'%s', errno:%d", str.data, errno))); - pfree(str.data); } /* diff --git a/diskquota.h b/diskquota.h index 298f1c6f5e4..d9e596e7556 100644 --- a/diskquota.h +++ b/diskquota.h @@ -32,7 +32,9 @@ typedef enum typedef enum { FETCH_ACTIVE_OID, /* fetch active table list */ - FETCH_ACTIVE_SIZE /* fetch size for active tables */ + FETCH_ACTIVE_SIZE, /* fetch size for active tables */ + ADD_DB_TO_MONITOR, + REMOVE_DB_FROM_BEING_MONITORED, } FetchTableStatType; typedef enum diff --git a/diskquota_utility.c b/diskquota_utility.c index ba62e2c2c41..8f093cc9c53 100644 --- a/diskquota_utility.c +++ b/diskquota_utility.c @@ -56,7 +56,6 @@ PG_FUNCTION_INFO_V1(set_schema_quota); PG_FUNCTION_INFO_V1(set_role_quota); 
PG_FUNCTION_INFO_V1(set_schema_tablespace_quota); PG_FUNCTION_INFO_V1(set_role_tablespace_quota); -PG_FUNCTION_INFO_V1(update_diskquota_db_list); PG_FUNCTION_INFO_V1(set_per_segment_quota); PG_FUNCTION_INFO_V1(relation_size_local); @@ -500,6 +499,17 @@ dq_object_access_hook(ObjectAccessType access, Oid classId, if (oid != objectId) goto out; + /* + * Remove the current database from monitored db cache + * on all segments and on coordinator. + */ + update_diskquota_db_list(MyDatabaseId, HASH_REMOVE); + + if (!IS_QUERY_DISPATCHER()) + { + return; + } + /* * Lock on extension_ddl_lock to avoid multiple backend create diskquota * extension at the same time. @@ -1013,38 +1023,29 @@ get_size_in_mb(char *str) /* * Function to update the db list on each segment + * Will print a WARNING to log if out of memory */ -Datum -update_diskquota_db_list(PG_FUNCTION_ARGS) +void +update_diskquota_db_list(Oid dbid, HASHACTION action) { - Oid dbid = PG_GETARG_OID(0); - int mode = PG_GETARG_INT32(1); bool found = false; - if (!superuser()) - { - ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("must be superuser to update db list"))); - } - /* add/remove the dbid to monitoring database cache to filter out table not under * monitoring in hook functions */ LWLockAcquire(diskquota_locks.monitoring_dbid_cache_lock, LW_EXCLUSIVE); - if (mode == 0) + if (action == HASH_ENTER) { Oid *entry = NULL; - entry = hash_search(monitoring_dbid_cache, &dbid, HASH_ENTER, &found); - elog(WARNING, "add dbid %u into SHM", dbid); - if (!found && entry == NULL) + entry = hash_search(monitoring_dbid_cache, &dbid, HASH_ENTER_NULL, &found); + if (entry == NULL) { ereport(WARNING, (errmsg("can't alloc memory on dbid cache, there ary too many databases to monitor"))); } } - else if (mode == 1) + else if (action == HASH_REMOVE) { hash_search(monitoring_dbid_cache, &dbid, HASH_REMOVE, &found); if (!found) @@ -1054,9 +1055,6 @@ update_diskquota_db_list(PG_FUNCTION_ARGS) { } } LWLockRelease(diskquota_locks.monitoring_dbid_cache_lock); - - PG_RETURN_VOID(); - } /* diff --git a/gp_activetable.c b/gp_activetable.c index 352d998d7bd..cbeb772f55d 100644 --- a/gp_activetable.c +++ b/gp_activetable.c @@ -348,11 +348,22 @@ gp_fetch_active_tables(bool is_init) /* * Function to get the table size from each segments - * There are three mode: - * 1. gather active table oid from all the segments, since table may only - * be modified on a subset of the segments, we need to firstly gather the - * active table oid list from all the segments. - * 2. calculate the active table size based on the active table oid list. + * There are 4 modes: + * + * - FETCH_ACTIVE_OID: gather active table oid from all the segments, since + * table may only be modified on a subset of the segments, we need to firstly + * gather the active table oid list from all the segments. + * + * - FETCH_ACTIVE_SIZE: calculate the active table size based on the active + * table oid list. + * + * - ADD_DB_TO_MONITOR: add MyDatabaseId to the monitored db cache so that + * active tables in the current database will be recorded. This is used each + * time a worker starts. + * + * - REMOVE_DB_FROM_BEING_MONITORED: remove MyDatabaseId from the monitored + * db cache so that active tables in the current database will no longer be + * recorded. This is used when DROP EXTENSION.
*/ Datum diskquota_fetch_table_stat(PG_FUNCTION_ARGS) @@ -400,6 +411,12 @@ diskquota_fetch_table_stat(PG_FUNCTION_ARGS) case FETCH_ACTIVE_SIZE: localCacheTable = get_active_tables_stats(PG_GETARG_ARRAYTYPE_P(1)); break; + case ADD_DB_TO_MONITOR: + update_diskquota_db_list(MyDatabaseId, HASH_ENTER); + break; + case REMOVE_DB_FROM_BEING_MONITORED: + update_diskquota_db_list(MyDatabaseId, HASH_REMOVE); + break; default: ereport(ERROR, (errmsg("Unused mode number, transaction will be aborted"))); break; @@ -410,7 +427,7 @@ diskquota_fetch_table_stat(PG_FUNCTION_ARGS) * total number of active tables to be returned, each tuple contains * one active table stat */ - funcctx->max_calls = (uint32) hash_get_num_entries(localCacheTable); + funcctx->max_calls = localCacheTable ? (uint32) hash_get_num_entries(localCacheTable) : 0; /* * prepare attribute metadata for next calls that generate the tuple diff --git a/gp_activetable.h b/gp_activetable.h index c2b0cfcea6e..66ccc2916e8 100644 --- a/gp_activetable.h +++ b/gp_activetable.h @@ -29,6 +29,7 @@ extern HTAB *gp_fetch_active_tables(bool force); extern void init_active_table_hook(void); extern void init_shm_worker_active_tables(void); extern void init_lock_active_tables(void); +extern void update_diskquota_db_list(Oid dbid, HASHACTION action); extern HTAB *active_tables_map; extern HTAB *monitoring_dbid_cache; diff --git a/quotamodel.c b/quotamodel.c index 2c0c360a726..e63ab2bf6a5 100644 --- a/quotamodel.c +++ b/quotamodel.c @@ -612,15 +612,20 @@ do_check_diskquota_state_is_ready(void) int i; StringInfoData sql_command; - /* Add the dbid to watching list, so the hook can catch the table change*/ initStringInfo(&sql_command); - appendStringInfo(&sql_command, "select gp_segment_id, diskquota.update_diskquota_db_list(%u, 0) from gp_dist_random('gp_id') UNION ALL select -1, diskquota.update_diskquota_db_list(%u, 0);", - MyDatabaseId, MyDatabaseId); + /* Add current database to the monitored db cache on all segments */ + appendStringInfo(&sql_command, + "SELECT diskquota.diskquota_fetch_table_stat(%d, ARRAY[]::oid[]) " + "FROM gp_dist_random('gp_id');", ADD_DB_TO_MONITOR); ret = SPI_execute(sql_command.data, true, 0); - if (ret != SPI_OK_SELECT) - ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("[diskquota] check diskquota state SPI_execute failed: error code %d", ret))); + if (ret != SPI_OK_SELECT) { + pfree(sql_command.data); + ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("[diskquota] check diskquota state SPI_execute failed: error code %d", ret))); + } pfree(sql_command.data); + /* Add current database to the monitored db cache on coordinator */ + update_diskquota_db_list(MyDatabaseId, HASH_ENTER); /* * check diskquota state from table diskquota.state errors will be catch * at upper level function. 
diff --git a/tests/regress/diskquota_schedule b/tests/regress/diskquota_schedule index 45b2c147ceb..bae728b4904 100644 --- a/tests/regress/diskquota_schedule +++ b/tests/regress/diskquota_schedule @@ -7,6 +7,7 @@ test: test_pause_and_resume test: test_pause_and_resume_multiple_db test: test_drop_after_pause test: test_show_status +test: test_update_db_cache # disable this tese due to GPDB behavior change # test: test_table_size test: test_fast_disk_check diff --git a/tests/regress/expected/test_update_db_cache.out b/tests/regress/expected/test_update_db_cache.out new file mode 100644 index 00000000000..49837c93292 --- /dev/null +++ b/tests/regress/expected/test_update_db_cache.out @@ -0,0 +1,54 @@ +--start_ignore +CREATE DATABASE test_db_cache; +--end_ignore +\c test_db_cache +CREATE EXTENSION diskquota; +CREATE TABLE t(i) AS SELECT generate_series(1, 100000) +DISTRIBUTED BY (i); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid +FROM diskquota.table_size +WHERE tableid = 't'::regclass +ORDER BY segid; + tableid | size | segid +---------+---------+------- + t | 3637248 | -1 + t | 1212416 | 0 + t | 1212416 | 1 + t | 1212416 | 2 +(4 rows) + +DROP EXTENSION diskquota; +-- Create table without extension +CREATE TABLE t_no_extension(i) AS SELECT generate_series(1, 100000) +DISTRIBUTED BY (i); +CREATE EXTENSION diskquota; +WARNING: database is not empty, please run `select diskquota.init_table_size_table()` to initialize table_size information for diskquota extension. Note that for large database, this function may take a long time. +-- Sleep until the worker adds the current db to cache so that it can be found +-- when DROP EXTENSION. +-- FIXME: We cannot use wait_for_worker_new_epoch() here because +-- diskquota.state is not clean. Change sleep() to wait() after removing +-- diskquota.state +SELECT pg_sleep(1); + pg_sleep +---------- + +(1 row) + +-- Should find nothing since t_no_extension is not recorded. +SELECT diskquota.diskquota_fetch_table_stat(0, ARRAY[]::oid[]) +FROM gp_dist_random('gp_id'); + diskquota_fetch_table_stat +---------------------------- +(0 rows) + +DROP TABLE t; +DROP TABLE t_no_extension; +DROP EXTENSION diskquota; +\c contrib_regression +DROP DATABASE test_db_cache; diff --git a/tests/regress/sql/test_update_db_cache.sql b/tests/regress/sql/test_update_db_cache.sql new file mode 100644 index 00000000000..9fb70b56c9b --- /dev/null +++ b/tests/regress/sql/test_update_db_cache.sql @@ -0,0 +1,43 @@ +--start_ignore +CREATE DATABASE test_db_cache; +--end_ignore + +\c test_db_cache +CREATE EXTENSION diskquota; + +CREATE TABLE t(i) AS SELECT generate_series(1, 100000) +DISTRIBUTED BY (i); + +SELECT diskquota.wait_for_worker_new_epoch(); + +SELECT tableid::regclass, size, segid +FROM diskquota.table_size +WHERE tableid = 't'::regclass +ORDER BY segid; + +DROP EXTENSION diskquota; + +-- Create table without extension +CREATE TABLE t_no_extension(i) AS SELECT generate_series(1, 100000) +DISTRIBUTED BY (i); + +CREATE EXTENSION diskquota; + +-- Sleep until the worker adds the current db to cache so that it can be found +-- when DROP EXTENSION. +-- FIXME: We cannot use wait_for_worker_new_epoch() here because +-- diskquota.state is not clean. Change sleep() to wait() after removing +-- diskquota.state +SELECT pg_sleep(1); + +-- Should find nothing since t_no_extension is not recorded. 
+SELECT diskquota.diskquota_fetch_table_stat(0, ARRAY[]::oid[]) +FROM gp_dist_random('gp_id'); + +DROP TABLE t; +DROP TABLE t_no_extension; + +DROP EXTENSION diskquota; + +\c contrib_regression +DROP DATABASE test_db_cache; From fac1476d129791d0b1930fe169a15bfc5438f021 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Xuebin=20Su=20=28=E8=8B=8F=E5=AD=A6=E6=96=8C=29?= <12034000+xuebinsu@users.noreply.github.com> Date: Tue, 22 Feb 2022 17:16:26 +0800 Subject: [PATCH 133/330] Wait after CREATE EXTENSION (#152) Each time after CREATE EXTENSION, we need to wait until the db cache gets updated. Otherwise no active table will be recorded. --- tests/regress/expected/test_update_db_cache.out | 7 +++++++ tests/regress/sql/test_update_db_cache.sql | 3 +++ 2 files changed, 10 insertions(+) diff --git a/tests/regress/expected/test_update_db_cache.out b/tests/regress/expected/test_update_db_cache.out index 49837c93292..5bbc625d4f6 100644 --- a/tests/regress/expected/test_update_db_cache.out +++ b/tests/regress/expected/test_update_db_cache.out @@ -3,6 +3,13 @@ CREATE DATABASE test_db_cache; --end_ignore \c test_db_cache CREATE EXTENSION diskquota; +-- Wait until the db cache gets updated +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + CREATE TABLE t(i) AS SELECT generate_series(1, 100000) DISTRIBUTED BY (i); SELECT diskquota.wait_for_worker_new_epoch(); diff --git a/tests/regress/sql/test_update_db_cache.sql b/tests/regress/sql/test_update_db_cache.sql index 9fb70b56c9b..b35d84cd93f 100644 --- a/tests/regress/sql/test_update_db_cache.sql +++ b/tests/regress/sql/test_update_db_cache.sql @@ -5,6 +5,9 @@ CREATE DATABASE test_db_cache; \c test_db_cache CREATE EXTENSION diskquota; +-- Wait until the db cache gets updated +SELECT diskquota.wait_for_worker_new_epoch(); + CREATE TABLE t(i) AS SELECT generate_series(1, 100000) DISTRIBUTED BY (i); From f5abb37b3dfaffe8f4ecc03b43adc14eeda4a000 Mon Sep 17 00:00:00 2001 From: Chen Mulong Date: Wed, 23 Feb 2022 10:10:35 +0800 Subject: [PATCH 134/330] Run tests on all platforms for PR pipeline (#153) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Try to catch more flaky tests from the beginning. 
Co-authored-by: Chen Mulong Co-authored-by: Xuebin Su (苏学斌) <12034000+xuebinsu@users.noreply.github.com> --- concourse/{pipeline => }/README.md | 0 concourse/pipeline/pr.yml | 10 +++++++++- 2 files changed, 9 insertions(+), 1 deletion(-) rename concourse/{pipeline => }/README.md (100%) diff --git a/concourse/pipeline/README.md b/concourse/README.md similarity index 100% rename from concourse/pipeline/README.md rename to concourse/README.md diff --git a/concourse/pipeline/pr.yml b/concourse/pipeline/pr.yml index 1ce668cc3f5..39fb8abd945 100644 --- a/concourse/pipeline/pr.yml +++ b/concourse/pipeline/pr.yml @@ -1,6 +1,9 @@ #@ load("job_def.lib.yml", #@ "build_test_job", +#@ "centos6_gpdb6_conf", #@ "centos7_gpdb6_conf", +#@ "rhel8_gpdb6_conf", +#@ "ubuntu18_gpdb6_conf" #@ ) #@ load("trigger_def.lib.yml", #@ "pr_trigger", @@ -14,7 +17,12 @@ #@ "res_map": res_map, #@ "gpdb_src": "gpdb6_src", #@ "trigger": pr_trigger(res_map), -#@ "confs": [centos7_gpdb6_conf()] +#@ "confs": [ +#@ centos6_gpdb6_conf(), +#@ centos7_gpdb6_conf(), +#@ rhel8_gpdb6_conf(), +#@ ubuntu18_gpdb6_conf() + ] #@ } jobs: - #@ build_test_job(job_param) From 11a038aced8922f4436708a2849cde750da34874 Mon Sep 17 00:00:00 2001 From: Chen Mulong Date: Wed, 23 Feb 2022 17:09:13 +0800 Subject: [PATCH 135/330] Reset naptime when test finish (#147) Add tear_down to regress and isolation2 tests to reset the naptime. When naptime is 0, quite a lot of CPU will be used. It will be a problem if there are failing jobs on CI before they get reaped by Concourse. The Concourse worker's CPU will be occupied, which may impact subsequent tests. --- tests/isolation2/expected/reset_config.out | 10 ++++++++++ tests/isolation2/isolation2_schedule | 1 + tests/isolation2/sql/reset_config.sql | 4 ++++ tests/regress/diskquota_schedule | 1 + tests/regress/expected/reset_config.out | 6 ++++++ tests/regress/sql/reset_config.sql | 6 ++++++ 6 files changed, 28 insertions(+) create mode 100644 tests/isolation2/expected/reset_config.out create mode 100644 tests/isolation2/sql/reset_config.sql create mode 100644 tests/regress/expected/reset_config.out create mode 100644 tests/regress/sql/reset_config.sql diff --git a/tests/isolation2/expected/reset_config.out b/tests/isolation2/expected/reset_config.out new file mode 100644 index 00000000000..5fb1fb9f135 --- /dev/null +++ b/tests/isolation2/expected/reset_config.out @@ -0,0 +1,10 @@ +!\retcode gpconfig -c diskquota.naptime -v 10; +(exited with code 0) +!\retcode gpstop -u; +(exited with code 0) + +1: SHOW diskquota.naptime; diskquota.naptime ------------------- 10 (1 row) diff --git a/tests/isolation2/isolation2_schedule b/tests/isolation2/isolation2_schedule index 29ac7cba283..f9950222bfc 100644 --- a/tests/isolation2/isolation2_schedule +++ b/tests/isolation2/isolation2_schedule @@ -6,3 +6,4 @@ test: test_vacuum test: test_truncate test: test_worker_timeout test: test_drop_extension +test: reset_config diff --git a/tests/isolation2/sql/reset_config.sql b/tests/isolation2/sql/reset_config.sql new file mode 100644 index 00000000000..bfc2735d46c --- /dev/null +++ b/tests/isolation2/sql/reset_config.sql @@ -0,0 +1,4 @@ +!\retcode gpconfig -c diskquota.naptime -v 10; +!\retcode gpstop -u; + +1: SHOW diskquota.naptime; diff --git a/tests/regress/diskquota_schedule b/tests/regress/diskquota_schedule index bae728b4904..bb87cd7759b 100644 --- a/tests/regress/diskquota_schedule +++ b/tests/regress/diskquota_schedule @@ -30,3 +30,4 @@ test: test_ctas_schema test: test_ctas_tablespace_role test:
test_ctas_tablespace_schema test: test_drop_extension +test: reset_config diff --git a/tests/regress/expected/reset_config.out b/tests/regress/expected/reset_config.out new file mode 100644 index 00000000000..3b4afdbe031 --- /dev/null +++ b/tests/regress/expected/reset_config.out @@ -0,0 +1,6 @@ +SHOW diskquota.naptime; + diskquota.naptime +------------------- + 10 +(1 row) + diff --git a/tests/regress/sql/reset_config.sql b/tests/regress/sql/reset_config.sql new file mode 100644 index 00000000000..9f9842a2828 --- /dev/null +++ b/tests/regress/sql/reset_config.sql @@ -0,0 +1,6 @@ +--start_ignore +\! gpconfig -c diskquota.naptime -v 10 +\! gpstop -u +--end_ignore + +SHOW diskquota.naptime; From 3e4381a3945e2f3f83abac413aa52be06b20dd1e Mon Sep 17 00:00:00 2001 From: Xing Guo Date: Thu, 24 Feb 2022 12:15:33 +0800 Subject: [PATCH 136/330] Fix flaky test case in isolation2/test_relation_size.sql (#123) --- .../expected/test_relation_size.out | 20 +++++++++++++------ tests/isolation2/sql/test_relation_size.sql | 9 +++++++-- 2 files changed, 21 insertions(+), 8 deletions(-) diff --git a/tests/isolation2/expected/test_relation_size.out b/tests/isolation2/expected/test_relation_size.out index b1dc3401a8d..45e9a9cc149 100644 --- a/tests/isolation2/expected/test_relation_size.out +++ b/tests/isolation2/expected/test_relation_size.out @@ -17,29 +17,37 @@ SELECT diskquota.relation_size('t_dropped'); (1 row) -- Inject 'suspension' to servers. -SELECT gp_inject_fault_infinite('diskquota_before_stat_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p'; +SELECT gp_inject_fault_infinite('diskquota_before_stat_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content>=0; gp_inject_fault_infinite -------------------------- Success: Success: Success: - Success: -(4 rows) +(3 rows) -- Session 1 will hang before applying stat(2) to the physical file. 1&: SELECT diskquota.relation_size('t_dropped'); +-- Wait until the fault is triggered to avoid the following race condition: +-- The 't_dropped' table is dropped before evaluating "SELECT diskquota.relation_size('t_dropped')" +-- and the query will fail with 'ERROR: relation "t_dropped" does not exist' +SELECT gp_wait_until_triggered_fault('diskquota_before_stat_relfilenode', 1, dbid) FROM gp_segment_configuration WHERE role='p' AND content>=0; + gp_wait_until_triggered_fault +------------------------------- + Success: + Success: + Success: +(3 rows) -- Drop the table. DROP TABLE t_dropped; DROP -- Remove the injected 'suspension'. -SELECT gp_inject_fault_infinite('diskquota_before_stat_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p'; +SELECT gp_inject_fault_infinite('diskquota_before_stat_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content>=0; gp_inject_fault_infinite -------------------------- Success: Success: Success: - Success: -(4 rows) +(3 rows) -- Session 1 will continue and returns 0. 1<: <... completed> relation_size diff --git a/tests/isolation2/sql/test_relation_size.sql b/tests/isolation2/sql/test_relation_size.sql index 4ccf61b104a..c8817f52e72 100644 --- a/tests/isolation2/sql/test_relation_size.sql +++ b/tests/isolation2/sql/test_relation_size.sql @@ -12,15 +12,20 @@ SELECT diskquota.relation_size('t_dropped'); -- Inject 'suspension' to servers. 
SELECT gp_inject_fault_infinite('diskquota_before_stat_relfilenode', 'suspend', dbid) - FROM gp_segment_configuration WHERE role='p'; + FROM gp_segment_configuration WHERE role='p' AND content>=0; -- Session 1 will hang before applying stat(2) to the physical file. 1&: SELECT diskquota.relation_size('t_dropped'); +-- Wait until the fault is triggered to avoid the following race condition: +-- The 't_dropped' table is dropped before evaluating "SELECT diskquota.relation_size('t_dropped')" +-- and the query will fail with 'ERROR: relation "t_dropped" does not exist' +SELECT gp_wait_until_triggered_fault('diskquota_before_stat_relfilenode', 1, dbid) + FROM gp_segment_configuration WHERE role='p' AND content>=0; -- Drop the table. DROP TABLE t_dropped; -- Remove the injected 'suspension'. SELECT gp_inject_fault_infinite('diskquota_before_stat_relfilenode', 'reset', dbid) - FROM gp_segment_configuration WHERE role='p'; + FROM gp_segment_configuration WHERE role='p' AND content>=0; -- Session 1 will continue and returns 0. 1<: From 5838e5b4c624efda755f8b7099f52add631b24c4 Mon Sep 17 00:00:00 2001 From: Chen Mulong Date: Thu, 24 Feb 2022 22:34:03 +0800 Subject: [PATCH 137/330] Set PR pending status when start (#157) Also fixed a syntax issue in pr.yml --- concourse/pipeline/job_def.lib.yml | 4 +++- concourse/pipeline/pr.yml | 2 +- concourse/pipeline/trigger_def.lib.yml | 16 ++++++++++------ 3 files changed, 14 insertions(+), 8 deletions(-) diff --git a/concourse/pipeline/job_def.lib.yml b/concourse/pipeline/job_def.lib.yml index ac1abad61a8..815faa0cdaf 100644 --- a/concourse/pipeline/job_def.lib.yml +++ b/concourse/pipeline/job_def.lib.yml @@ -70,7 +70,9 @@ on_success: #@ trigger["on_success"] on_failure: #@ trigger["on_failure"] on_error: #@ trigger["on_error"] plan: -- #@ trigger["plan"] +#@ for trigger_plan in trigger["plans"]: +- #@ trigger_plan +#@ end - in_parallel: - get: gpdb_src resource: #@ param["gpdb_src"] diff --git a/concourse/pipeline/pr.yml b/concourse/pipeline/pr.yml index 39fb8abd945..3cd4a55a5b6 100644 --- a/concourse/pipeline/pr.yml +++ b/concourse/pipeline/pr.yml @@ -22,7 +22,7 @@ #@ centos7_gpdb6_conf(), #@ rhel8_gpdb6_conf(), #@ ubuntu18_gpdb6_conf() - ] +#@ ] #@ } jobs: - #@ build_test_job(job_param) diff --git a/concourse/pipeline/trigger_def.lib.yml b/concourse/pipeline/trigger_def.lib.yml index ad1928be38b..92e03df5421 100644 --- a/concourse/pipeline/trigger_def.lib.yml +++ b/concourse/pipeline/trigger_def.lib.yml @@ -3,12 +3,16 @@ #! PR trigger. For pull request pipelines #@ def pr_trigger(res_map): #@ add_res_by_name(res_map, "diskquota_pr") -plan: - get: diskquota_src +plans: +- get: diskquota_src resource: diskquota_pr params: fetch_tags: true trigger: true +- put: diskquota_pr + params: + path: diskquota_src + status: pending on_failure: put: diskquota_pr params: @@ -29,8 +33,8 @@ on_success: #! Commit trigger. For master pipelines #@ def commit_trigger(res_map): #@ add_res_by_name(res_map, "diskquota_commit") -plan: - get: diskquota_src +plans: +- get: diskquota_src resource: diskquota_commit trigger: true #! To set the github commit status, https://github.com/Pix4D/cogito is a good choice. @@ -43,8 +47,8 @@ on_error: #! Commit trigger. For dev pipelines. No webhook #@ def commit_dev_trigger(res_map): #@ add_res_by_name(res_map, "diskquota_commit_dev") -plan: - get: diskquota_src +plans: +- get: diskquota_src resource: diskquota_commit_dev trigger: true #! To set the github commit status, https://github.com/Pix4D/cogito is a good choice. 
From 36aafbd4b95e3965bf0048782a437aee98c573da Mon Sep 17 00:00:00 2001 From: Chen Mulong Date: Fri, 25 Feb 2022 09:47:38 +0800 Subject: [PATCH 138/330] Update CI README for PR trigger issue (#156) --- concourse/README.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/concourse/README.md b/concourse/README.md index 5e623fa5e58..79f9ffbaa95 100644 --- a/concourse/README.md +++ b/concourse/README.md @@ -63,3 +63,11 @@ To test if the webhook works, use `curl` to send a `POST` request to the hook URL. ``` curl --data-raw "foo" ``` + +# FAQ + +## PR pipeline is not triggered. + +The PR pipeline relies on the webhook to detect the new PR. However, due to the limitation of the webhook implementation of Concourse, we rely on the push hook for this. This means if the PR is from a forked repo, the PR pipeline won't be triggered immediately. To manually trigger the pipeline, go to https://extensions.ci.gpdb.pivotal.io/teams/main/pipelines/PR:diskquota/resources/diskquota_pr and click the ⟳ button there. + +TIPS: Just don't fork, name your branch as `/` and push it here to create the PR. From 671d7ed66f050a0d9e7c1662ac2139a9bd66f6d8 Mon Sep 17 00:00:00 2001 From: LXY Date: Fri, 25 Feb 2022 16:31:51 +0800 Subject: [PATCH 139/330] Make diskquota work for "pg_default" tablespace (#133) * Make diskquota work for "pg_default" tablespace (#133) Tracker story ID: #180688138. Setting a role_tablespace quota in "pg_default" using `set_role_tablespace_quota` doesn't work. Due to a hack (src/backend/catalog/heap.c) in GPDB, the `reltablespace` of relations in "pg_default" is `InvalidOid`. When we refresh the blackmap we use `DEFAULTTABLESPACE_OID` instead for those invalid tablespaces, which results in a mismatch. This patch solves the problem by updating the tablespace oid to the real tablespace oid (`MyDatabaseTableSpace`) when reading from syscache. Besides, with hard limits on, we also need to update the tablespace oid to the real one. Co-authored-by: Xuebin Su Co-authored-by: Sasasu --- diskquota_utility.c | 1 + quotamodel.c | 36 ++-- relation_cache.c | 4 +- tests/isolation2/expected/test_blackmap.out | 40 ++-- tests/isolation2/sql/test_blackmap.sql | 181 ++++++++++-------- tests/regress/diskquota_schedule | 3 +- tests/regress/expected/test_blackmap.out | 38 ++-- .../expected/test_default_tablespace.out | 168 ++++++++++++++++ tests/regress/expected/test_extension.out | 28 +-- .../regress/expected/test_tablespace_role.out | 4 +- tests/regress/sql/test_blackmap.sql | 29 ++- tests/regress/sql/test_default_tablespace.sql | 107 +++++++++++ tests/regress/sql/test_extension.sql | 1 + tests/regress/sql/test_tablespace_role.sql | 2 +- 14 files changed, 491 insertions(+), 151 deletions(-) create mode 100644 tests/regress/expected/test_default_tablespace.out create mode 100644 tests/regress/sql/test_default_tablespace.sql diff --git a/diskquota_utility.c b/diskquota_utility.c index 8f093cc9c53..cd8b33fd1a1 100644 --- a/diskquota_utility.c +++ b/diskquota_utility.c @@ -1355,6 +1355,7 @@ diskquota_relation_open(Oid relid, LOCKMODE mode) RESUME_INTERRUPTS(); } PG_END_TRY(); + return success_open ? rel : NULL; }
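In user-visible terms, the fixed scenario looks roughly like the following sketch (role u1 and table t are illustrative names, not taken from this patch); before this change the quota below was silently never enforced, because a table created in pg_default carries reltablespace = InvalidOid while the blackmap entry used DEFAULTTABLESPACE_OID:

SELECT diskquota.set_role_tablespace_quota('u1', 'pg_default', '1 MB');
CREATE TABLE t(i int) DISTRIBUTED BY (i); -- created in pg_default, so reltablespace is InvalidOid
ALTER TABLE t OWNER TO u1;
-- expect this to fail once disk usage of u1 in pg_default exceeds 1 MB
INSERT INTO t SELECT generate_series(1, 1000000);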
Otherwise, check if it has hit * the quota limit, if it does, add it to the black list. */ @@ -330,11 +330,12 @@ check_quota_map(QuotaType type) if (entry->size >= entry->limit) { Oid targetOid = entry->keys[0]; - Oid tablespaceoid = - (type == NAMESPACE_TABLESPACE_QUOTA) || (type == ROLE_TABLESPACE_QUOTA) ? entry->keys[1] : InvalidOid; /* when quota type is not NAMESPACE_TABLESPACE_QUOTA or ROLE_TABLESPACE_QUOTA, the tablespaceoid * is set to be InvalidOid, so when we get it from map, also set it to be InvalidOid */ + Oid tablespaceoid = + (type == NAMESPACE_TABLESPACE_QUOTA) || (type == ROLE_TABLESPACE_QUOTA) ? entry->keys[1] : InvalidOid; + bool segmentExceeded = entry->segid == -1 ? false : true; add_quota_to_blacklist(type, targetOid, tablespaceoid, segmentExceeded); } @@ -854,6 +855,11 @@ calculate_table_disk_usage(bool is_init, HTAB *local_active_table_stat_map) relnamespace = classForm->relnamespace; relowner = classForm->relowner; reltablespace = classForm->reltablespace; + + if (!OidIsValid(reltablespace)) + { + reltablespace = MyDatabaseTableSpace; + } } else { @@ -1449,6 +1455,12 @@ get_rel_owner_schema_tablespace(Oid relid, Oid *ownerOid, Oid *nsOid, Oid *table *ownerOid = reltup->relowner; *nsOid = reltup->relnamespace; *tablespaceoid = reltup->reltablespace; + + if (!OidIsValid(*tablespaceoid)) + { + *tablespaceoid = MyDatabaseTableSpace; + } + ReleaseSysCache(tp); } return found; @@ -1775,15 +1787,11 @@ refresh_blackmap(PG_FUNCTION_ARGS) keyitem.databaseoid = DatumGetObjectId(GetAttributeByNum(lt, 2, &isnull)); keyitem.tablespaceoid = DatumGetObjectId(GetAttributeByNum(lt, 3, &isnull)); keyitem.targettype = DatumGetInt32(GetAttributeByNum(lt, 4, &isnull)); - /* - * If the current quota limit type is NAMESPACE_TABLESPACE_QUOTA or - * ROLE_TABLESPACE_QUOTA, we should explicitly set DEFAULTTABLESPACE_OID - * for relations whose reltablespace is InvalidOid. - */ - if ((keyitem.targettype == NAMESPACE_TABLESPACE_QUOTA || - keyitem.targettype == ROLE_TABLESPACE_QUOTA) && - !OidIsValid(keyitem.tablespaceoid)) - keyitem.tablespaceoid = DEFAULTTABLESPACE_OID; + /* blackmap entries from QD should have the real tablespace oid */ + if ((keyitem.targettype == NAMESPACE_TABLESPACE_QUOTA || keyitem.targettype == ROLE_TABLESPACE_QUOTA)) + { + Assert(OidIsValid(keyitem.tablespaceoid)); + } segexceeded = DatumGetBool(GetAttributeByNum(lt, 5, &isnull)); blackmapentry = hash_search(local_blackmap, &keyitem, HASH_ENTER_NULL, NULL); @@ -1816,7 +1824,7 @@ refresh_blackmap(PG_FUNCTION_ARGS) Form_pg_class form = (Form_pg_class) GETSTRUCT(tuple); Oid relnamespace = form->relnamespace; Oid reltablespace = OidIsValid(form->reltablespace) ? - form->reltablespace : DEFAULTTABLESPACE_OID; + form->reltablespace : MyDatabaseTableSpace; Oid relowner = form->relowner; BlackMapEntry keyitem; bool found; @@ -1878,7 +1886,7 @@ refresh_blackmap(PG_FUNCTION_ARGS) Form_pg_class curr_form = (Form_pg_class) GETSTRUCT(curr_tuple); Oid curr_reltablespace = OidIsValid(curr_form->reltablespace) ? 
- curr_form->reltablespace : DEFAULTTABLESPACE_OID; + curr_form->reltablespace : MyDatabaseTableSpace; RelFileNode relfilenode = { .dbNode = MyDatabaseId, .relNode = curr_form->relfilenode, diff --git a/relation_cache.c b/relation_cache.c index ce285ef367b..b1794cdf9e1 100644 --- a/relation_cache.c +++ b/relation_cache.c @@ -446,7 +446,7 @@ get_relation_entry_from_pg_class(Oid relid, DiskQuotaRelationCacheEntry* relatio relation_entry->namespaceoid = classForm->relnamespace; relation_entry->relstorage = classForm->relstorage; relation_entry->rnode.node.spcNode = OidIsValid(classForm->reltablespace) ? - classForm->reltablespace : DEFAULTTABLESPACE_OID; + classForm->reltablespace : MyDatabaseTableSpace; relation_entry->rnode.node.dbNode = MyDatabaseId; relation_entry->rnode.node.relNode = classForm->relfilenode; relation_entry->rnode.backend = classForm->relpersistence == RELPERSISTENCE_TEMP ? @@ -537,7 +537,7 @@ get_relfilenode_by_relid(Oid relid, RelFileNodeBackend *rnode, char *relstorage) { classForm = (Form_pg_class) GETSTRUCT(classTup); rnode->node.spcNode = OidIsValid(classForm->reltablespace) ? - classForm->reltablespace : DEFAULTTABLESPACE_OID; + classForm->reltablespace : MyDatabaseTableSpace; rnode->node.dbNode = MyDatabaseId; rnode->node.relNode = classForm->relfilenode; rnode->backend = classForm->relpersistence == RELPERSISTENCE_TEMP ? diff --git a/tests/isolation2/expected/test_blackmap.out b/tests/isolation2/expected/test_blackmap.out index 11d5afff176..7eacd4cfb66 100644 --- a/tests/isolation2/expected/test_blackmap.out +++ b/tests/isolation2/expected/test_blackmap.out @@ -3,7 +3,11 @@ -- queries in smgrextend hook by relation's relfilenode. -- -CREATE OR REPLACE FUNCTION block_relation_on_seg0(rel regclass, block_type text, segexceeded boolean) RETURNS void AS $$ /*in func*/ DECLARE /*in func*/ bt int; /*in func*/ targetoid oid; /*in func*/ BEGIN /*in func*/ CASE block_type /*in func*/ WHEN 'NAMESPACE' THEN /*in func*/ bt = 0; /*in func*/ SELECT relnamespace INTO targetoid /*in func*/ FROM pg_class WHERE relname=rel::text; /*in func*/ WHEN 'ROLE' THEN /*in func*/ bt = 1; /*in func*/ SELECT relowner INTO targetoid /*in func*/ FROM pg_class WHERE relname=rel::text; /*in func*/ WHEN 'NAMESPACE_TABLESPACE' THEN /*in func*/ bt = 2; /*in func*/ SELECT relnamespace INTO targetoid /*in func*/ FROM pg_class WHERE relname=rel::text; /*in func*/ WHEN 'ROLE_TABLESPACE' THEN /*in func*/ bt = 3; /*in func*/ SELECT relowner INTO targetoid /*in func*/ FROM pg_class WHERE relname=rel::text; /*in func*/ END CASE; /*in func*/ PERFORM diskquota.refresh_blackmap( /*in func*/ ARRAY[ /*in func*/ ROW(targetoid, /*in func*/ (SELECT oid FROM pg_database WHERE datname=current_database()), /*in func*/ (SELECT reltablespace FROM pg_class WHERE relname=rel::text), /*in func*/ bt, /*in func*/ segexceeded) /*in func*/ ]::diskquota.blackmap_entry[], /*in func*/ ARRAY[rel]::oid[]) /*in func*/ FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; /*in func*/ END; $$ /*in func*/ LANGUAGE 'plpgsql'; +-- this function return valid tablespaceoid. +-- For role/namespace quota, return as it is. +-- For namespace_tablespace/role_tablespace quota, return non-zero tablespaceoid. 
+CREATE OR REPLACE FUNCTION get_real_tablespace_oid(block_type text, tablespaceoid oid) /*in func*/ RETURNS oid AS /*in func*/ $$ /*in func*/ BEGIN /*in func*/ /*in func*/ CASE /*in func*/ WHEN (block_type = 'NAMESPACE') OR (block_type = 'ROLE') THEN RETURN tablespaceoid; /*in func*/ ELSE RETURN ( /*in func*/ CASE tablespaceoid /*in func*/ WHEN 0 THEN (SELECT dattablespace FROM pg_database WHERE datname = CURRENT_DATABASE()) /*in func*/ ELSE /*in func*/ tablespaceoid /*in func*/ END /*in func*/ ); /*in func*/ END CASE; /*in func*/ END; /*in func*/ $$ LANGUAGE plpgsql; /*in func*/ +CREATE OR REPLACE FUNCTION block_relation_on_seg0(rel regclass, block_type text, segexceeded boolean) RETURNS void AS $$ /*in func*/ DECLARE /*in func*/ bt int; /*in func*/ targetoid oid; /*in func*/ BEGIN /*in func*/ CASE block_type /*in func*/ WHEN 'NAMESPACE' THEN /*in func*/ bt = 0; /*in func*/ SELECT relnamespace INTO targetoid /*in func*/ FROM pg_class WHERE relname=rel::text; /*in func*/ WHEN 'ROLE' THEN /*in func*/ bt = 1; /*in func*/ SELECT relowner INTO targetoid /*in func*/ FROM pg_class WHERE relname=rel::text; /*in func*/ WHEN 'NAMESPACE_TABLESPACE' THEN /*in func*/ bt = 2; /*in func*/ SELECT relnamespace INTO targetoid /*in func*/ FROM pg_class WHERE relname=rel::text; /*in func*/ WHEN 'ROLE_TABLESPACE' THEN /*in func*/ bt = 3; /*in func*/ SELECT relowner INTO targetoid /*in func*/ FROM pg_class WHERE relname=rel::text; /*in func*/ END CASE; /*in func*/ PERFORM diskquota.refresh_blackmap( /*in func*/ ARRAY[ /*in func*/ ROW (targetoid, /*in func*/ (SELECT oid FROM pg_database WHERE datname = CURRENT_DATABASE()), /*in func*/ (SELECT get_real_tablespace_oid( /*in func*/ block_type, /*in func*/ (SELECT pg_class.reltablespace FROM pg_class WHERE relname = rel::TEXT) /*in func*/ )), /*in func*/ bt, /*in func*/ segexceeded) /*in func*/ ]::diskquota.blackmap_entry[], /*in func*/ ARRAY[rel]::oid[]) /*in func*/ FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; /*in func*/ END; $$ /*in func*/ LANGUAGE 'plpgsql'; CREATE @@ -44,7 +48,7 @@ SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) -- Session 1 will return and emit an error message saying that the quota limit is exceeded on seg0. 1<: <... completed> -ERROR: schema's disk space quota exceeded with name:2200 (seg0 127.0.0.1:6002 pid=137774) +ERROR: schema's disk space quota exceeded with name:2200 (seg0 127.0.0.1:6002 pid=4675) -- Clean up the blackmap on seg0. SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; @@ -83,7 +87,7 @@ SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) -- Session 1 will return and emit an error message saying that the quota limit is exceeded on seg0. 1<: <... completed> -ERROR: schema's disk space quota exceeded with name:2200 (seg0 127.0.0.1:6002 pid=137774) +ERROR: schema's disk space quota exceeded with name:2200 (seg0 127.0.0.1:6002 pid=4675) -- Clean up the blackmap on seg0. SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; @@ -122,7 +126,7 @@ SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) -- Session 1 will return and emit an error message saying that the quota limit is exceeded on seg0. 1<: <... 
completed> -ERROR: schema's disk space quota exceeded with name:2200 (seg0 127.0.0.1:6002 pid=137774) +ERROR: schema's disk space quota exceeded with name:2200 (seg0 127.0.0.1:6002 pid=4675) -- Clean up the blackmap on seg0. SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; @@ -163,7 +167,7 @@ SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) -- Session 1 will return and emit an error message saying that the quota limit is exceeded on seg0. 1<: <... completed> -ERROR: schema's disk space quota exceeded with name:2200 (seg0 127.0.0.1:6002 pid=137774) +ERROR: schema's disk space quota exceeded with name:2200 (seg0 127.0.0.1:6002 pid=4675) -- Clean up the blackmap on seg0. SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; @@ -195,7 +199,7 @@ SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) Success: (1 row) 1<: <... completed> -ERROR: tablespace:1663 schema:2200 diskquota exceeded per segment quota (seg0 127.0.0.1:6002 pid=137774) +ERROR: tablespace:1663 schema:2200 diskquota exceeded per segment quota (seg0 127.0.0.1:6002 pid=4675) -- Clean up the blackmap on seg0. SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; refresh_blackmap @@ -226,7 +230,7 @@ SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) Success: (1 row) 1<: <... completed> -ERROR: tablespace:1663 role:10 diskquota exceeded per segment quota (seg0 127.0.0.1:6002 pid=137774) +ERROR: tablespace:1663 role:10 diskquota exceeded per segment quota (seg0 127.0.0.1:6002 pid=4675) -- Clean up the blackmap on seg0. SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; refresh_blackmap @@ -268,11 +272,11 @@ CREATE -- This function replaces the oid appears in the auxiliary relation's name -- with the corresponding relname of that oid. -CREATE OR REPLACE FUNCTION replace_oid_with_relname(given_name text, filename text) RETURNS text AS $$ /*in func*/ BEGIN /*in func*/ RETURN COALESCE( /*in func*/ REGEXP_REPLACE(given_name, /*in func*/ '^(pg_toast_|pg_aoseg_|pg_aovisimap_|pg_aoblkdir_|pg_aocsseg_)\d+', /*in func*/ '\1' || /*in func*/ (SELECT DISTINCT relname FROM read_relation_cache_from_file(filename) /*in func*/ WHERE reloid=REGEXP_REPLACE(given_name, '\D', '', 'g')::oid), 'g'), given_name);/*in func*/ END; /*in func*/ $$ LANGUAGE plpgsql; +CREATE OR REPLACE FUNCTION replace_oid_with_relname(given_name text, filename text) RETURNS text AS $$ /*in func*/ BEGIN /*in func*/ RETURN COALESCE( /*in func*/ REGEXP_REPLACE(given_name, /*in func*/ '^(pg_toast_|pg_aoseg_|pg_aovisimap_|pg_aoblkdir_|pg_aocsseg_)\d+', /*in func*/ '\1' || /*in func*/ (SELECT DISTINCT relname FROM read_relation_cache_from_file(filename) /*in func*/ WHERE REGEXP_REPLACE(given_name, '\D', '', 'g') <> '' AND reloid=REGEXP_REPLACE(given_name, '\D', '', 'g')::oid), 'g'), given_name);/*in func*/ END; /*in func*/ $$ LANGUAGE plpgsql; CREATE -- This function helps dispatch blackmap for the given relation to seg0. 
-CREATE OR REPLACE FUNCTION block_uncommitted_relation_on_seg0(rel text, block_type text, segexceeded boolean, filename text) RETURNS void AS $$ /*in func*/ DECLARE /*in func*/ bt int; /*in func*/ targetoid oid; /*in func*/ BEGIN /*in func*/ CASE block_type /*in func*/ WHEN 'NAMESPACE' THEN /*in func*/ bt = 0; /*in func*/ SELECT relnamespace INTO targetoid /*in func*/ FROM read_relation_cache_from_file(filename) /*in func*/ WHERE relname=rel::text AND segid=0; /*in func*/ WHEN 'ROLE' THEN /*in func*/ bt = 1; /*in func*/ SELECT relowner INTO targetoid /*in func*/ FROM read_relation_cache_from_file(filename) /*in func*/ WHERE relname=rel::text AND segid=0; /*in func*/ WHEN 'NAMESPACE_TABLESPACE' THEN /*in func*/ bt = 2; /*in func*/ SELECT relnamespace INTO targetoid /*in func*/ FROM read_relation_cache_from_file(filename) /*in func*/ WHERE relname=rel::text AND segid=0; /*in func*/ WHEN 'ROLE_TABLESPACE' THEN /*in func*/ bt = 3; /*in func*/ SELECT relowner INTO targetoid /*in func*/ FROM read_relation_cache_from_file(filename) /*in func*/ WHERE relname=rel::text AND segid=0; /*in func*/ END CASE; /*in func*/ PERFORM diskquota.refresh_blackmap( /*in func*/ ARRAY[ /*in func*/ ROW(targetoid, /*in func*/ (SELECT oid FROM pg_database WHERE datname=current_database()), /*in func*/ (SELECT reltablespace /*in func*/ FROM read_relation_cache_from_file(filename) /*in func*/ WHERE relname=rel::text AND segid=0), /*in func*/ bt, /*in func*/ segexceeded) /*in func*/ ]::diskquota.blackmap_entry[], /*in func*/ ARRAY[(SELECT reloid FROM read_relation_cache_from_file(filename) /*in func*/ WHERE relname=rel::text AND segid=0)::regclass]::oid[]) /*in func*/ FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; /*in func*/ END; $$ /*in func*/ LANGUAGE 'plpgsql'; +CREATE OR REPLACE FUNCTION block_uncommitted_relation_on_seg0(rel text, block_type text, segexceeded boolean, filename text) RETURNS void AS $$ /*in func*/ DECLARE /*in func*/ bt int; /*in func*/ targetoid oid; /*in func*/ BEGIN /*in func*/ CASE block_type /*in func*/ WHEN 'NAMESPACE' THEN /*in func*/ bt = 0; /*in func*/ SELECT relnamespace INTO targetoid /*in func*/ FROM read_relation_cache_from_file(filename) /*in func*/ WHERE relname=rel::text AND segid=0; /*in func*/ WHEN 'ROLE' THEN /*in func*/ bt = 1; /*in func*/ SELECT relowner INTO targetoid /*in func*/ FROM read_relation_cache_from_file(filename) /*in func*/ WHERE relname=rel::text AND segid=0; /*in func*/ WHEN 'NAMESPACE_TABLESPACE' THEN /*in func*/ bt = 2; /*in func*/ SELECT relnamespace INTO targetoid /*in func*/ FROM read_relation_cache_from_file(filename) /*in func*/ WHERE relname=rel::text AND segid=0; /*in func*/ WHEN 'ROLE_TABLESPACE' THEN /*in func*/ bt = 3; /*in func*/ SELECT relowner INTO targetoid /*in func*/ FROM read_relation_cache_from_file(filename) /*in func*/ WHERE relname=rel::text AND segid=0; /*in func*/ END CASE; /*in func*/ PERFORM diskquota.refresh_blackmap( /*in func*/ ARRAY[ /*in func*/ ROW (targetoid, /*in func*/ (SELECT oid FROM pg_database WHERE datname = CURRENT_DATABASE()), /*in func*/ (SELECT get_real_tablespace_oid( /*in func*/ block_type, /*in func*/ (SELECT reltablespace /*in func*/ FROM read_relation_cache_from_file(filename) /*in func*/ WHERE relname = rel::text /*in func*/ AND segid = 0) /*in func*/ )), /*in func*/ bt, /*in func*/ segexceeded) /*in func*/ ]::diskquota.blackmap_entry[], /*in func*/ ARRAY[(SELECT reloid FROM read_relation_cache_from_file(filename) /*in func*/ WHERE relname=rel::text AND segid=0)::regclass]::oid[]) /*in func*/ FROM 
gp_dist_random('gp_id') WHERE gp_segment_id=0; /*in func*/ END; $$ /*in func*/ LANGUAGE 'plpgsql'; CREATE -- 7. Test that we are able to block an ordinary relation on seg0 by its relnamespace. @@ -310,7 +314,7 @@ SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) Success: (1 row) 1<: <... completed> -ERROR: schema's disk space quota exceeded with name:2200 (seg0 127.0.0.1:6002 pid=137774) +ERROR: schema's disk space quota exceeded with name:2200 (seg0 127.0.0.1:6002 pid=4675) 1: ABORT; ABORT -- Clean up the blackmap on seg0. @@ -355,7 +359,7 @@ SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) Success: (1 row) 1<: <... completed> -ERROR: role's disk space quota exceeded with name:10 (seg0 127.0.0.1:6002 pid=137774) +ERROR: role's disk space quota exceeded with name:10 (seg0 127.0.0.1:6002 pid=4675) 1: ABORT; ABORT -- Clean up the blackmap on seg0. @@ -400,7 +404,7 @@ SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) Success: (1 row) 1<: <... completed> -ERROR: tablespace:1663 schema:2200 diskquota exceeded (seg0 127.0.0.1:6002 pid=137774) +ERROR: tablespace:1663 schema:2200 diskquota exceeded (seg0 127.0.0.1:6002 pid=4675) 1: ABORT; ABORT -- Clean up the blackmap on seg0. @@ -445,7 +449,7 @@ SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) Success: (1 row) 1<: <... completed> -ERROR: tablespace:1663 role:10 diskquota exceeded (seg0 127.0.0.1:6002 pid=137774) +ERROR: tablespace:1663 role:10 diskquota exceeded (seg0 127.0.0.1:6002 pid=4675) 1: ABORT; ABORT -- Clean up the blackmap on seg0. @@ -490,7 +494,7 @@ SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) Success: (1 row) 1<: <... completed> -ERROR: tablespace:1663 schema:2200 diskquota exceeded per segment quota (seg0 127.0.0.1:6002 pid=137774) +ERROR: tablespace:1663 schema:2200 diskquota exceeded per segment quota (seg0 127.0.0.1:6002 pid=4675) 1: ABORT; ABORT -- Clean up the blackmap on seg0. @@ -535,7 +539,7 @@ SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) Success: (1 row) 1<: <... completed> -ERROR: tablespace:1663 role:10 diskquota exceeded per segment quota (seg0 127.0.0.1:6002 pid=137774) +ERROR: tablespace:1663 role:10 diskquota exceeded per segment quota (seg0 127.0.0.1:6002 pid=4675) 1: ABORT; ABORT -- Clean up the blackmap on seg0. @@ -582,7 +586,7 @@ SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) Success: (1 row) 1<: <... completed> -ERROR: schema's disk space quota exceeded with name:2200 (seg0 127.0.0.1:6002 pid=137774) +ERROR: schema's disk space quota exceeded with name:2200 (seg0 127.0.0.1:6002 pid=4675) 1: ABORT; ABORT -- Clean up the blackmap on seg0. @@ -630,7 +634,7 @@ SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) Success: (1 row) 1<: <... completed> -ERROR: schema's disk space quota exceeded with name:2200 (seg0 127.0.0.1:6002 pid=137774) +ERROR: schema's disk space quota exceeded with name:2200 (seg0 127.0.0.1:6002 pid=4675) 1: ABORT; ABORT -- Clean up the blackmap on seg0. @@ -678,7 +682,7 @@ SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) Success: (1 row) 1<: <... completed> -ERROR: schema's disk space quota exceeded with name:2200 (seg0 127.0.0.1:6002 pid=137774) +ERROR: schema's disk space quota exceeded with name:2200 (seg0 127.0.0.1:6002 pid=4675) 1: ABORT; ABORT -- Clean up the blackmap on seg0. 
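Two details in the new expected output above deserve a note. First, pg_class.reltablespace stores 0 for a relation that lives in the database's default tablespace, while NAMESPACE_TABLESPACE and ROLE_TABLESPACE blackmap entries need a concrete tablespace oid; get_real_tablespace_oid bridges exactly that gap. A minimal sketch of the same resolution outside the test harness, assuming a table named some_table (a hypothetical name):

    -- Resolve the effective tablespace oid: 0 means "the database default".
    SELECT CASE c.reltablespace
               WHEN 0 THEN (SELECT dattablespace
                            FROM pg_database
                            WHERE datname = current_database())
               ELSE c.reltablespace
           END AS effective_tablespace_oid
    FROM pg_class AS c
    WHERE c.relname = 'some_table';

Second, the guard REGEXP_REPLACE(given_name, '\D', '', 'g') <> '' added to replace_oid_with_relname matters because stripping every non-digit from a relation name that carries no oid suffix leaves an empty string, and the empty string is not a valid oid literal:

    SELECT ''::oid;  -- ERROR:  invalid input syntax for type oid: ""

With the guard in place the subquery simply finds no match, so COALESCE falls back to given_name instead of erroring out.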
diff --git a/tests/isolation2/sql/test_blackmap.sql b/tests/isolation2/sql/test_blackmap.sql index 1e7cd74b28e..408f637ce16 100644 --- a/tests/isolation2/sql/test_blackmap.sql +++ b/tests/isolation2/sql/test_blackmap.sql @@ -3,41 +3,65 @@ -- queries in smgrextend hook by relation's relfilenode. -- +-- this function return valid tablespaceoid. +-- For role/namespace quota, return as it is. +-- For namespace_tablespace/role_tablespace quota, return non-zero tablespaceoid. +CREATE OR REPLACE FUNCTION get_real_tablespace_oid(block_type text, tablespaceoid oid) /*in func*/ + RETURNS oid AS /*in func*/ +$$ /*in func*/ +BEGIN /*in func*/ + /*in func*/ + CASE /*in func*/ + WHEN (block_type = 'NAMESPACE') OR (block_type = 'ROLE') THEN RETURN tablespaceoid; /*in func*/ + ELSE RETURN ( /*in func*/ + CASE tablespaceoid /*in func*/ + WHEN 0 THEN (SELECT dattablespace FROM pg_database WHERE datname = CURRENT_DATABASE()) /*in func*/ + ELSE /*in func*/ + tablespaceoid /*in func*/ + END /*in func*/ + ); /*in func*/ + END CASE; /*in func*/ +END; /*in func*/ +$$ LANGUAGE plpgsql; /*in func*/ + CREATE OR REPLACE FUNCTION block_relation_on_seg0(rel regclass, block_type text, segexceeded boolean) - RETURNS void AS $$ /*in func*/ - DECLARE /*in func*/ - bt int; /*in func*/ - targetoid oid; /*in func*/ - BEGIN /*in func*/ - CASE block_type /*in func*/ - WHEN 'NAMESPACE' THEN /*in func*/ - bt = 0; /*in func*/ - SELECT relnamespace INTO targetoid /*in func*/ - FROM pg_class WHERE relname=rel::text; /*in func*/ - WHEN 'ROLE' THEN /*in func*/ - bt = 1; /*in func*/ - SELECT relowner INTO targetoid /*in func*/ - FROM pg_class WHERE relname=rel::text; /*in func*/ - WHEN 'NAMESPACE_TABLESPACE' THEN /*in func*/ - bt = 2; /*in func*/ - SELECT relnamespace INTO targetoid /*in func*/ - FROM pg_class WHERE relname=rel::text; /*in func*/ - WHEN 'ROLE_TABLESPACE' THEN /*in func*/ - bt = 3; /*in func*/ - SELECT relowner INTO targetoid /*in func*/ - FROM pg_class WHERE relname=rel::text; /*in func*/ - END CASE; /*in func*/ - PERFORM diskquota.refresh_blackmap( /*in func*/ - ARRAY[ /*in func*/ - ROW(targetoid, /*in func*/ - (SELECT oid FROM pg_database WHERE datname=current_database()), /*in func*/ - (SELECT reltablespace FROM pg_class WHERE relname=rel::text), /*in func*/ - bt, /*in func*/ - segexceeded) /*in func*/ - ]::diskquota.blackmap_entry[], /*in func*/ - ARRAY[rel]::oid[]) /*in func*/ - FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; /*in func*/ - END; $$ /*in func*/ + RETURNS void AS $$ /*in func*/ + DECLARE /*in func*/ + bt int; /*in func*/ + targetoid oid; /*in func*/ + BEGIN /*in func*/ + CASE block_type /*in func*/ + WHEN 'NAMESPACE' THEN /*in func*/ + bt = 0; /*in func*/ + SELECT relnamespace INTO targetoid /*in func*/ + FROM pg_class WHERE relname=rel::text; /*in func*/ + WHEN 'ROLE' THEN /*in func*/ + bt = 1; /*in func*/ + SELECT relowner INTO targetoid /*in func*/ + FROM pg_class WHERE relname=rel::text; /*in func*/ + WHEN 'NAMESPACE_TABLESPACE' THEN /*in func*/ + bt = 2; /*in func*/ + SELECT relnamespace INTO targetoid /*in func*/ + FROM pg_class WHERE relname=rel::text; /*in func*/ + WHEN 'ROLE_TABLESPACE' THEN /*in func*/ + bt = 3; /*in func*/ + SELECT relowner INTO targetoid /*in func*/ + FROM pg_class WHERE relname=rel::text; /*in func*/ + END CASE; /*in func*/ + PERFORM diskquota.refresh_blackmap( /*in func*/ + ARRAY[ /*in func*/ + ROW (targetoid, /*in func*/ + (SELECT oid FROM pg_database WHERE datname = CURRENT_DATABASE()), /*in func*/ + (SELECT get_real_tablespace_oid( /*in func*/ + block_type, /*in 
func*/ + (SELECT pg_class.reltablespace FROM pg_class WHERE relname = rel::TEXT) /*in func*/ + )), /*in func*/ + bt, /*in func*/ + segexceeded) /*in func*/ + ]::diskquota.blackmap_entry[], /*in func*/ + ARRAY[rel]::oid[]) /*in func*/ + FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; /*in func*/ + END; $$ /*in func*/ LANGUAGE 'plpgsql'; @@ -232,53 +256,58 @@ CREATE OR REPLACE FUNCTION replace_oid_with_relname(given_name text, filename te '^(pg_toast_|pg_aoseg_|pg_aovisimap_|pg_aoblkdir_|pg_aocsseg_)\d+', /*in func*/ '\1' || /*in func*/ (SELECT DISTINCT relname FROM read_relation_cache_from_file(filename) /*in func*/ - WHERE reloid=REGEXP_REPLACE(given_name, '\D', '', 'g')::oid), 'g'), given_name);/*in func*/ + WHERE REGEXP_REPLACE(given_name, '\D', '', 'g') <> '' + AND reloid=REGEXP_REPLACE(given_name, '\D', '', 'g')::oid), 'g'), given_name);/*in func*/ END; /*in func*/ $$ LANGUAGE plpgsql; -- This function helps dispatch blackmap for the given relation to seg0. CREATE OR REPLACE FUNCTION block_uncommitted_relation_on_seg0(rel text, block_type text, segexceeded boolean, filename text) - RETURNS void AS $$ /*in func*/ - DECLARE /*in func*/ - bt int; /*in func*/ - targetoid oid; /*in func*/ - BEGIN /*in func*/ - CASE block_type /*in func*/ - WHEN 'NAMESPACE' THEN /*in func*/ - bt = 0; /*in func*/ - SELECT relnamespace INTO targetoid /*in func*/ - FROM read_relation_cache_from_file(filename) /*in func*/ - WHERE relname=rel::text AND segid=0; /*in func*/ - WHEN 'ROLE' THEN /*in func*/ - bt = 1; /*in func*/ - SELECT relowner INTO targetoid /*in func*/ - FROM read_relation_cache_from_file(filename) /*in func*/ - WHERE relname=rel::text AND segid=0; /*in func*/ - WHEN 'NAMESPACE_TABLESPACE' THEN /*in func*/ - bt = 2; /*in func*/ - SELECT relnamespace INTO targetoid /*in func*/ - FROM read_relation_cache_from_file(filename) /*in func*/ - WHERE relname=rel::text AND segid=0; /*in func*/ - WHEN 'ROLE_TABLESPACE' THEN /*in func*/ - bt = 3; /*in func*/ - SELECT relowner INTO targetoid /*in func*/ - FROM read_relation_cache_from_file(filename) /*in func*/ - WHERE relname=rel::text AND segid=0; /*in func*/ - END CASE; /*in func*/ - PERFORM diskquota.refresh_blackmap( /*in func*/ - ARRAY[ /*in func*/ - ROW(targetoid, /*in func*/ - (SELECT oid FROM pg_database WHERE datname=current_database()), /*in func*/ - (SELECT reltablespace /*in func*/ - FROM read_relation_cache_from_file(filename) /*in func*/ - WHERE relname=rel::text AND segid=0), /*in func*/ - bt, /*in func*/ - segexceeded) /*in func*/ - ]::diskquota.blackmap_entry[], /*in func*/ - ARRAY[(SELECT reloid FROM read_relation_cache_from_file(filename) /*in func*/ - WHERE relname=rel::text AND segid=0)::regclass]::oid[]) /*in func*/ - FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; /*in func*/ - END; $$ /*in func*/ + RETURNS void AS $$ /*in func*/ + DECLARE /*in func*/ + bt int; /*in func*/ + targetoid oid; /*in func*/ + BEGIN /*in func*/ + CASE block_type /*in func*/ + WHEN 'NAMESPACE' THEN /*in func*/ + bt = 0; /*in func*/ + SELECT relnamespace INTO targetoid /*in func*/ + FROM read_relation_cache_from_file(filename) /*in func*/ + WHERE relname=rel::text AND segid=0; /*in func*/ + WHEN 'ROLE' THEN /*in func*/ + bt = 1; /*in func*/ + SELECT relowner INTO targetoid /*in func*/ + FROM read_relation_cache_from_file(filename) /*in func*/ + WHERE relname=rel::text AND segid=0; /*in func*/ + WHEN 'NAMESPACE_TABLESPACE' THEN /*in func*/ + bt = 2; /*in func*/ + SELECT relnamespace INTO targetoid /*in func*/ + FROM 
read_relation_cache_from_file(filename) /*in func*/ + WHERE relname=rel::text AND segid=0; /*in func*/ + WHEN 'ROLE_TABLESPACE' THEN /*in func*/ + bt = 3; /*in func*/ + SELECT relowner INTO targetoid /*in func*/ + FROM read_relation_cache_from_file(filename) /*in func*/ + WHERE relname=rel::text AND segid=0; /*in func*/ + END CASE; /*in func*/ + PERFORM diskquota.refresh_blackmap( /*in func*/ + ARRAY[ /*in func*/ + ROW (targetoid, /*in func*/ + (SELECT oid FROM pg_database WHERE datname = CURRENT_DATABASE()), /*in func*/ + (SELECT get_real_tablespace_oid( /*in func*/ + block_type, /*in func*/ + (SELECT reltablespace /*in func*/ + FROM read_relation_cache_from_file(filename) /*in func*/ + WHERE relname = rel::text /*in func*/ + AND segid = 0) /*in func*/ + )), /*in func*/ + bt, /*in func*/ + segexceeded) /*in func*/ + ]::diskquota.blackmap_entry[], /*in func*/ + ARRAY[(SELECT reloid FROM read_relation_cache_from_file(filename) /*in func*/ + WHERE relname=rel::text AND segid=0)::regclass]::oid[]) /*in func*/ + FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; /*in func*/ + END; $$ /*in func*/ LANGUAGE 'plpgsql'; -- 7. Test that we are able to block an ordinary relation on seg0 by its relnamespace. diff --git a/tests/regress/diskquota_schedule b/tests/regress/diskquota_schedule index bb87cd7759b..fa252b9a261 100644 --- a/tests/regress/diskquota_schedule +++ b/tests/regress/diskquota_schedule @@ -8,7 +8,7 @@ test: test_pause_and_resume_multiple_db test: test_drop_after_pause test: test_show_status test: test_update_db_cache -# disable this tese due to GPDB behavior change +# disable this test due to GPDB behavior change # test: test_table_size test: test_fast_disk_check #test: test_insert_after_drop @@ -29,5 +29,6 @@ test: test_ctas_role test: test_ctas_schema test: test_ctas_tablespace_role test: test_ctas_tablespace_schema +test: test_default_tablespace test: test_drop_extension test: reset_config diff --git a/tests/regress/expected/test_blackmap.out b/tests/regress/expected/test_blackmap.out index 9c9470bb079..45cdcc55edc 100644 --- a/tests/regress/expected/test_blackmap.out +++ b/tests/regress/expected/test_blackmap.out @@ -16,12 +16,33 @@ CREATE OR REPLACE FUNCTION replace_oid_with_relname(given_name text) WHERE oid=REGEXP_REPLACE(given_name, '\D', '', 'g')::oid), 'g'), given_name); END; $$ LANGUAGE plpgsql; +-- this function return valid tablespaceoid. +-- For role/namespace quota, return as it is. +-- For namespace_tablespace/role_tablespace quota, return non-zero tablespaceoid. 
+CREATE OR REPLACE FUNCTION get_real_tablespace_oid(block_type text, tablespaceoid oid) + RETURNS oid AS +$$ +BEGIN + CASE + WHEN (block_type = 'NAMESPACE') OR (block_type = 'ROLE') THEN RETURN tablespaceoid; + ELSE RETURN ( + CASE tablespaceoid + WHEN 0 THEN (SELECT dattablespace FROM pg_database WHERE datname = CURRENT_DATABASE()) + ELSE + tablespaceoid + END + ); + END CASE; +END; +$$ LANGUAGE plpgsql; CREATE OR REPLACE FUNCTION block_relation_on_seg0(rel regclass, block_type text) RETURNS void AS $$ DECLARE bt int; targetoid oid; + tablespaceoid oid; BEGIN + SELECT reltablespace INTO tablespaceoid FROM pg_class WHERE relname=rel::text; CASE block_type WHEN 'NAMESPACE' THEN bt = 0; @@ -39,12 +60,12 @@ CREATE OR REPLACE FUNCTION block_relation_on_seg0(rel regclass, block_type text) bt = 3; SELECT relowner INTO targetoid FROM pg_class WHERE relname=rel::text; - END CASE; + END CASE; PERFORM diskquota.refresh_blackmap( ARRAY[ ROW(targetoid, (SELECT oid FROM pg_database WHERE datname=current_database()), - (SELECT reltablespace FROM pg_class WHERE relname=rel::text), + (SELECT get_real_tablespace_oid(block_type, tablespaceoid)), bt, false) ]::diskquota.blackmap_entry[], @@ -54,11 +75,9 @@ CREATE OR REPLACE FUNCTION block_relation_on_seg0(rel regclass, block_type text) LANGUAGE 'plpgsql'; -- -- 1. Create an ordinary table and add its oid to blackmap on seg0. --- Check that it's relfilenode is blocked on seg0 by variouts conditions. +-- Check that it's relfilenode is blocked on seg0 by various conditions. -- CREATE TABLE blocked_t1(i int) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. -- Insert an entry for blocked_t1 to blackmap on seg0. SELECT block_relation_on_seg0('blocked_t1'::regclass, 'NAMESPACE'::text); block_relation_on_seg0 @@ -140,8 +159,6 @@ SELECT rel.relname, be.target_type, -- index are blocked on seg0. -- CREATE TABLE blocked_t2(i text) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. -- Insert an entry for blocked_t2 to blackmap on seg0. SELECT block_relation_on_seg0('blocked_t2'::regclass, 'NAMESPACE'::text); block_relation_on_seg0 @@ -170,8 +187,6 @@ SELECT replace_oid_with_relname(rel.relname), -- auxiliary relations are blocked on seg0. -- CREATE TABLE blocked_t3(i int) WITH (appendonly=true) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. CREATE INDEX blocked_t3_index ON blocked_t3(i); -- Insert an entry for blocked_t3 to blackmap on seg0. SELECT block_relation_on_seg0('blocked_t3'::regclass, 'NAMESPACE'::text); @@ -204,8 +219,6 @@ SELECT replace_oid_with_relname(rel.relname), -- auxiliary relations are blocked on seg0. 
-- CREATE TABLE blocked_t4(i int) WITH (appendonly=true, orientation=column) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. CREATE INDEX blocked_t4_index ON blocked_t4(i); -- Insert an entry for blocked_t4 to blackmap on seg0. SELECT block_relation_on_seg0('blocked_t4'::regclass, 'NAMESPACE'::text); @@ -238,8 +251,6 @@ SELECT replace_oid_with_relname(rel.relname), -- auxiliary relations are blocked on seg0. -- CREATE TABLE blocked_t5(i text) WITH (appendonly=true, orientation=column) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. CREATE INDEX blocked_t5_index ON blocked_t5(i); -- Insert an entry for blocked_t5 to blackmap on seg0. SELECT block_relation_on_seg0('blocked_t5'::regclass, 'NAMESPACE'::text); @@ -272,6 +283,7 @@ SELECT replace_oid_with_relname(rel.relname), -- Do some clean-ups. DROP FUNCTION replace_oid_with_relname(text); DROP FUNCTION block_relation_on_seg0(regclass, text); +DROP FUNCTION get_real_tablespace_oid(text, oid); DROP TABLE blocked_t1; DROP TABLE blocked_t2; DROP TABLE blocked_t3; diff --git a/tests/regress/expected/test_default_tablespace.out b/tests/regress/expected/test_default_tablespace.out new file mode 100644 index 00000000000..cd3556757c8 --- /dev/null +++ b/tests/regress/expected/test_default_tablespace.out @@ -0,0 +1,168 @@ +-- test role_tablespace_quota works with tables/databases in default tablespace +-- test role_tablespace_quota works with tables/databases in non-default tablespace with hard limits on +-- start_ignore +\! mkdir -p /tmp/custom_tablespace +-- end_ignore +DROP ROLE if EXISTS role1; +NOTICE: role "role1" does not exist, skipping +DROP ROLE if EXISTS role2; +NOTICE: role "role2" does not exist, skipping +CREATE ROLE role1 SUPERUSER; +CREATE ROLE role2 SUPERUSER; +SET ROLE role1; +DROP TABLE if EXISTS t; +NOTICE: table "t" does not exist, skipping +CREATE TABLE t (i int) DISTRIBUTED BY (i); +-- with hard limits off +\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\! 
gpstop -u > /dev/null +SELECT diskquota.set_role_tablespace_quota('role1', 'pg_default', '1 MB'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert to success +INSERT INTO t SELECT generate_series(1, 100); +INSERT INTO t SELECT generate_series(1, 1000000); +-- expect insert to fail +INSERT INTO t SELECT generate_series(1, 1000000); +ERROR: tablespace:pg_default role:role1 diskquota exceeded +SELECT r.rolname, t.spcname, b.target_type +FROM diskquota.blackmap AS b, pg_tablespace AS t, pg_roles AS r +WHERE b.tablespace_oid = t.oid AND b.target_oid = r.oid AND r.rolname = 'role1' +ORDER BY r.rolname, t.spcname, b.target_type; + rolname | spcname | target_type +---------+------------+----------------------- + role1 | pg_default | ROLE_TABLESPACE_QUOTA +(1 row) + +DROP TABLE IF EXISTS t; +SELECT diskquota.set_role_tablespace_quota('role1', 'pg_default', '-1'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +SET ROLE role2; +CREATE TABLE t (i int) DISTRIBUTED BY (i); +-- with hard limits on +\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null +\! gpstop -u > /dev/null +SELECT diskquota.set_role_tablespace_quota('role2', 'pg_default', '1 MB'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert to fail because of hard limits +INSERT INTO t SELECT generate_series(1, 50000000); +ERROR: tablespace:1663 role:3143588 diskquota exceeded (seg0 127.0.0.1:6002 pid=2298) +DROP TABLE IF EXISTS t; +SET ROLE role1; +-- database in customized tablespace +CREATE TABLESPACE custom_tablespace LOCATION '/tmp/custom_tablespace'; +CREATE DATABASE db_with_tablespace TABLESPACE custom_tablespace; +\c db_with_tablespace; +SET ROLE role1; +CREATE EXTENSION diskquota; +-- with hard limits off +\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\! gpstop -u > /dev/null +SELECT diskquota.set_role_tablespace_quota('role1', 'custom_tablespace', '1 MB'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert to success +CREATE TABLE t_in_custom_tablespace (i) AS SELECT generate_series(1, 100) DISTRIBUTED BY (i); +INSERT INTO t_in_custom_tablespace SELECT generate_series(1, 1000000); +-- expect insert to fail +INSERT INTO t_in_custom_tablespace SELECT generate_series(1, 1000000); +ERROR: tablespace:custom_tablespace role:role1 diskquota exceeded +SELECT r.rolname, t.spcname, b.target_type +FROM diskquota.blackmap AS b, pg_tablespace AS t, pg_roles AS r +WHERE b.tablespace_oid = t.oid AND b.target_oid = r.oid AND r.rolname = 'role1' +ORDER BY r.rolname, t.spcname, b.target_type; + rolname | spcname | target_type +---------+-------------------+----------------------- + role1 | custom_tablespace | ROLE_TABLESPACE_QUOTA +(1 row) + +DROP TABLE IF EXISTS t_in_custom_tablespace; +SELECT diskquota.set_role_tablespace_quota('role1', 'custom_tablespace', '-1'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SET ROLE role2; +-- with hard limits on +\! 
gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null +\! gpstop -u > /dev/null +SELECT diskquota.set_role_tablespace_quota('role2', 'custom_tablespace', '1 MB'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP TABLE IF EXISTS t_in_custom_tablespace; +NOTICE: table "t_in_custom_tablespace" does not exist, skipping +-- expect insert to fail because of hard limits +CREATE TABLE t_in_custom_tablespace (i) AS SELECT generate_series(1, 50000000) DISTRIBUTED BY (i); +ERROR: tablespace:3143595 role:3143588 diskquota exceeded (seg1 127.0.0.1:6003 pid=3260) +-- clean up +DROP TABLE IF EXISTS t_in_custom_tablespace; +NOTICE: table "t_in_custom_tablespace" does not exist, skipping +\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\! gpstop -u > /dev/null +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION IF EXISTS diskquota; +\c contrib_regression; +DROP DATABASE IF EXISTS db_with_tablespace; +DROP TABLESPACE IF EXISTS custom_tablespace; +RESET ROLE; +DROP ROLE IF EXISTS role1; +DROP ROLE IF EXISTS role2; diff --git a/tests/regress/expected/test_extension.out b/tests/regress/expected/test_extension.out index 01353a86882..6afd1980fa7 100644 --- a/tests/regress/expected/test_extension.out +++ b/tests/regress/expected/test_extension.out @@ -33,8 +33,6 @@ SELECT diskquota.wait_for_worker_new_epoch(); CREATE SCHEMA SX; CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. SELECT diskquota.set_schema_quota('SX', '1MB'); set_schema_quota ------------------ @@ -54,8 +52,6 @@ DROP TABLE SX.a; \c dbx1 CREATE SCHEMA SX; CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT INTO SX.a values(generate_series(0, 100000)); CREATE EXTENSION diskquota; WARNING: database is not empty, please run `select diskquota.init_table_size_table()` to initialize table_size information for diskquota extension. Note that for large database, this function may take a long time. @@ -77,6 +73,12 @@ SELECT diskquota.set_schema_quota('SX', '1MB'); (1 row) +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + \! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 4 INSERT INTO SX.a values(generate_series(0, 10)); @@ -94,8 +96,6 @@ SELECT diskquota.wait_for_worker_new_epoch(); 5 CREATE SCHEMA SX; CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
SELECT diskquota.set_schema_quota('SX', '1MB'); set_schema_quota ------------------ @@ -124,8 +124,6 @@ SELECT diskquota.wait_for_worker_new_epoch(); CREATE SCHEMA SX; CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. SELECT diskquota.set_schema_quota('SX', '1MB'); set_schema_quota ------------------ @@ -154,8 +152,6 @@ SELECT diskquota.wait_for_worker_new_epoch(); CREATE SCHEMA SX; CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. SELECT diskquota.set_schema_quota('SX', '1MB'); set_schema_quota ------------------ @@ -184,8 +180,6 @@ SELECT diskquota.wait_for_worker_new_epoch(); CREATE SCHEMA SX; CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. SELECT diskquota.set_schema_quota('SX', '1MB'); set_schema_quota ------------------ @@ -214,8 +208,6 @@ SELECT diskquota.wait_for_worker_new_epoch(); CREATE SCHEMA SX; CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. SELECT diskquota.set_schema_quota('SX', '1MB'); set_schema_quota ------------------ @@ -244,8 +236,6 @@ SELECT diskquota.wait_for_worker_new_epoch(); CREATE SCHEMA SX; CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. SELECT diskquota.set_schema_quota('SX', '1MB'); set_schema_quota ------------------ @@ -274,8 +264,6 @@ SELECT diskquota.wait_for_worker_new_epoch(); CREATE SCHEMA SX; CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. SELECT diskquota.set_schema_quota('SX', '1MB'); set_schema_quota ------------------ @@ -294,14 +282,14 @@ ERROR: schema's disk space quota exceeded with name:sx DROP TABLE SX.a; \c dbx9 CREATE EXTENSION diskquota; -ERROR: [diskquota] failed to create diskquota extension: too many databases to monitor (diskquota_utility.c:287) +ERROR: [diskquota] failed to create diskquota extension: too many databases to monitor (diskquota_utility.c:286) \! 
sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 11 SELECT diskquota.wait_for_worker_new_epoch(); ERROR: schema "diskquota" does not exist \c dbx10 CREATE EXTENSION diskquota; -ERROR: [diskquota] failed to create diskquota extension: too many databases to monitor (diskquota_utility.c:287) +ERROR: [diskquota] failed to create diskquota extension: too many databases to monitor (diskquota_utility.c:286) \! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 11 SELECT diskquota.wait_for_worker_new_epoch(); diff --git a/tests/regress/expected/test_tablespace_role.out b/tests/regress/expected/test_tablespace_role.out index a289f94ec9e..9c090ffaa34 100644 --- a/tests/regress/expected/test_tablespace_role.out +++ b/tests/regress/expected/test_tablespace_role.out @@ -8,9 +8,7 @@ CREATE TABLESPACE rolespc LOCATION '/tmp/rolespc'; CREATE SCHEMA rolespcrole; SET search_path TO rolespcrole; DROP ROLE IF EXISTS rolespcu1; -NOTICE: role "rolespcu1" does not exist, skipping DROP ROLE IF EXISTS rolespcu2; -NOTICE: role "rolespcu2" does not exist, skipping CREATE ROLE rolespcu1 NOLOGIN; NOTICE: resource queue required -- using default resource queue "pg_default" CREATE ROLE rolespcu2 NOLOGIN; @@ -52,7 +50,7 @@ ERROR: tablespace:rolespc role:rolespcu1 diskquota exceeded -- expect insert fail INSERT INTO b2 SELECT generate_series(1,100); ERROR: tablespace:rolespc role:rolespcu1 diskquota exceeded --- Test show_fast_schema_tablespace_quota_view +-- Test show_fast_role_tablespace_quota_view SELECT role_name, tablespace_name, quota_in_mb, rolsize_tablespace_in_bytes FROM diskquota.show_fast_role_tablespace_quota_view WHERE role_name = 'rolespcu1' and tablespace_name = 'rolespc'; role_name | tablespace_name | quota_in_mb | rolsize_tablespace_in_bytes -----------+-----------------+-------------+----------------------------- diff --git a/tests/regress/sql/test_blackmap.sql b/tests/regress/sql/test_blackmap.sql index 47b6b783185..545c688d4ca 100644 --- a/tests/regress/sql/test_blackmap.sql +++ b/tests/regress/sql/test_blackmap.sql @@ -19,12 +19,34 @@ CREATE OR REPLACE FUNCTION replace_oid_with_relname(given_name text) END; $$ LANGUAGE plpgsql; +-- this function return valid tablespaceoid. +-- For role/namespace quota, return as it is. +-- For namespace_tablespace/role_tablespace quota, return non-zero tablespaceoid. 
+CREATE OR REPLACE FUNCTION get_real_tablespace_oid(block_type text, tablespaceoid oid) + RETURNS oid AS +$$ +BEGIN + CASE + WHEN (block_type = 'NAMESPACE') OR (block_type = 'ROLE') THEN RETURN tablespaceoid; + ELSE RETURN ( + CASE tablespaceoid + WHEN 0 THEN (SELECT dattablespace FROM pg_database WHERE datname = CURRENT_DATABASE()) + ELSE + tablespaceoid + END + ); + END CASE; +END; +$$ LANGUAGE plpgsql; + CREATE OR REPLACE FUNCTION block_relation_on_seg0(rel regclass, block_type text) RETURNS void AS $$ DECLARE bt int; targetoid oid; + tablespaceoid oid; BEGIN + SELECT reltablespace INTO tablespaceoid FROM pg_class WHERE relname=rel::text; CASE block_type WHEN 'NAMESPACE' THEN bt = 0; @@ -42,12 +64,12 @@ CREATE OR REPLACE FUNCTION block_relation_on_seg0(rel regclass, block_type text) bt = 3; SELECT relowner INTO targetoid FROM pg_class WHERE relname=rel::text; - END CASE; + END CASE; PERFORM diskquota.refresh_blackmap( ARRAY[ ROW(targetoid, (SELECT oid FROM pg_database WHERE datname=current_database()), - (SELECT reltablespace FROM pg_class WHERE relname=rel::text), + (SELECT get_real_tablespace_oid(block_type, tablespaceoid)), bt, false) ]::diskquota.blackmap_entry[], @@ -58,7 +80,7 @@ LANGUAGE 'plpgsql'; -- -- 1. Create an ordinary table and add its oid to blackmap on seg0. --- Check that it's relfilenode is blocked on seg0 by variouts conditions. +-- Check that it's relfilenode is blocked on seg0 by various conditions. -- CREATE TABLE blocked_t1(i int) DISTRIBUTED BY (i); @@ -185,6 +207,7 @@ SELECT replace_oid_with_relname(rel.relname), -- Do some clean-ups. DROP FUNCTION replace_oid_with_relname(text); DROP FUNCTION block_relation_on_seg0(regclass, text); +DROP FUNCTION get_real_tablespace_oid(text, oid); DROP TABLE blocked_t1; DROP TABLE blocked_t2; DROP TABLE blocked_t3; diff --git a/tests/regress/sql/test_default_tablespace.sql b/tests/regress/sql/test_default_tablespace.sql new file mode 100644 index 00000000000..1bf915cbf81 --- /dev/null +++ b/tests/regress/sql/test_default_tablespace.sql @@ -0,0 +1,107 @@ +-- test role_tablespace_quota works with tables/databases in default tablespace +-- test role_tablespace_quota works with tables/databases in non-default tablespace with hard limits on + +-- start_ignore +\! mkdir -p /tmp/custom_tablespace +-- end_ignore + +DROP ROLE if EXISTS role1; +DROP ROLE if EXISTS role2; +CREATE ROLE role1 SUPERUSER; +CREATE ROLE role2 SUPERUSER; +SET ROLE role1; + +DROP TABLE if EXISTS t; +CREATE TABLE t (i int) DISTRIBUTED BY (i); + +-- with hard limits off +\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\! gpstop -u > /dev/null + +SELECT diskquota.set_role_tablespace_quota('role1', 'pg_default', '1 MB'); +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect insert to success +INSERT INTO t SELECT generate_series(1, 100); +INSERT INTO t SELECT generate_series(1, 1000000); +-- expect insert to fail +INSERT INTO t SELECT generate_series(1, 1000000); + +SELECT r.rolname, t.spcname, b.target_type +FROM diskquota.blackmap AS b, pg_tablespace AS t, pg_roles AS r +WHERE b.tablespace_oid = t.oid AND b.target_oid = r.oid AND r.rolname = 'role1' +ORDER BY r.rolname, t.spcname, b.target_type; + +DROP TABLE IF EXISTS t; +SELECT diskquota.set_role_tablespace_quota('role1', 'pg_default', '-1'); + +SET ROLE role2; +CREATE TABLE t (i int) DISTRIBUTED BY (i); + +-- with hard limits on +\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null +\! 
gpstop -u > /dev/null + +SELECT diskquota.set_role_tablespace_quota('role2', 'pg_default', '1 MB'); +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect insert to fail because of hard limits +INSERT INTO t SELECT generate_series(1, 50000000); +DROP TABLE IF EXISTS t; + +SET ROLE role1; +-- database in customized tablespace +CREATE TABLESPACE custom_tablespace LOCATION '/tmp/custom_tablespace'; +CREATE DATABASE db_with_tablespace TABLESPACE custom_tablespace; +\c db_with_tablespace; +SET ROLE role1; +CREATE EXTENSION diskquota; + +-- with hard limits off +\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\! gpstop -u > /dev/null + +SELECT diskquota.set_role_tablespace_quota('role1', 'custom_tablespace', '1 MB'); +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect insert to success +CREATE TABLE t_in_custom_tablespace (i) AS SELECT generate_series(1, 100) DISTRIBUTED BY (i); +INSERT INTO t_in_custom_tablespace SELECT generate_series(1, 1000000); +-- expect insert to fail +INSERT INTO t_in_custom_tablespace SELECT generate_series(1, 1000000); + +SELECT r.rolname, t.spcname, b.target_type +FROM diskquota.blackmap AS b, pg_tablespace AS t, pg_roles AS r +WHERE b.tablespace_oid = t.oid AND b.target_oid = r.oid AND r.rolname = 'role1' +ORDER BY r.rolname, t.spcname, b.target_type; + +DROP TABLE IF EXISTS t_in_custom_tablespace; +SELECT diskquota.set_role_tablespace_quota('role1', 'custom_tablespace', '-1'); +SELECT diskquota.wait_for_worker_new_epoch(); +SET ROLE role2; + +-- with hard limits on +\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null +\! gpstop -u > /dev/null + +SELECT diskquota.set_role_tablespace_quota('role2', 'custom_tablespace', '1 MB'); +SELECT diskquota.wait_for_worker_new_epoch(); + +DROP TABLE IF EXISTS t_in_custom_tablespace; +-- expect insert to fail because of hard limits +CREATE TABLE t_in_custom_tablespace (i) AS SELECT generate_series(1, 50000000) DISTRIBUTED BY (i); + +-- clean up +DROP TABLE IF EXISTS t_in_custom_tablespace; + +\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\! gpstop -u > /dev/null + +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); +DROP EXTENSION IF EXISTS diskquota; + +\c contrib_regression; +DROP DATABASE IF EXISTS db_with_tablespace; +DROP TABLESPACE IF EXISTS custom_tablespace; + +RESET ROLE; +DROP ROLE IF EXISTS role1; +DROP ROLE IF EXISTS role2; diff --git a/tests/regress/sql/test_extension.sql b/tests/regress/sql/test_extension.sql index 40d3f09e51d..a3003957aef 100644 --- a/tests/regress/sql/test_extension.sql +++ b/tests/regress/sql/test_extension.sql @@ -39,6 +39,7 @@ CREATE EXTENSION diskquota; SELECT diskquota.init_table_size_table(); SELECT diskquota.wait_for_worker_new_epoch(); SELECT diskquota.set_schema_quota('SX', '1MB'); +SELECT diskquota.wait_for_worker_new_epoch(); \! 
sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l INSERT INTO SX.a values(generate_series(0, 10)); DROP TABLE SX.a; diff --git a/tests/regress/sql/test_tablespace_role.sql b/tests/regress/sql/test_tablespace_role.sql index 94afbeed69d..dd84118722b 100644 --- a/tests/regress/sql/test_tablespace_role.sql +++ b/tests/regress/sql/test_tablespace_role.sql @@ -29,7 +29,7 @@ INSERT INTO b SELECT generate_series(1,100); -- expect insert fail INSERT INTO b2 SELECT generate_series(1,100); --- Test show_fast_schema_tablespace_quota_view +-- Test show_fast_role_tablespace_quota_view SELECT role_name, tablespace_name, quota_in_mb, rolsize_tablespace_in_bytes FROM diskquota.show_fast_role_tablespace_quota_view WHERE role_name = 'rolespcu1' and tablespace_name = 'rolespc'; -- Test alter owner From 1227dd21a7dd82e806555ba648227c03c8051228 Mon Sep 17 00:00:00 2001 From: Sasasu Date: Mon, 28 Feb 2022 17:47:20 +0800 Subject: [PATCH 140/330] build: use cmake as build system (#161) --- CMakeLists.txt | 79 ++++++++++++++++++++++++++++ Makefile | 15 ------ VERSION | 1 + cmake/Gpdb.cmake | 49 +++++++++++++++++ concourse/scripts/build_diskquota.sh | 11 ++-- diskquota.c | 1 + 6 files changed, 138 insertions(+), 18 deletions(-) create mode 100644 CMakeLists.txt create mode 100644 VERSION create mode 100644 cmake/Gpdb.cmake diff --git a/CMakeLists.txt b/CMakeLists.txt new file mode 100644 index 00000000000..28fe71fe293 --- /dev/null +++ b/CMakeLists.txt @@ -0,0 +1,79 @@ +cmake_minimum_required(VERSION 3.13) +project(diskquota) + +# generate 'compile_commands.json' +set(CMAKE_EXPORT_COMPILE_COMMANDS ON) + +include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/Gpdb.cmake) + +# set include directories for all sub-projects +include_directories(${PG_INCLUDE_DIR_SERVER}) +include_directories(${PG_INCLUDE_DIR}) # for libpq +# set link flags for all sub-projects +set(CMAKE_SHARED_LINKER_FLAGS "${PG_LD_FLAGS}") +# set c and ld flags for all projects +set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${PG_C_FLAGS}") +set(CMAKE_MODULE_LINKER_FLAGS_INIT "${CMAKE_EXE_LINKER_FLAGS} ${PG_LD_FLAGS}") + +# generate version +if(NOT DEFINED DISKQUOTA_VERSION) + file(STRINGS VERSION DISKQUOTA_VERSION) +endif() + +string(REGEX REPLACE "^([0-9]+).[0-9]+.[0-9]+$" "\\1" DISKQUOTA_MAJOR_VERSION + ${DISKQUOTA_VERSION}) +string(REGEX REPLACE "^[0-9]+.([0-9]+).[0-9]+$" "\\1" DISKQUOTA_MINOR_VERSION + ${DISKQUOTA_VERSION}) +string(REGEX REPLACE "^[0-9]+.[0-9]+.([0-9]+)$" "\\1" DISKQUOTA_PATCH_VERSION + ${DISKQUOTA_VERSION}) + +set(CMAKE_C_FLAGS + "${CMAKE_C_FLAGS} \ + -DDISKQUOTA_VERSION=\"${DISKQUOTA_VERSION}\" \ + -DDISKQUOTA_MAJOR_VERSION=\"${DISKQUOTA_MAJOR_VERSION}\" \ + -DDISKQUOTA_MINOR_VERSION=\"${DISKQUOTA_MINOR_VERSION}\" \ + -DDISKQUOTA_PATCH_VERSION=\"${DISKQUOTA_PATCH_VERSION}\"") + +if("${DISKQUOTA_MAJOR_VERSION}.${DISKQUOTA_MINOR_VERSION}" STREQUAL "1.0") + # in special, version 1.0.x do not has suffix + set(DISKQUOTA_BINARY_NAME "diskquota") +else() + set(DISKQUOTA_BINARY_NAME + "diskquota-${DISKQUOTA_MAJOR_VERSION}.${DISKQUOTA_MINOR_VERSION}") +endif() + +list( + APPEND + diskquota_SRC + diskquota.c + diskquota_utility.c + enforcement.c + gp_activetable.c + quotamodel.c + relation_cache.c) + +list( + APPEND + diskquota_DDL + diskquota.control + diskquota--1.0.sql + diskquota--1.0--2.0.sql + diskquota--2.0.sql + diskquota--2.0--1.0.sql) + +add_library(diskquota MODULE ${diskquota_SRC}) + +# pg_config do not give us the `--prefix` we use `../../` as a workaround +if(CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT) + 
set(CMAKE_INSTALL_PREFIX "${PG_PKG_LIB_DIR}/../../") +endif() + +set_target_properties( + diskquota + PROPERTIES OUTPUT_NAME ${DISKQUOTA_BINARY_NAME} + PREFIX "" + C_STANDARD 99 + LINKER_LANGUAGE "CXX") + +install(FILES ${diskquota_DDL} DESTINATION "share/postgresql/extension/") +install(TARGETS diskquota DESTINATION "lib/postgresql/") diff --git a/Makefile b/Makefile index 662227225a1..08205742d95 100644 --- a/Makefile +++ b/Makefile @@ -1,18 +1,3 @@ -# contrib/diskquota/Makefile - -MODULE_big = diskquota - -EXTENSION = diskquota -DATA = diskquota--1.0.sql diskquota--2.0.sql diskquota--1.0--2.0.sql diskquota--2.0--1.0.sql -SRCDIR = ./ -FILES = diskquota.c enforcement.c quotamodel.c gp_activetable.c diskquota_utility.c relation_cache.c -OBJS = diskquota.o enforcement.o quotamodel.o gp_activetable.o diskquota_utility.o relation_cache.o -PG_CPPFLAGS = -I$(libpq_srcdir) -SHLIB_LINK = $(libpq) - -PGXS := $(shell pg_config --pgxs) -include $(PGXS) - .PHONY: installcheck installcheck: $(MAKE) -C tests installcheck-regress diff --git a/VERSION b/VERSION new file mode 100644 index 00000000000..21e8796a09d --- /dev/null +++ b/VERSION @@ -0,0 +1 @@ +1.0.3 diff --git a/cmake/Gpdb.cmake b/cmake/Gpdb.cmake new file mode 100644 index 00000000000..91f44200af3 --- /dev/null +++ b/cmake/Gpdb.cmake @@ -0,0 +1,49 @@ +# Use pg_config to detect postgres dependencies +# +# Variables: +# +# PG_CONFIG - the path to the pg_config executable to be used. this determines the +# version to be built with. +# GP_MAJOR_VERSION - the major version parsed from gpdb source +# PG_BIN_DIR - location of user executables +# PG_INCLUDE_DIR - location of C header files of the client +# PG_INCLUDE_DIR_SERVER - location of C header files for the server +# PG_LIBS - LIBS value used when PostgreSQL was built +# PG_LIB_DIR - location of object code libraries +# PG_PKG_LIB_DIR - location of dynamically loadable modules +# PG_SHARE_DIR - location of architecture-independent support files +# PG_PGXS - location of extension makefile +# PG_CPP_FLAGS - CPPFLAGS value used when PostgreSQL was built +# PG_C_FLAGS - CFLAGS value used when PostgreSQL was built +# PG_LD_FLAGS - LDFLAGS value used when PostgreSQL was built +# PG_HOME - The installation directory of Greenplum + +include_guard() +find_program(PG_CONFIG pg_config) +if(PG_CONFIG) + message(STATUS "Use '${PG_CONFIG}'") +else() + message(FATAL_ERROR "Unable to find 'pg_config'") +endif() +exec_program(${PG_CONFIG} ARGS --includedir OUTPUT_VARIABLE PG_INCLUDE_DIR) +exec_program(${PG_CONFIG} ARGS --includedir-server OUTPUT_VARIABLE PG_INCLUDE_DIR_SERVER) +exec_program(${PG_CONFIG} ARGS --pkglibdir OUTPUT_VARIABLE PG_PKG_LIB_DIR) +exec_program(${PG_CONFIG} ARGS --sharedir OUTPUT_VARIABLE PG_SHARE_DIR) +exec_program(${PG_CONFIG} ARGS --bindir OUTPUT_VARIABLE PG_BIN_DIR) +exec_program(${PG_CONFIG} ARGS --cppflags OUTPUT_VARIABLE PG_CPP_FLAGS) +exec_program(${PG_CONFIG} ARGS --cflags OUTPUT_VARIABLE PG_C_FLAGS) +exec_program(${PG_CONFIG} ARGS --ldflags OUTPUT_VARIABLE PG_LD_FLAGS) +exec_program(${PG_CONFIG} ARGS --libs OUTPUT_VARIABLE PG_LIBS) +exec_program(${PG_CONFIG} ARGS --libdir OUTPUT_VARIABLE PG_LIB_DIR) +exec_program(${PG_CONFIG} ARGS --pgxs OUTPUT_VARIABLE PG_PGXS) +get_filename_component(PG_HOME "${PG_BIN_DIR}/.." 
ABSOLUTE) + +# Get the GP_MAJOR_VERSION from header +file(READ ${PG_INCLUDE_DIR}/pg_config.h config_header) +string(REGEX MATCH "#define *GP_MAJORVERSION *\"[0-9]+\"" macrodef "${config_header}") +string(REGEX MATCH "[0-9]+" GP_MAJOR_VERSION "${macrodef}") +if (GP_MAJOR_VERSION) + message(STATUS "Build extension for GPDB ${GP_MAJOR_VERSION}") +else() + message(FATAL_ERROR "Cannot read GP_MAJORVERSION from '${PG_INCLUDE_DIR}/pg_config.h'") +endif() diff --git a/concourse/scripts/build_diskquota.sh b/concourse/scripts/build_diskquota.sh index 66638baebf8..d107bd4549c 100755 --- a/concourse/scripts/build_diskquota.sh +++ b/concourse/scripts/build_diskquota.sh @@ -10,18 +10,23 @@ function pkg() { [ -f /opt/gcc_env.sh ] && source /opt/gcc_env.sh source /usr/local/greenplum-db-devel/greenplum_path.sh + if [ "${DISKQUOTA_OS}" = "rhel6" ]; then + export CC="$(which gcc)" + fi + export USE_PGXS=1 pushd diskquota_src/ DISKQUOTA_VERSION=$(git describe --tags) - make clean - make install + mkdir build + cmake -B build . + make -C build install popd pushd /usr/local/greenplum-db-devel/ echo 'cp -r lib share $GPHOME || exit 1'> install_gpdb_component chmod a+x install_gpdb_component tar -czf "$TOP_DIR/diskquota_artifacts/diskquota-${DISKQUOTA_VERSION}-${DISKQUOTA_OS}_x86_64.tar.gz" \ - "lib/postgresql/diskquota.so" \ + lib/postgresql/diskquota*.so \ "share/postgresql/extension/diskquota.control" \ "share/postgresql/extension/diskquota--1.0.sql" \ "share/postgresql/extension/diskquota--2.0.sql" \ diff --git a/diskquota.c b/diskquota.c index 50df27e3b71..6f51f64dcc6 100644 --- a/diskquota.c +++ b/diskquota.c @@ -41,6 +41,7 @@ PG_MODULE_MAGIC; #define DISKQUOTA_DB "diskquota" #define DISKQUOTA_APPLICATION_NAME "gp_reserved_gpdiskquota" +#include <unistd.h> // for useconds_t extern int usleep(useconds_t usec); // in <unistd.h> /* flags set by signal handlers */ From a24e56ff07f8d8b7a8c34dfad35c6021d3f6fd75 Mon Sep 17 00:00:00 2001 From: Sasasu Date: Thu, 3 Mar 2022 11:44:55 +0800 Subject: [PATCH 141/330] rename library file to diskquota-<major>.<minor>.so (#162) Rename the library file to diskquota-<major>.<minor>.so; note that this breaks the old gpconfig -c shared_preload_libraries -v 'diskquota' setting. CentOS 6 and CentOS 7 ship only CMake 2.8.12, so use CMake 2.8.12 as the minimum required version. Use PG_HOME and remove the `../../` install-prefix workaround. Use CPack to create the installer. Clean up the SQL definitions.
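For clusters that already preload the unversioned library, this rename means shared_preload_libraries has to be updated to the versioned file name before restarting. A sketch of the migration, assuming the VERSION file reads 2.0.0 so the library is built as diskquota-2.0.so:

    gpconfig -c shared_preload_libraries -v 'diskquota-2.0'
    gpstop -ar

The suffix comes from DISKQUOTA_MAJOR_VERSION.DISKQUOTA_MINOR_VERSION in CMakeLists.txt; only the 1.0.x series keeps the bare diskquota.so name.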
--- CMakeLists.txt | 41 +++- README.md | 2 +- VERSION | 2 +- cmake/Distro.cmake | 39 ++++ cmake/current_binary_name | 9 + cmake/install_gpdb_component | 2 + concourse/scripts/test_common.sh | 3 - diskquota--1.0--2.0.sql | 250 +++++++++++--------- diskquota--1.0.sql | 105 ++++----- diskquota--2.0--1.0.sql | 142 +++++++----- diskquota--2.0.sql | 332 ++++++++++++--------------- diskquota.c | 18 +- diskquota.control | 2 +- tests/isolation2/expected/config.out | 3 +- tests/isolation2/sql/config.sql | 3 +- tests/regress/Makefile | 4 - tests/regress/diskquota_schedule_int | 12 - tests/regress/sql/config.sql | 2 +- upgrade_test/expected/init.out | 2 +- upgrade_test/sql/init.sql | 2 +- 20 files changed, 529 insertions(+), 446 deletions(-) create mode 100644 cmake/Distro.cmake create mode 100755 cmake/current_binary_name create mode 100755 cmake/install_gpdb_component delete mode 100644 tests/regress/diskquota_schedule_int diff --git a/CMakeLists.txt b/CMakeLists.txt index 28fe71fe293..cdbb4667bc0 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.13) +cmake_minimum_required(VERSION 2.8.12) project(diskquota) # generate 'compile_commands.json' @@ -27,13 +27,6 @@ string(REGEX REPLACE "^[0-9]+.([0-9]+).[0-9]+$" "\\1" DISKQUOTA_MINOR_VERSION string(REGEX REPLACE "^[0-9]+.[0-9]+.([0-9]+)$" "\\1" DISKQUOTA_PATCH_VERSION ${DISKQUOTA_VERSION}) -set(CMAKE_C_FLAGS - "${CMAKE_C_FLAGS} \ - -DDISKQUOTA_VERSION=\"${DISKQUOTA_VERSION}\" \ - -DDISKQUOTA_MAJOR_VERSION=\"${DISKQUOTA_MAJOR_VERSION}\" \ - -DDISKQUOTA_MINOR_VERSION=\"${DISKQUOTA_MINOR_VERSION}\" \ - -DDISKQUOTA_PATCH_VERSION=\"${DISKQUOTA_PATCH_VERSION}\"") - if("${DISKQUOTA_MAJOR_VERSION}.${DISKQUOTA_MINOR_VERSION}" STREQUAL "1.0") # in special, version 1.0.x do not has suffix set(DISKQUOTA_BINARY_NAME "diskquota") @@ -42,6 +35,14 @@ else() "diskquota-${DISKQUOTA_MAJOR_VERSION}.${DISKQUOTA_MINOR_VERSION}") endif() +set(CMAKE_C_FLAGS + "${CMAKE_C_FLAGS} \ + -DDISKQUOTA_VERSION='\"${DISKQUOTA_VERSION}\"' \ + -DDISKQUOTA_MAJOR_VERSION='\"${DISKQUOTA_MAJOR_VERSION}\"' \ + -DDISKQUOTA_MINOR_VERSION='\"${DISKQUOTA_MINOR_VERSION}\"' \ + -DDISKQUOTA_PATCH_VERSION='\"${DISKQUOTA_PATCH_VERSION}\"' \ + -DDISKQUOTA_BINARY_NAME='\"${DISKQUOTA_BINARY_NAME}\"'") + list( APPEND diskquota_SRC @@ -63,9 +64,8 @@ list( add_library(diskquota MODULE ${diskquota_SRC}) -# pg_config do not give us the `--prefix` we use `../../` as a workaround if(CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT) - set(CMAKE_INSTALL_PREFIX "${PG_PKG_LIB_DIR}/../../") + set(CMAKE_INSTALL_PREFIX "${PG_HOME}") endif() set_target_properties( @@ -75,5 +75,26 @@ set_target_properties( C_STANDARD 99 LINKER_LANGUAGE "CXX") +# packing part, move to a separate file if this part is too large +include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/Distro.cmake) + +if(DEFINED DISKQUOTA_PREVIOUS_INSTALLER) + message(STATUS "Copy pervious installer from ${DISKQUOTA_PREVIOUS_INSTALLER}") + file(ARCHIVE_EXTRACT INPUT ${DISKQUOTA_PREVIOUS_INSTALLER} PATTERNS "*.so") + file(GLOB DISKQUOTA_PREVIOUS_LIBRARY + "${CMAKE_BINARY_DIR}/lib/postgresql/*.so") + install(PROGRAMS ${DISKQUOTA_PREVIOUS_LIBRARY} DESTINATION "lib/postgresql/") +endif() + +set(CPACK_GENERATOR "TGZ") +set(CPACK_INCLUDE_TOPLEVEL_DIRECTORY OFF) +set(CPACK_PACKAGE_FILE_NAME + "diskquota-${DISKQUOTA_MAJOR_VERSION}.${DISKQUOTA_MINOR_VERSION}.${DISKQUOTA_PATCH_VERSION}-${DISTRO_NAME}_x86_64" +) +include(CPack) +# packing end + +# NOTE: keep install part at the end of file, to overwrite previous binary +install(PROGRAMS 
"cmake/install_gpdb_component" DESTINATION ".") install(FILES ${diskquota_DDL} DESTINATION "share/postgresql/extension/") install(TARGETS diskquota DESTINATION "lib/postgresql/") diff --git a/README.md b/README.md index c99d7b1cced..ea431fc22b3 100644 --- a/README.md +++ b/README.md @@ -98,7 +98,7 @@ create database diskquota; 3. Enable diskquota as preload library ``` # enable diskquota in preload library. -gpconfig -c shared_preload_libraries -v 'diskquota' +gpconfig -c shared_preload_libraries -v 'diskquota-' # restart database. gpstop -ar ``` diff --git a/VERSION b/VERSION index 21e8796a09d..227cea21564 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -1.0.3 +2.0.0 diff --git a/cmake/Distro.cmake b/cmake/Distro.cmake new file mode 100644 index 00000000000..c619808b730 --- /dev/null +++ b/cmake/Distro.cmake @@ -0,0 +1,39 @@ +# Cmake utility to identify the distribution names. Currently Below distributions +# can be identified: +# - centos6 +# - centos7 +# - unbuntu18 +include_guard() + +set(DISTRO_NAME "" CACHE STRING "Distribution name of current build environment") + +if(NOT DISTRO_NAME) + SET(DISTRO_NAME unknown) + if(EXISTS "/etc/redhat-release") + file(READ /etc/redhat-release rh_release) + string(REGEX MATCH "CentOS release 6.*" matched6 "${rh_release}") + string(REGEX MATCH "CentOS Linux release 7.*" matched7 "${rh_release}") + string(REGEX MATCH "Red Hat Enterprise Linux release 8.*" matched_rhel8 "${rh_release}") + string(REGEX MATCH "CentOS Linux release 8.*" matched_centos8 "${rh_release}") + if (matched6) + set(DISTRO_NAME rhel6) + elseif(matched7) + set(DISTRO_NAME rhel7) + elseif(matched_rhel8 OR matched_centos8) + set(DISTRO_NAME rhel8) + endif() + elseif(EXISTS "/etc/os-release") + file(READ /etc/os-release os_release) + string(REGEX MATCH "ID=ubuntu" isubuntu "${os_release}") + string(REGEX MATCH "VERSION_ID=\"18.04\"" matched1804 "${os_release}") + if (isubuntu AND matched1804) + SET(DISTRO_NAME ubuntu18.04) + endif() + + string(REGEX MATCH "ID=photon" isphoton "${os_release}") + string(REGEX MATCH "VERSION_ID=3.0" matched30 "${os_release}") + if (isphoton AND matched30) + SET(DISTRO_NAME photon3) + endif() + endif() +endif() diff --git a/cmake/current_binary_name b/cmake/current_binary_name new file mode 100755 index 00000000000..475789a561d --- /dev/null +++ b/cmake/current_binary_name @@ -0,0 +1,9 @@ +#!/bin/bash + +cd "$(dirname "$0")" + +if [ $(grep -P '^1.0' ../VERSION) ]; then + echo -n "diskquota.so" +else + echo -n "diskquota-$(grep -o -P '^\d+.\d+' ../VERSION).so" +fi diff --git a/cmake/install_gpdb_component b/cmake/install_gpdb_component new file mode 100755 index 00000000000..9929df4f7ed --- /dev/null +++ b/cmake/install_gpdb_component @@ -0,0 +1,2 @@ +#!/bin/bash +cp -r lib share $GPHOME || exit 1 diff --git a/concourse/scripts/test_common.sh b/concourse/scripts/test_common.sh index fde28800738..d800e6132bb 100644 --- a/concourse/scripts/test_common.sh +++ b/concourse/scripts/test_common.sh @@ -9,9 +9,6 @@ function test(){ echo "export MASTER_DATA_DIRECTORY=\$MASTER_DATA_DIRECTORY" >> /usr/local/greenplum-db-devel/greenplum_path.sh source /usr/local/greenplum-db-devel/greenplum_path.sh createdb diskquota - gpconfig -c shared_preload_libraries -v 'diskquota' - gpstop -arf - gpconfig -c diskquota.naptime -v 1 gpstop -arf # the dir to run the "make install" command pushd $1 diff --git a/diskquota--1.0--2.0.sql b/diskquota--1.0--2.0.sql index adc7899485e..74fa0d410e2 100644 --- a/diskquota--1.0--2.0.sql +++ b/diskquota--1.0--2.0.sql @@ -1,115 +1,155 @@ +-- TODO 
check if worker should not refresh, current lib should be diskquota-2.0.so + +-- table part ALTER TABLE diskquota.quota_config ADD COLUMN segratio float4 DEFAULT -1; CREATE TABLE diskquota.target ( - quotatype int, --REFERENCES disquota.quota_config.quotatype, - primaryOid oid, - tablespaceOid oid, --REFERENCES pg_tablespace.oid, - PRIMARY KEY (primaryOid, tablespaceOid, quotatype) + quotatype int, -- REFERENCES diskquota.quota_config.quotatype, + primaryOid oid, + tablespaceOid oid, -- REFERENCES pg_tablespace.oid, + PRIMARY KEY (primaryOid, tablespaceOid, quotatype) ); +-- TODO ALTER TABLE diskquota.target SET DEPENDS ON EXTENSION diskquota; -CREATE OR REPLACE FUNCTION diskquota.set_schema_tablespace_quota(text, text, text) -RETURNS void STRICT -AS 'MODULE_PATHNAME' -LANGUAGE C; - -CREATE OR REPLACE FUNCTION diskquota.set_role_tablespace_quota(text, text, text) -RETURNS void STRICT -AS 'MODULE_PATHNAME' -LANGUAGE C; - -CREATE OR REPLACE FUNCTION diskquota.set_per_segment_quota(text, float4) -RETURNS void STRICT -AS 'MODULE_PATHNAME' -LANGUAGE C; - -CREATE OR REPLACE FUNCTION diskquota.pause() -RETURNS void STRICT -AS 'MODULE_PATHNAME', 'diskquota_pause' -LANGUAGE C; - -CREATE OR REPLACE FUNCTION diskquota.resume() -RETURNS void STRICT -AS 'MODULE_PATHNAME', 'diskquota_resume' -LANGUAGE C; - -CREATE TYPE diskquota.blackmap_entry AS - (target_oid oid, database_oid oid, tablespace_oid oid, target_type integer, seg_exceeded boolean); -CREATE FUNCTION diskquota.refresh_blackmap(diskquota.blackmap_entry[], oid[]) -RETURNS void STRICT -AS 'MODULE_PATHNAME' -LANGUAGE C; - -CREATE TYPE diskquota.blackmap_entry_detail AS - (target_type text, target_oid oid, database_oid oid, - tablespace_oid oid, seg_exceeded boolean, dbnode oid, spcnode oid, relnode oid, segid int); - -CREATE FUNCTION diskquota.show_blackmap() -RETURNS setof diskquota.blackmap_entry_detail -AS 'MODULE_PATHNAME', 'show_blackmap' -LANGUAGE C; - -CREATE VIEW diskquota.blackmap AS - SELECT * FROM diskquota.show_blackmap() AS BM; - -ALTER TABLE diskquota.table_size ADD COLUMN segid smallint DEFAULT -1; +ALTER TABLE diskquota.table_size ADD COLUMN segid smallint DEFAULT -1; -- segid = -1 (coordinator) means the table size at cluster level ALTER TABLE diskquota.table_size DROP CONSTRAINT table_size_pkey; -ALTER TABLE diskquota.table_size ADD PRIMARY KEY (tableid,segid); - -CREATE OR REPLACE VIEW diskquota.show_fast_schema_quota_view AS -select pgns.nspname as schema_name, pgc.relnamespace as schema_oid, qc.quotalimitMB as quota_in_mb, sum(ts.size) as nspsize_in_bytes -from diskquota.table_size as ts, - pg_class as pgc, - diskquota.quota_config as qc, - pg_namespace as pgns -where ts.tableid = pgc.oid and qc.targetoid = pgc.relnamespace and pgns.oid = pgc.relnamespace and qc.quotatype=0 and ts.segid=-1 -group by relnamespace, qc.quotalimitMB, pgns.nspname -order by pgns.nspname; - -CREATE OR REPLACE VIEW diskquota.show_fast_role_quota_view AS -select pgr.rolname as role_name, pgc.relowner as role_oid, qc.quotalimitMB as quota_in_mb, sum(ts.size) as rolsize_in_bytes -from diskquota.table_size as ts, - pg_class as pgc, - diskquota.quota_config as qc, - pg_roles as pgr -WHERE pgc.relowner = qc.targetoid and pgc.relowner = pgr.oid and ts.tableid = pgc.oid and qc.quotatype=1 and ts.segid=-1 +ALTER TABLE diskquota.table_size ADD PRIMARY KEY (tableid, segid); +ALTER TABLE diskquota.table_size SET WITH (REORGANIZE=true) DISTRIBUTED BY (tableid, segid); + +-- TODO SELECT pg_catalog.pg_extension_config_dump('diskquota.target', ''); +-- TODO SELECT
gp_segment_id, pg_catalog.pg_extension_config_dump('diskquota.target', '') FROM gp_dist_random('gp_id'); +-- table part end + +-- type define +ALTER TYPE diskquota.diskquota_active_table_type ADD ATTRIBUTE "GP_SEGMENT_ID" smallint; + +CREATE TYPE diskquota.blackmap_entry AS ( + target_oid oid, + database_oid oid, + tablespace_oid oid, + target_type integer, + seg_exceeded boolean +); + +CREATE TYPE diskquota.blackmap_entry_detail AS ( + target_type text, + target_oid oid, + database_oid oid, + tablespace_oid oid, + seg_exceeded boolean, + dbnode oid, + spcnode oid, + relnode oid, + segid int +); + +CREATE TYPE diskquota.relation_cache_detail AS ( + RELID oid, + PRIMARY_TABLE_OID oid, + AUXREL_NUM int, + OWNEROID oid, + NAMESPACEOID oid, + BACKENDID int, + SPCNODE oid, + DBNODE oid, + RELNODE oid, + RELSTORAGE "char", + AUXREL_OID oid[] +); +-- type define end + +-- UDF +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_schema_quota(text, text) RETURNS void STRICT AS '$libdir/diskquota-2.0.so' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_role_quota(text, text) RETURNS void STRICT AS '$libdir/diskquota-2.0.so' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.init_table_size_table() RETURNS void STRICT AS '$libdir/diskquota-2.0.so' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.diskquota_fetch_table_stat(int4, oid[]) RETURNS setof diskquota.diskquota_active_table_type AS '$libdir/diskquota-2.0.so', 'diskquota_fetch_table_stat' LANGUAGE C VOLATILE; + +-- TODO solve dependency DROP FUNCTION diskquota.update_diskquota_db_list(oid, int4); + +CREATE FUNCTION diskquota.set_schema_tablespace_quota(text, text, text) RETURNS void STRICT AS '$libdir/diskquota-2.0.so' LANGUAGE C; +CREATE FUNCTION diskquota.set_role_tablespace_quota(text, text, text) RETURNS void STRICT AS '$libdir/diskquota-2.0.so' LANGUAGE C; +CREATE FUNCTION diskquota.set_per_segment_quota(text, float4) RETURNS void STRICT AS '$libdir/diskquota-2.0.so' LANGUAGE C; +CREATE FUNCTION diskquota.refresh_blackmap(diskquota.blackmap_entry[], oid[]) RETURNS void STRICT AS '$libdir/diskquota-2.0.so' LANGUAGE C; +CREATE FUNCTION diskquota.show_blackmap() RETURNS setof diskquota.blackmap_entry_detail AS '$libdir/diskquota-2.0.so', 'show_blackmap' LANGUAGE C; +CREATE FUNCTION diskquota.pause() RETURNS void STRICT AS '$libdir/diskquota-2.0.so', 'diskquota_pause' LANGUAGE C; +CREATE FUNCTION diskquota.resume() RETURNS void STRICT AS '$libdir/diskquota-2.0.so', 'diskquota_resume' LANGUAGE C; +CREATE FUNCTION diskquota.show_worker_epoch() RETURNS bigint STRICT AS '$libdir/diskquota-2.0.so', 'show_worker_epoch' LANGUAGE C; +CREATE FUNCTION diskquota.wait_for_worker_new_epoch() RETURNS boolean STRICT AS '$libdir/diskquota-2.0.so', 'wait_for_worker_new_epoch' LANGUAGE C; +CREATE FUNCTION diskquota.status() RETURNS TABLE ("name" text, "status" text) STRICT AS '$libdir/diskquota-2.0.so', 'diskquota_status' LANGUAGE C; +CREATE FUNCTION diskquota.show_relation_cache() RETURNS setof diskquota.relation_cache_detail AS '$libdir/diskquota-2.0.so', 'show_relation_cache' LANGUAGE C; +CREATE FUNCTION diskquota.relation_size_local( + reltablespace oid, + relfilenode oid, + relpersistence "char", + relstorage "char") +RETURNS bigint STRICT AS '$libdir/diskquota-2.0.so', 'relation_size_local' LANGUAGE C; +CREATE FUNCTION diskquota.relation_size(relation regclass) RETURNS bigint STRICT AS $$ + SELECT SUM(size)::bigint FROM ( + SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, 
relstorage) AS size + FROM gp_dist_random('pg_class') WHERE oid = relation + UNION ALL + SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, relstorage) AS size + FROM pg_class WHERE oid = relation + ) AS t $$ LANGUAGE SQL; + +CREATE FUNCTION diskquota.show_relation_cache_all_seg() RETURNS setof diskquota.relation_cache_detail AS $$ + WITH relation_cache AS ( + SELECT diskquota.show_relation_cache() AS a + FROM gp_dist_random('gp_id') + ) + SELECT (a).* FROM relation_cache; $$ LANGUAGE SQL; +-- UDF end + +-- views +CREATE VIEW diskquota.blackmap AS SELECT * FROM diskquota.show_blackmap() AS BM; + +/* ALTER */ CREATE OR REPLACE VIEW diskquota.show_fast_database_size_view AS +SELECT ( + (SELECT SUM(pg_relation_size(oid)) FROM pg_class WHERE oid <= 16384) + + + (SELECT SUM(size) FROM diskquota.table_size WHERE segid = -1) +) AS dbsize; + +/* ALTER */ CREATE OR REPLACE VIEW diskquota.show_fast_schema_quota_view AS +SELECT pgns.nspname AS schema_name, pgc.relnamespace AS schema_oid, qc.quotalimitMB AS quota_in_mb, SUM(ts.size) AS nspsize_in_bytes +FROM diskquota.table_size AS ts, + pg_class AS pgc, + diskquota.quota_config AS qc, + pg_namespace AS pgns +WHERE ts.tableid = pgc.oid AND qc.targetoid = pgc.relnamespace AND pgns.oid = pgc.relnamespace AND qc.quotatype = 0 AND ts.segid = -1 +GROUP BY relnamespace, qc.quotalimitMB, pgns.nspname +ORDER BY pgns.nspname; + +/* ALTER */ CREATE OR REPLACE VIEW diskquota.show_fast_role_quota_view AS +SELECT pgr.rolname AS role_name, pgc.relowner AS role_oid, qc.quotalimitMB AS quota_in_mb, SUM(ts.size) AS rolsize_in_bytes +FROM diskquota.table_size AS ts, + pg_class AS pgc, + diskquota.quota_config AS qc, + pg_roles AS pgr +WHERE pgc.relowner = qc.targetoid AND pgc.relowner = pgr.oid AND ts.tableid = pgc.oid AND qc.quotatype = 1 AND ts.segid = -1 GROUP BY pgc.relowner, pgr.rolname, qc.quotalimitMB; -CREATE OR REPLACE VIEW diskquota.show_fast_schema_tablespace_quota_view AS -select pgns.nspname as schema_name, pgc.relnamespace as schema_oid, pgsp.spcname as tablespace_name, pgc.reltablespace as tablespace_oid, qc.quotalimitMB as quota_in_mb, sum(ts.size) as nspsize_tablespace_in_bytes -from diskquota.table_size as ts, - pg_class as pgc, - diskquota.quota_config as qc, - pg_namespace as pgns, - pg_tablespace as pgsp, - diskquota.target as t -where ts.tableid = pgc.oid and qc.targetoid = pgc.relnamespace and pgns.oid = pgc.relnamespace and pgsp.oid = pgc.reltablespace and qc.quotatype=2 and qc.targetoid=t.primaryoid and t.tablespaceoid=pgc.reltablespace and ts.segid=-1 -group by relnamespace, reltablespace, qc.quotalimitMB, pgns.nspname, pgsp.spcname -order by pgns.nspname, pgsp.spcname; - -CREATE OR REPLACE VIEW diskquota.show_fast_role_tablespace_quota_view AS -select pgr.rolname as role_name, pgc.relowner as role_oid, pgsp.spcname as tablespace_name, pgc.reltablespace as tablespace_oid, qc.quotalimitMB as quota_in_mb, sum(ts.size) as rolsize_tablespace_in_bytes -from diskquota.table_size as ts, - pg_class as pgc, - diskquota.quota_config as qc, - pg_roles as pgr, - pg_tablespace as pgsp, - diskquota.target as t -WHERE pgc.relowner = qc.targetoid and pgc.relowner = pgr.oid and ts.tableid = pgc.oid and pgsp.oid = pgc.reltablespace and qc.quotatype=3 and qc.targetoid=t.primaryoid and t.tablespaceoid=pgc.reltablespace and ts.segid=-1 +CREATE VIEW diskquota.show_fast_schema_tablespace_quota_view AS +SELECT pgns.nspname AS schema_name, pgc.relnamespace AS schema_oid, pgsp.spcname AS tablespace_name, pgc.reltablespace AS 
tablespace_oid, qc.quotalimitMB AS quota_in_mb, SUM(ts.size) AS nspsize_tablespace_in_bytes +FROM diskquota.table_size AS ts, + pg_class AS pgc, + diskquota.quota_config AS qc, + pg_namespace AS pgns, + pg_tablespace AS pgsp, + diskquota.target AS t +WHERE ts.tableid = pgc.oid AND qc.targetoid = pgc.relnamespace AND pgns.oid = pgc.relnamespace AND pgsp.oid = pgc.reltablespace AND qc.quotatype = 2 AND qc.targetoid=t.primaryoid AND t.tablespaceoid=pgc.reltablespace AND ts.segid = -1 +GROUP BY relnamespace, reltablespace, qc.quotalimitMB, pgns.nspname, pgsp.spcname +ORDER BY pgns.nspname, pgsp.spcname; + +CREATE VIEW diskquota.show_fast_role_tablespace_quota_view AS +SELECT pgr.rolname AS role_name, pgc.relowner AS role_oid, pgsp.spcname AS tablespace_name, pgc.reltablespace AS tablespace_oid, qc.quotalimitMB AS quota_in_mb, SUM(ts.size) AS rolsize_tablespace_in_bytes +FROM diskquota.table_size AS ts, + pg_class AS pgc, + diskquota.quota_config AS qc, + pg_roles AS pgr, + pg_tablespace AS pgsp, + diskquota.target AS t +WHERE pgc.relowner = qc.targetoid AND pgc.relowner = pgr.oid AND ts.tableid = pgc.oid AND pgsp.oid = pgc.reltablespace AND qc.quotatype = 3 AND qc.targetoid=t.primaryoid AND t.tablespaceoid=pgc.reltablespace AND ts.segid = -1 GROUP BY pgc.relowner, reltablespace, pgr.rolname, pgsp.spcname, qc.quotalimitMB; +-- views end -CREATE OR REPLACE VIEW diskquota.show_fast_database_size_view AS -SELECT ((SELECT SUM(pg_relation_size(oid)) FROM pg_class WHERE oid <= 16384)+ (SELECT SUM(size) FROM diskquota.table_size WHERE segid = -1)) AS dbsize; - --- Need to drop the old type and functions, then recreate them to make the gpdb to reload the new functions -DROP FUNCTION diskquota.diskquota_fetch_table_stat(int4, oid[]); -DROP TYPE diskquota.diskquota_active_table_type; -CREATE TYPE diskquota.diskquota_active_table_type AS ("TABLE_OID" oid, "TABLE_SIZE" int8, "GP_SEGMENT_ID" smallint); -CREATE OR REPLACE FUNCTION diskquota.diskquota_fetch_table_stat(int4, oid[]) RETURNS setof diskquota.diskquota_active_table_type -AS 'MODULE_PATHNAME', 'diskquota_fetch_table_stat' -LANGUAGE C VOLATILE; - --- returns the current status in current database -CREATE OR REPLACE FUNCTION diskquota.status() -RETURNS TABLE ("name" text, "status" text) STRICT -AS 'MODULE_PATHNAME', 'diskquota_status' -LANGUAGE C; diff --git a/diskquota--1.0.sql b/diskquota--1.0.sql index 282758a8a14..827b682734e 100644 --- a/diskquota--1.0.sql +++ b/diskquota--1.0.sql @@ -1,74 +1,71 @@ -/* contrib/diskquota/diskquota--1.0.sql */ - -- complain if script is sourced in psql, rather than via CREATE EXTENSION \echo Use "CREATE EXTENSION diskquota" to load this file. 
\quit CREATE SCHEMA diskquota; -- Configuration table -CREATE TABLE diskquota.quota_config (targetOid oid, quotatype int, quotalimitMB int8, PRIMARY KEY(targetOid, quotatype)) DISTRIBUTED BY (targetOid, quotatype); - +CREATE TABLE diskquota.quota_config( + targetOid oid, + quotatype int, + quotalimitMB int8, + PRIMARY KEY(targetOid, quotatype) +) DISTRIBUTED BY (targetOid, quotatype); + +CREATE TABLE diskquota.table_size( + tableid oid, + size bigint, + PRIMARY KEY(tableid) +); + +CREATE TABLE diskquota.state( + state int, + PRIMARY KEY(state) +) DISTRIBUTED BY (state); + +-- only diskquota.quota_config is dump-able, other tables can be generated on the fly SELECT pg_catalog.pg_extension_config_dump('diskquota.quota_config', ''); -SELECT gp_segment_id, pg_catalog.pg_extension_config_dump('diskquota.quota_config', '') from gp_dist_random('gp_id'); - -CREATE FUNCTION diskquota.set_schema_quota(text, text) -RETURNS void STRICT -AS 'MODULE_PATHNAME' -LANGUAGE C; - -CREATE FUNCTION diskquota.set_role_quota(text, text) -RETURNS void STRICT -AS 'MODULE_PATHNAME' -LANGUAGE C; - -CREATE FUNCTION diskquota.update_diskquota_db_list(oid, int4) -RETURNS void STRICT -AS 'MODULE_PATHNAME' -LANGUAGE C; +SELECT gp_segment_id, pg_catalog.pg_extension_config_dump('diskquota.quota_config', '') FROM gp_dist_random('gp_id'); -CREATE TABLE diskquota.table_size (tableid oid, size bigint, PRIMARY KEY(tableid)); +CREATE TYPE diskquota.diskquota_active_table_type AS ( + "TABLE_OID" oid, + "TABLE_SIZE" int8 +); -CREATE TABLE diskquota.state (state int, PRIMARY KEY(state)) DISTRIBUTED BY (state); - -INSERT INTO diskquota.state SELECT (count(relname) = 0)::int FROM pg_class AS c, pg_namespace AS n WHERE c.oid > 16384 and relnamespace = n.oid and nspname != 'diskquota'; - -CREATE FUNCTION diskquota.diskquota_start_worker() -RETURNS void STRICT -AS 'MODULE_PATHNAME' -LANGUAGE C; - -CREATE FUNCTION diskquota.init_table_size_table() -RETURNS void STRICT -AS 'MODULE_PATHNAME' -LANGUAGE C; +CREATE FUNCTION diskquota.set_schema_quota(text, text) RETURNS void STRICT AS '$libdir/diskquota.so' LANGUAGE C; +CREATE FUNCTION diskquota.set_role_quota(text, text) RETURNS void STRICT AS '$libdir/diskquota.so' LANGUAGE C; +CREATE FUNCTION diskquota.update_diskquota_db_list(oid, int4) RETURNS void STRICT AS '$libdir/diskquota.so' LANGUAGE C; +CREATE FUNCTION diskquota.init_table_size_table() RETURNS void STRICT AS '$libdir/diskquota.so' LANGUAGE C; +CREATE FUNCTION diskquota.diskquota_fetch_table_stat(int4, oid[]) RETURNS setof diskquota.diskquota_active_table_type AS '$libdir/diskquota.so', 'diskquota_fetch_table_stat' LANGUAGE C VOLATILE; CREATE VIEW diskquota.show_fast_schema_quota_view AS -select pgns.nspname as schema_name, pgc.relnamespace as schema_oid, qc.quotalimitMB as quota_in_mb, sum(ts.size) as nspsize_in_bytes -from diskquota.table_size as ts, - pg_class as pgc, - diskquota.quota_config as qc, - pg_namespace as pgns -where ts.tableid = pgc.oid and qc.targetoid = pgc.relnamespace and pgns.oid = pgc.relnamespace -group by relnamespace, qc.quotalimitMB, pgns.nspname -order by pgns.nspname; +SELECT pgns.nspname AS schema_name, pgc.relnamespace AS schema_oid, qc.quotalimitMB AS quota_in_mb, SUM(ts.size) AS nspsize_in_bytes +FROM diskquota.table_size AS ts, + pg_class AS pgc, + diskquota.quota_config AS qc, + pg_namespace AS pgns +WHERE ts.tableid = pgc.oid AND qc.targetoid = pgc.relnamespace AND pgns.oid = pgc.relnamespace +GROUP BY relnamespace, qc.quotalimitMB, pgns.nspname +ORDER BY pgns.nspname; CREATE VIEW
diskquota.show_fast_role_quota_view AS -select pgr.rolname as role_name, pgc.relowner as role_oid, qc.quotalimitMB as quota_in_mb, sum(ts.size) as rolsize_in_bytes -from diskquota.table_size as ts, - pg_class as pgc, - diskquota.quota_config as qc, - pg_roles as pgr -WHERE pgc.relowner = qc.targetoid and pgc.relowner = pgr.oid and ts.tableid = pgc.oid +SELECT pgr.rolname AS role_name, pgc.relowner AS role_oid, qc.quotalimitMB AS quota_in_mb, SUM(ts.size) AS rolsize_in_bytes +FROM diskquota.table_size AS ts, + pg_class AS pgc, + diskquota.quota_config AS qc, + pg_roles AS pgr +WHERE pgc.relowner = qc.targetoid AND pgc.relowner = pgr.oid AND ts.tableid = pgc.oid GROUP BY pgc.relowner, pgr.rolname, qc.quotalimitMB; CREATE VIEW diskquota.show_fast_database_size_view AS -SELECT ((SELECT SUM(pg_relation_size(oid)) FROM pg_class WHERE oid <= 16384)+ (SELECT SUM(size) FROM diskquota.table_size)) AS dbsize; - -CREATE TYPE diskquota.diskquota_active_table_type AS ("TABLE_OID" oid, "TABLE_SIZE" int8); +SELECT ( + (SELECT SUM(pg_relation_size(oid)) FROM pg_class WHERE oid <= 16384) + + + (SELECT SUM(size) FROM diskquota.table_size) +) AS dbsize; -CREATE OR REPLACE FUNCTION diskquota.diskquota_fetch_table_stat(int4, oid[]) RETURNS setof diskquota.diskquota_active_table_type -AS 'MODULE_PATHNAME', 'diskquota_fetch_table_stat' -LANGUAGE C VOLATILE; +-- prepare to boot +INSERT INTO diskquota.state SELECT (count(relname) = 0)::int FROM pg_class AS c, pg_namespace AS n WHERE c.oid > 16384 AND relnamespace = n.oid AND nspname != 'diskquota'; +CREATE FUNCTION diskquota.diskquota_start_worker() RETURNS void STRICT AS '$libdir/diskquota.so' LANGUAGE C; SELECT diskquota.diskquota_start_worker(); DROP FUNCTION diskquota.diskquota_start_worker(); diff --git a/diskquota--2.0--1.0.sql b/diskquota--2.0--1.0.sql index 6bd6cd71ac1..fbde6e98a82 100644 --- a/diskquota--2.0--1.0.sql +++ b/diskquota--2.0--1.0.sql @@ -1,67 +1,85 @@ -DROP FUNCTION IF EXISTS diskquota.set_schema_tablespace_quota(text, text, text); - -DROP FUNCTION IF EXISTS diskquota.set_role_tablespace_quota(text, text, text); - -DROP FUNCTION IF EXISTS diskquota.set_per_segment_quota(text, float4); - -DROP FUNCTION IF EXISTS diskquota.pause(); - -DROP FUNCTION IF EXISTS diskquota.resume(); - -DROP FUNCTION IF EXISTS diskquota.refresh_blackmap(diskquota.blackmap_entry[], oid[]); - -DROP FUNCTION IF EXISTS diskquota.status(); - -DROP TYPE IF EXISTS diskquota.blackmap_entry; - -DROP VIEW IF EXISTS diskquota.blackmap; - -DROP FUNCTION IF EXISTS diskquota.show_blackmap(); - -DROP TYPE IF EXISTS diskquota.blackmap_entry_detail; - -CREATE OR REPLACE VIEW diskquota.show_fast_schema_quota_view AS -select pgns.nspname as schema_name, pgc.relnamespace as schema_oid, qc.quotalimitMB as quota_in_mb, sum(ts.size) as nspsize_in_bytes -from diskquota.table_size as ts, - pg_class as pgc, - diskquota.quota_config as qc, - pg_namespace as pgns -where ts.tableid = pgc.oid and qc.targetoid = pgc.relnamespace and pgns.oid = pgc.relnamespace -group by relnamespace, qc.quotalimitMB, pgns.nspname -order by pgns.nspname; - -CREATE OR REPLACE VIEW diskquota.show_fast_role_quota_view AS -select pgr.rolname as role_name, pgc.relowner as role_oid, qc.quotalimitMB as quota_in_mb, sum(ts.size) as rolsize_in_bytes -from diskquota.table_size as ts, - pg_class as pgc, - diskquota.quota_config as qc, - pg_roles as pgr -WHERE pgc.relowner = qc.targetoid and pgc.relowner = pgr.oid and ts.tableid = pgc.oid +-- TODO check if worker should not refresh, current lib should be diskquota.so + +-- 
views +DROP VIEW diskquota.blackmap; +DROP VIEW diskquota.show_fast_schema_tablespace_quota_view; +DROP VIEW diskquota.show_fast_role_tablespace_quota_view; + +/* ALTER */ CREATE OR REPLACE VIEW diskquota.show_fast_database_size_view AS +SELECT ( + (SELECT SUM(pg_relation_size(oid)) FROM pg_class WHERE oid <= 16384) + + + (SELECT SUM(size) FROM diskquota.table_size) +) AS dbsize; + +/* ALTER */ CREATE OR REPLACE VIEW diskquota.show_fast_schema_quota_view AS +SELECT pgns.nspname AS schema_name, pgc.relnamespace AS schema_oid, qc.quotalimitMB AS quota_in_mb, SUM(ts.size) AS nspsize_in_bytes +FROM diskquota.table_size AS ts, + pg_class AS pgc, + diskquota.quota_config AS qc, + pg_namespace AS pgns +WHERE ts.tableid = pgc.oid AND qc.targetoid = pgc.relnamespace AND pgns.oid = pgc.relnamespace +GROUP BY relnamespace, qc.quotalimitMB, pgns.nspname +ORDER BY pgns.nspname; + +/* ALTER */ CREATE OR REPLACE VIEW diskquota.show_fast_role_quota_view AS +SELECT pgr.rolname AS role_name, pgc.relowner AS role_oid, qc.quotalimitMB AS quota_in_mb, SUM(ts.size) AS rolsize_in_bytes +FROM diskquota.table_size AS ts, + pg_class AS pgc, + diskquota.quota_config AS qc, + pg_roles AS pgr +WHERE pgc.relowner = qc.targetoid AND pgc.relowner = pgr.oid AND ts.tableid = pgc.oid GROUP BY pgc.relowner, pgr.rolname, qc.quotalimitMB; +-- views part end + +-- UDF +-- TODO find a way to use ALTER FUNCTION +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_schema_quota(text, text) RETURNS void STRICT AS '$libdir/diskquota.so' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_role_quota(text, text) RETURNS void STRICT AS '$libdir/diskquota.so' LANGUAGE C; +/* 1.0--2.0 can not drop this UDF */ CREATE OR REPLACE FUNCTION diskquota.update_diskquota_db_list(oid, int4) RETURNS void STRICT AS '$libdir/diskquota.so' LANGUAGE C; +-- TODO find a way to run it in Postgresql 9.4 ALTER FUNCTION diskquota.update_diskquota_db_list(oid, int4) DEPENDS ON EXTENSION diskquota; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.init_table_size_table() RETURNS void STRICT AS '$libdir/diskquota.so' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.diskquota_fetch_table_stat(int4, oid[]) RETURNS setof diskquota.diskquota_active_table_type AS '$libdir/diskquota.so', 'diskquota_fetch_table_stat' LANGUAGE C VOLATILE; + +DROP FUNCTION diskquota.set_schema_tablespace_quota(text, text, text); +DROP FUNCTION diskquota.set_role_tablespace_quota(text, text, text); +DROP FUNCTION diskquota.set_per_segment_quota(text, float4); +DROP FUNCTION diskquota.refresh_blackmap(diskquota.blackmap_entry[], oid[]); +DROP FUNCTION diskquota.show_blackmap(); +DROP FUNCTION diskquota.pause(); +DROP FUNCTION diskquota.resume(); +DROP FUNCTION diskquota.show_worker_epoch(); +DROP FUNCTION diskquota.wait_for_worker_new_epoch(); +DROP FUNCTION diskquota.status(); +DROP FUNCTION diskquota.show_relation_cache(); +DROP FUNCTION diskquota.relation_size_local( + reltablespace oid, + relfilenode oid, + relpersistence "char", + relstorage "char"); +DROP FUNCTION diskquota.relation_size(relation regclass); +DROP FUNCTION diskquota.show_relation_cache_all_seg(); +-- UDF end + +-- table part +-- clean up schema_tablespace quota AND rolsize_tablespace quota +DELETE FROM diskquota.quota_config WHERE quotatype = 2 or quotatype = 3; + +DROP TABLE diskquota.target; -DROP VIEW IF EXISTS diskquota.show_fast_schema_tablespace_quota_view; -DROP VIEW IF EXISTS diskquota.show_fast_role_tablespace_quota_view; - -CREATE OR REPLACE VIEW 
diskquota.show_fast_database_size_view AS -SELECT ((SELECT SUM(pg_relation_size(oid)) FROM pg_class WHERE oid <= 16384)+ (SELECT SUM(size) FROM diskquota.table_size)) AS dbsize; - --- Need to drop the old type and functions, then recreate them to make the gpdb to reload the new functions -DROP FUNCTION diskquota.diskquota_fetch_table_stat(int4, oid[]); -DROP TYPE diskquota.diskquota_active_table_type; -CREATE TYPE diskquota.diskquota_active_table_type AS ("TABLE_OID" oid, "TABLE_SIZE" int8); -CREATE OR REPLACE FUNCTION diskquota.diskquota_fetch_table_stat(int4, oid[]) RETURNS setof diskquota.diskquota_active_table_type -AS 'MODULE_PATHNAME', 'diskquota_fetch_table_stat' -LANGUAGE C VOLATILE; - -DROP TABLE IF EXISTS diskquota.target; ALTER TABLE diskquota.quota_config DROP COLUMN segratio; --- clean table_size and frop segid column --- delete segments table size -DELETE FROM diskquota.table_size WHERE segid != -1; --- delete tablespace quota config -DELETE FROM diskquota.quota_config WHERE quotatype=2 or quotatype=3; + +ALTER TABLE diskquota.table_size SET WITH (REORGANIZE=true) DISTRIBUTED BY (tableid); ALTER TABLE diskquota.table_size DROP CONSTRAINT table_size_pkey; -ALTER TABLE diskquota.table_size SET DISTRIBUTED RANDOMLY; -ALTER TABLE diskquota.table_size DROP COLUMN segid; -ALTER TABLE diskquota.table_size SET DISTRIBUTED BY (tableid); +-- clean up per-segment size information; 1.0 does not have this feature +DELETE FROM diskquota.table_size WHERE segid != -1; ALTER TABLE diskquota.table_size ADD PRIMARY KEY (tableid); +ALTER TABLE diskquota.table_size DROP COLUMN segid; +-- table part end + +-- type part +ALTER TYPE diskquota.diskquota_active_table_type DROP ATTRIBUTE "GP_SEGMENT_ID"; +DROP TYPE diskquota.blackmap_entry; +DROP TYPE diskquota.blackmap_entry_detail; +DROP TYPE diskquota.relation_cache_detail; +-- type part end diff --git a/diskquota--2.0.sql b/diskquota--2.0.sql index 34a9ee811b1..7e2b706b17c 100644 --- a/diskquota--2.0.sql +++ b/diskquota--2.0.sql @@ -1,214 +1,180 @@ -/* contrib/diskquota/diskquota--2.0.sql */ - -- complain if script is sourced in psql, rather than via CREATE EXTENSION \echo Use "CREATE EXTENSION diskquota" to load this file.
\quit CREATE SCHEMA diskquota; --- Configuration table -CREATE TABLE diskquota.quota_config (targetOid oid, quotatype int, quotalimitMB int8, segratio float4 DEFAULT -1, PRIMARY KEY(targetOid, quotatype)) DISTRIBUTED BY (targetOid, quotatype); +CREATE TABLE diskquota.quota_config( + targetOid oid, + quotatype int, + quotalimitMB int8, + segratio float4 DEFAULT -1, + PRIMARY KEY(targetOid, quotatype) +) DISTRIBUTED BY (targetOid, quotatype); CREATE TABLE diskquota.target ( - quotatype int, --REFERENCES disquota.quota_config.quotatype, - primaryOid oid, - tablespaceOid oid, --REFERENCES pg_tablespace.oid, - PRIMARY KEY (primaryOid, tablespaceOid, quotatype) + quotatype int, --REFERENCES diskquota.quota_config.quotatype, + primaryOid oid, + tablespaceOid oid, --REFERENCES pg_tablespace.oid, + PRIMARY KEY (primaryOid, tablespaceOid, quotatype) ); +CREATE TABLE diskquota.table_size( + tableid oid, + size bigint, + segid smallint, + PRIMARY KEY(tableid, segid) +) DISTRIBUTED BY (tableid, segid); + +CREATE TABLE diskquota.state( + state int, + PRIMARY KEY(state) +) DISTRIBUTED BY (state); + +-- diskquota.quota_config and diskquota.target are dump-able; other tables can be generated on the fly SELECT pg_catalog.pg_extension_config_dump('diskquota.quota_config', ''); -SELECT gp_segment_id, pg_catalog.pg_extension_config_dump('diskquota.quota_config', '') from gp_dist_random('gp_id'); - -CREATE FUNCTION diskquota.set_schema_quota(text, text) -RETURNS void STRICT -AS 'MODULE_PATHNAME' -LANGUAGE C; - -CREATE FUNCTION diskquota.set_role_quota(text, text) -RETURNS void STRICT -AS 'MODULE_PATHNAME' -LANGUAGE C; - -CREATE OR REPLACE FUNCTION diskquota.set_schema_tablespace_quota(text, text, text) -RETURNS void STRICT -AS 'MODULE_PATHNAME' -LANGUAGE C; - -CREATE OR REPLACE FUNCTION diskquota.set_role_tablespace_quota(text, text, text) -RETURNS void STRICT -AS 'MODULE_PATHNAME' -LANGUAGE C; - -CREATE OR REPLACE FUNCTION diskquota.set_per_segment_quota(text, float4) -RETURNS void STRICT -AS 'MODULE_PATHNAME' -LANGUAGE C; - -CREATE TYPE diskquota.blackmap_entry AS - (target_oid oid, database_oid oid, tablespace_oid oid, target_type integer, seg_exceeded boolean); -CREATE FUNCTION diskquota.refresh_blackmap(diskquota.blackmap_entry[], oid[]) -RETURNS void STRICT -AS 'MODULE_PATHNAME' -LANGUAGE C; - -CREATE TYPE diskquota.blackmap_entry_detail AS - (target_type text, target_oid oid, database_oid oid, - tablespace_oid oid, seg_exceeded boolean, dbnode oid, spcnode oid, relnode oid, segid int); - -CREATE FUNCTION diskquota.show_blackmap() -RETURNS setof diskquota.blackmap_entry_detail -AS 'MODULE_PATHNAME', 'show_blackmap' -LANGUAGE C; - -CREATE VIEW diskquota.blackmap AS - SELECT * FROM diskquota.show_blackmap() AS BM; - -CREATE TABLE diskquota.table_size (tableid oid, size bigint, segid smallint, PRIMARY KEY(tableid, segid)) DISTRIBUTED BY (tableid, segid); - -CREATE TABLE diskquota.state (state int, PRIMARY KEY(state)) DISTRIBUTED BY (state); - -INSERT INTO diskquota.state SELECT (count(relname) = 0)::int FROM pg_class AS c, pg_namespace AS n WHERE c.oid > 16384 and relnamespace = n.oid and nspname != 'diskquota'; - -CREATE FUNCTION diskquota.diskquota_start_worker() -RETURNS void STRICT -AS 'MODULE_PATHNAME' -LANGUAGE C; - -CREATE FUNCTION diskquota.init_table_size_table() -RETURNS void STRICT -AS 'MODULE_PATHNAME' -LANGUAGE C; - -CREATE OR REPLACE FUNCTION diskquota.pause() -RETURNS void STRICT -AS 'MODULE_PATHNAME', 'diskquota_pause' -LANGUAGE C; - -CREATE OR REPLACE FUNCTION diskquota.resume() -RETURNS void STRICT
-AS 'MODULE_PATHNAME', 'diskquota_resume' -LANGUAGE C; +SELECT gp_segment_id, pg_catalog.pg_extension_config_dump('diskquota.quota_config', '') FROM gp_dist_random('gp_id'); +SELECT pg_catalog.pg_extension_config_dump('diskquota.target', ''); +SELECT gp_segment_id, pg_catalog.pg_extension_config_dump('diskquota.target', '') FROM gp_dist_random('gp_id'); + +CREATE TYPE diskquota.diskquota_active_table_type AS ( + "TABLE_OID" oid, + "TABLE_SIZE" int8, + "GP_SEGMENT_ID" smallint +); + +CREATE TYPE diskquota.blackmap_entry AS ( + target_oid oid, + database_oid oid, + tablespace_oid oid, + target_type integer, + seg_exceeded boolean +); +CREATE TYPE diskquota.blackmap_entry_detail AS ( + target_type text, + target_oid oid, + database_oid oid, + tablespace_oid oid, + seg_exceeded boolean, + dbnode oid, + spcnode oid, + relnode oid, + segid int +); + +CREATE TYPE diskquota.relation_cache_detail AS ( + RELID oid, + PRIMARY_TABLE_OID oid, + AUXREL_NUM int, + OWNEROID oid, + NAMESPACEOID oid, + BACKENDID int, + SPCNODE oid, + DBNODE oid, + RELNODE oid, + RELSTORAGE "char", + AUXREL_OID oid[] +); + +CREATE FUNCTION diskquota.set_schema_quota(text, text) RETURNS void STRICT AS '$libdir/diskquota-2.0.so' LANGUAGE C; +CREATE FUNCTION diskquota.set_role_quota(text, text) RETURNS void STRICT AS '$libdir/diskquota-2.0.so' LANGUAGE C; +CREATE FUNCTION diskquota.init_table_size_table() RETURNS void STRICT AS '$libdir/diskquota-2.0.so' LANGUAGE C; +CREATE FUNCTION diskquota.diskquota_fetch_table_stat(int4, oid[]) RETURNS setof diskquota.diskquota_active_table_type AS '$libdir/diskquota-2.0.so', 'diskquota_fetch_table_stat' LANGUAGE C VOLATILE; +CREATE FUNCTION diskquota.set_schema_tablespace_quota(text, text, text) RETURNS void STRICT AS '$libdir/diskquota-2.0.so' LANGUAGE C; +CREATE FUNCTION diskquota.set_role_tablespace_quota(text, text, text) RETURNS void STRICT AS '$libdir/diskquota-2.0.so' LANGUAGE C; +CREATE FUNCTION diskquota.set_per_segment_quota(text, float4) RETURNS void STRICT AS '$libdir/diskquota-2.0.so' LANGUAGE C; +CREATE FUNCTION diskquota.refresh_blackmap(diskquota.blackmap_entry[], oid[]) RETURNS void STRICT AS '$libdir/diskquota-2.0.so' LANGUAGE C; +CREATE FUNCTION diskquota.show_blackmap() RETURNS setof diskquota.blackmap_entry_detail AS '$libdir/diskquota-2.0.so', 'show_blackmap' LANGUAGE C; +CREATE FUNCTION diskquota.pause() RETURNS void STRICT AS '$libdir/diskquota-2.0.so', 'diskquota_pause' LANGUAGE C; +CREATE FUNCTION diskquota.resume() RETURNS void STRICT AS '$libdir/diskquota-2.0.so', 'diskquota_resume' LANGUAGE C; +CREATE FUNCTION diskquota.show_worker_epoch() RETURNS bigint STRICT AS '$libdir/diskquota-2.0.so', 'show_worker_epoch' LANGUAGE C; +CREATE FUNCTION diskquota.wait_for_worker_new_epoch() RETURNS boolean STRICT AS '$libdir/diskquota-2.0.so', 'wait_for_worker_new_epoch' LANGUAGE C; +CREATE FUNCTION diskquota.status() RETURNS TABLE ("name" text, "status" text) STRICT AS '$libdir/diskquota-2.0.so', 'diskquota_status' LANGUAGE C; +CREATE FUNCTION diskquota.show_relation_cache() RETURNS setof diskquota.relation_cache_detail AS '$libdir/diskquota-2.0.so', 'show_relation_cache' LANGUAGE C; +CREATE FUNCTION diskquota.relation_size_local( + reltablespace oid, + relfilenode oid, + relpersistence "char", + relstorage "char") +RETURNS bigint STRICT AS '$libdir/diskquota-2.0.so', 'relation_size_local' LANGUAGE C; + +CREATE FUNCTION diskquota.relation_size(relation regclass) RETURNS bigint STRICT AS $$ + SELECT SUM(size)::bigint FROM ( + SELECT 
diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, relstorage) AS size + FROM gp_dist_random('pg_class') WHERE oid = relation + UNION ALL + SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, relstorage) AS size + FROM pg_class WHERE oid = relation + ) AS t $$ LANGUAGE SQL; + + +CREATE FUNCTION diskquota.show_relation_cache_all_seg() RETURNS setof diskquota.relation_cache_detail AS $$ + WITH relation_cache AS ( + SELECT diskquota.show_relation_cache() AS a + FROM gp_dist_random('gp_id') + ) + SELECT (a).* FROM relation_cache; $$ LANGUAGE SQL; + +-- view part CREATE VIEW diskquota.show_fast_schema_quota_view AS -select pgns.nspname as schema_name, pgc.relnamespace as schema_oid, qc.quotalimitMB as quota_in_mb, sum(ts.size) as nspsize_in_bytes -from diskquota.table_size as ts, - pg_class as pgc, - diskquota.quota_config as qc, - pg_namespace as pgns -where ts.tableid = pgc.oid and qc.targetoid = pgc.relnamespace and pgns.oid = pgc.relnamespace and qc.quotatype=0 and ts.segid=-1 +SELECT pgns.nspname AS schema_name, pgc.relnamespace AS schema_oid, qc.quotalimitMB AS quota_in_mb, SUM(ts.size) AS nspsize_in_bytes +FROM diskquota.table_size AS ts, + pg_class AS pgc, + diskquota.quota_config AS qc, + pg_namespace AS pgns +WHERE ts.tableid = pgc.oid AND qc.targetoid = pgc.relnamespace AND pgns.oid = pgc.relnamespace AND qc.quotatype=0 AND ts.segid=-1 group by relnamespace, qc.quotalimitMB, pgns.nspname order by pgns.nspname; CREATE VIEW diskquota.show_fast_role_quota_view AS -select pgr.rolname as role_name, pgc.relowner as role_oid, qc.quotalimitMB as quota_in_mb, sum(ts.size) as rolsize_in_bytes -from diskquota.table_size as ts, - pg_class as pgc, - diskquota.quota_config as qc, - pg_roles as pgr -WHERE pgc.relowner = qc.targetoid and pgc.relowner = pgr.oid and ts.tableid = pgc.oid and qc.quotatype=1 and ts.segid=-1 +SELECT pgr.rolname AS role_name, pgc.relowner AS role_oid, qc.quotalimitMB AS quota_in_mb, SUM(ts.size) AS rolsize_in_bytes +FROM diskquota.table_size AS ts, + pg_class AS pgc, + diskquota.quota_config AS qc, + pg_roles AS pgr +WHERE pgc.relowner = qc.targetoid AND pgc.relowner = pgr.oid AND ts.tableid = pgc.oid AND qc.quotatype=1 AND ts.segid=-1 GROUP BY pgc.relowner, pgr.rolname, qc.quotalimitMB; +CREATE VIEW diskquota.show_fast_database_size_view AS +SELECT ( + (SELECT SUM(pg_relation_size(oid)) FROM pg_class WHERE oid <= 16384) + + + (SELECT SUM(size) FROM diskquota.table_size WHERE segid = -1) +) AS dbsize; + +CREATE VIEW diskquota.blackmap AS SELECT * FROM diskquota.show_blackmap() AS BM; + CREATE VIEW diskquota.show_fast_schema_tablespace_quota_view AS -select pgns.nspname as schema_name, pgc.relnamespace as schema_oid, pgsp.spcname as tablespace_name, pgc.reltablespace as tablespace_oid, qc.quotalimitMB as quota_in_mb, sum(ts.size) as nspsize_tablespace_in_bytes -from diskquota.table_size as ts, - pg_class as pgc, - diskquota.quota_config as qc, - pg_namespace as pgns, - pg_tablespace as pgsp, - diskquota.target as t -where ts.tableid = pgc.oid and qc.targetoid = pgc.relnamespace and pgns.oid = pgc.relnamespace and pgsp.oid = pgc.reltablespace and qc.quotatype=2 and qc.targetoid=t.primaryoid and t.tablespaceoid=pgc.reltablespace and ts.segid=-1 +SELECT pgns.nspname AS schema_name, pgc.relnamespace AS schema_oid, pgsp.spcname AS tablespace_name, pgc.reltablespace AS tablespace_oid, qc.quotalimitMB AS quota_in_mb, SUM(ts.size) AS nspsize_tablespace_in_bytes +FROM diskquota.table_size AS ts, + pg_class AS pgc, + 
diskquota.quota_config AS qc, + pg_namespace AS pgns, + pg_tablespace AS pgsp, + diskquota.target AS t +WHERE ts.tableid = pgc.oid AND qc.targetoid = pgc.relnamespace AND pgns.oid = pgc.relnamespace AND pgsp.oid = pgc.reltablespace AND qc.quotatype=2 AND qc.targetoid=t.primaryoid AND t.tablespaceoid=pgc.reltablespace AND ts.segid=-1 group by relnamespace, reltablespace, qc.quotalimitMB, pgns.nspname, pgsp.spcname order by pgns.nspname, pgsp.spcname; CREATE VIEW diskquota.show_fast_role_tablespace_quota_view AS -select pgr.rolname as role_name, pgc.relowner as role_oid, pgsp.spcname as tablespace_name, pgc.reltablespace as tablespace_oid, qc.quotalimitMB as quota_in_mb, sum(ts.size) as rolsize_tablespace_in_bytes -from diskquota.table_size as ts, - pg_class as pgc, - diskquota.quota_config as qc, - pg_roles as pgr, - pg_tablespace as pgsp, - diskquota.target as t -WHERE pgc.relowner = qc.targetoid and pgc.relowner = pgr.oid and ts.tableid = pgc.oid and pgsp.oid = pgc.reltablespace and qc.quotatype=3 and qc.targetoid=t.primaryoid and t.tablespaceoid=pgc.reltablespace and ts.segid=-1 +SELECT pgr.rolname AS role_name, pgc.relowner AS role_oid, pgsp.spcname AS tablespace_name, pgc.reltablespace AS tablespace_oid, qc.quotalimitMB AS quota_in_mb, SUM(ts.size) AS rolsize_tablespace_in_bytes +FROM diskquota.table_size AS ts, + pg_class AS pgc, + diskquota.quota_config AS qc, + pg_roles AS pgr, + pg_tablespace AS pgsp, + diskquota.target AS t +WHERE pgc.relowner = qc.targetoid AND pgc.relowner = pgr.oid AND ts.tableid = pgc.oid AND pgsp.oid = pgc.reltablespace AND qc.quotatype=3 AND qc.targetoid=t.primaryoid AND t.tablespaceoid=pgc.reltablespace AND ts.segid=-1 GROUP BY pgc.relowner, reltablespace, pgr.rolname, pgsp.spcname, qc.quotalimitMB; +-- view end -CREATE VIEW diskquota.show_fast_database_size_view AS -SELECT ((SELECT SUM(pg_relation_size(oid)) FROM pg_class WHERE oid <= 16384)+ (SELECT SUM(size) FROM diskquota.table_size WHERE segid = -1)) AS dbsize; - -CREATE TYPE diskquota.diskquota_active_table_type AS ("TABLE_OID" oid, "TABLE_SIZE" int8, "GP_SEGMENT_ID" smallint); - -CREATE OR REPLACE FUNCTION diskquota.diskquota_fetch_table_stat(int4, oid[]) RETURNS setof diskquota.diskquota_active_table_type -AS 'MODULE_PATHNAME', 'diskquota_fetch_table_stat' -LANGUAGE C VOLATILE; +-- prepare to boot +INSERT INTO diskquota.state SELECT (count(relname) = 0)::int FROM pg_class AS c, pg_namespace AS n WHERE c.oid > 16384 AND relnamespace = n.oid AND nspname != 'diskquota'; +CREATE FUNCTION diskquota.diskquota_start_worker() RETURNS void STRICT AS '$libdir/diskquota-2.0.so' LANGUAGE C; SELECT diskquota.diskquota_start_worker(); DROP FUNCTION diskquota.diskquota_start_worker(); --- TODO: support upgrade/downgrade -CREATE OR REPLACE FUNCTION diskquota.relation_size_local( - reltablespace oid, - relfilenode oid, - relpersistence "char", - relstorage "char") -RETURNS bigint STRICT -AS 'MODULE_PATHNAME', 'relation_size_local' -LANGUAGE C; - -CREATE OR REPLACE FUNCTION diskquota.relation_size( - relation regclass) -RETURNS bigint STRICT -AS $$ -SELECT sum(size)::bigint FROM ( - SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, relstorage) AS size - FROM gp_dist_random('pg_class') WHERE oid = relation - UNION ALL - SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, relstorage) AS size - FROM pg_class WHERE oid = relation -) AS t -$$ LANGUAGE SQL; - -CREATE TYPE diskquota.relation_cache_detail AS - (RELID oid, PRIMARY_TABLE_OID oid, AUXREL_NUM int, - 
OWNEROID oid, NAMESPACEOID oid, BACKENDID int, SPCNODE oid, DBNODE oid, RELNODE oid, RELSTORAGE "char", AUXREL_OID oid[]); - -CREATE OR REPLACE FUNCTION diskquota.show_relation_cache() -RETURNS setof diskquota.relation_cache_detail -AS 'MODULE_PATHNAME', 'show_relation_cache' -LANGUAGE C; - -CREATE OR REPLACE FUNCTION diskquota.show_relation_cache_all_seg() -RETURNS setof diskquota.relation_cache_detail -as $$ -WITH relation_cache AS ( - SELECT diskquota.show_relation_cache() AS a - FROM gp_dist_random('gp_id') -) -SELECT (a).* FROM relation_cache; -$$ LANGUAGE SQL; - --- Returns the worker epoch for the current database. --- An epoch marks a new iteration of refreshing quota usage by a bgworker. --- An epoch is a 32-bit unsigned integer and there is NO invalid value. --- Therefore, the UDF must throw an error if something unexpected occurs. -CREATE OR REPLACE FUNCTION diskquota.show_worker_epoch() -RETURNS bigint STRICT -AS 'MODULE_PATHNAME', 'show_worker_epoch' -LANGUAGE C; - --- Checks if the bgworker for the current database works as expected. --- 1. If it returns successfully in `diskquota.naptime`, the bgworker works as expected. --- 2. If it does not terminate, there must be some issues with the bgworker. --- In this case, we must ensure this UDF can be interrupted by the user. -CREATE OR REPLACE FUNCTION diskquota.wait_for_worker_new_epoch() -RETURNS boolean STRICT -AS 'MODULE_PATHNAME', 'wait_for_worker_new_epoch' -LANGUAGE C; - --- returns the current status in current database -CREATE OR REPLACE FUNCTION diskquota.status() -RETURNS TABLE ("name" text, "status" text) STRICT -AS 'MODULE_PATHNAME', 'diskquota_status' -LANGUAGE C; - -- re-dispatch pause status to false. in case user pause-drop-recreate. -- refer to see test case 'test_drop_after_pause' -SELECT from diskquota.resume(); - +SELECT FROM diskquota.resume(); diff --git a/diskquota.c b/diskquota.c index 6f51f64dcc6..5c61a9423c4 100644 --- a/diskquota.c +++ b/diskquota.c @@ -41,6 +41,10 @@ PG_MODULE_MAGIC; #define DISKQUOTA_DB "diskquota" #define DISKQUOTA_APPLICATION_NAME "gp_reserved_gpdiskquota" +#ifndef DISKQUOTA_BINARY_NAME + #error DISKQUOTA_BINARY_NAME should be defined by build system +#endif + #include <unistd.h> // for useconds_t extern int usleep(useconds_t usec); // in <unistd.h> @@ -124,7 +128,7 @@ _PG_init(void) /* diskquota.so must be in shared_preload_libraries to init SHM. */ if (!process_shared_preload_libraries_in_progress) - ereport(ERROR, (errmsg("diskquota.so not in shared_preload_libraries."))); + ereport(ERROR, (errmsg(DISKQUOTA_BINARY_NAME " not in shared_preload_libraries."))); /* values are used in later calls */ define_guc_variables(); @@ -148,7 +152,7 @@ _PG_init(void) worker.bgw_start_time = BgWorkerStart_RecoveryFinished; /* launcher process should be restarted after pm reset. */ worker.bgw_restart_time = BGW_DEFAULT_RESTART_INTERVAL; - snprintf(worker.bgw_library_name, BGW_MAXLEN, "diskquota"); + snprintf(worker.bgw_library_name, BGW_MAXLEN, DISKQUOTA_BINARY_NAME); snprintf(worker.bgw_function_name, BGW_MAXLEN, "disk_quota_launcher_main"); worker.bgw_notify_pid = 0; @@ -1048,7 +1052,7 @@ start_worker_by_dboid(Oid dbid) * be started by launcher process again.
*/ worker.bgw_restart_time = BGW_NEVER_RESTART; - sprintf(worker.bgw_library_name, "diskquota"); + sprintf(worker.bgw_library_name, DISKQUOTA_BINARY_NAME); sprintf(worker.bgw_function_name, "disk_quota_worker_main"); dbname = get_database_name(dbid); @@ -1141,6 +1145,10 @@ worker_get_epoch(Oid database_oid) return epoch; } +// Returns the worker epoch for the current database. +// An epoch marks a new iteration of refreshing quota usage by a bgworker. +// An epoch is a 32-bit unsigned integer and there is NO invalid value. +// Therefore, the UDF must throw an error if something unexpected occurs. PG_FUNCTION_INFO_V1(show_worker_epoch); Datum show_worker_epoch(PG_FUNCTION_ARGS) @@ -1271,6 +1279,10 @@ check_for_timeout(TimestampTz start_time) return false; } +// Checks if the bgworker for the current database works as expected. +// 1. If it returns successfully in `diskquota.naptime`, the bgworker works as expected. +// 2. If it does not terminate, there must be some issues with the bgworker. +// In this case, we must ensure this UDF can be interrupted by the user. PG_FUNCTION_INFO_V1(wait_for_worker_new_epoch); Datum wait_for_worker_new_epoch(PG_FUNCTION_ARGS) diff --git a/diskquota.control b/diskquota.control index aa8c4b083bb..9a73625f62b 100644 --- a/diskquota.control +++ b/diskquota.control @@ -1,5 +1,5 @@ # diskquota extension comment = 'Disk Quota Main Program' default_version = '2.0' -module_pathname = '$libdir/diskquota' +module_pathname = 'do-not-use-this' relocatable = true diff --git a/tests/isolation2/expected/config.out b/tests/isolation2/expected/config.out index 8dfe5f193b6..c322ad8b37e 100644 --- a/tests/isolation2/expected/config.out +++ b/tests/isolation2/expected/config.out @@ -1,5 +1,5 @@ -!\retcode gpconfig -c shared_preload_libraries -v diskquota; +!\retcode gpconfig -c shared_preload_libraries -v $(../../cmake/current_binary_name); (exited with code 0) !\retcode gpstop -raf; (exited with code 0) @@ -27,4 +27,3 @@ -------------------------- 60 (1 row) - diff --git a/tests/isolation2/sql/config.sql b/tests/isolation2/sql/config.sql index 5c4f3170f82..c551d847a6e 100644 --- a/tests/isolation2/sql/config.sql +++ b/tests/isolation2/sql/config.sql @@ -2,7 +2,7 @@ CREATE DATABASE diskquota; --end_ignore -!\retcode gpconfig -c shared_preload_libraries -v diskquota; +!\retcode gpconfig -c shared_preload_libraries -v $(../../cmake/current_binary_name); !\retcode gpstop -raf; !\retcode gpconfig -c diskquota.naptime -v 0; @@ -13,4 +13,3 @@ CREATE DATABASE diskquota; 1: SHOW diskquota.naptime; 1: SHOW diskquota.max_active_tables; 1: SHOW diskquota.worker_timeout; - \ No newline at end of file diff --git a/tests/regress/Makefile b/tests/regress/Makefile index 398997c6acc..d017bde6e09 100644 --- a/tests/regress/Makefile +++ b/tests/regress/Makefile @@ -1,9 +1,5 @@ REGRESS = dummy -ifeq ("$(INTEGRATION_TEST)","y") -REGRESS_OPTS = --schedule=diskquota_schedule_int --init-file=../init_file --init-file=./regress_init_file -else REGRESS_OPTS = --schedule=diskquota_schedule --init-file=../init_file --init-file=./regress_init_file -endif # FIXME: This check is hacky, since test_fetch_table_stat relies on the # gp_inject_fault extension, we detect if the extension is built with diff --git a/tests/regress/diskquota_schedule_int b/tests/regress/diskquota_schedule_int deleted file mode 100644 index 383ecffea83..00000000000 --- a/tests/regress/diskquota_schedule_int +++ /dev/null @@ -1,12 +0,0 @@ -test: init -test: prepare -test: test_relation_size -#test: test_table_size -test: 
test_fast_disk_check -test: test_role test_schema test_drop_table test_column test_copy test_update test_toast test_truncate test_reschema test_temp_role test_rename test_delete_quota test_mistake -test: test_truncate -test: test_delete_quota -test: test_partition -test: test_manytable -test: clean -#test: test_insert_after_drop diff --git a/tests/regress/sql/config.sql b/tests/regress/sql/config.sql index 2a14961f3c3..225ff14603b 100644 --- a/tests/regress/sql/config.sql +++ b/tests/regress/sql/config.sql @@ -1,7 +1,7 @@ --start_ignore CREATE DATABASE diskquota; -\! gpconfig -c shared_preload_libraries -v diskquota +\! gpconfig -c shared_preload_libraries -v $(../../cmake/current_binary_name); \! gpstop -raf \! gpconfig -c diskquota.naptime -v 0 diff --git a/upgrade_test/expected/init.out b/upgrade_test/expected/init.out index 0f149d65ca9..c82c103ebd8 100644 --- a/upgrade_test/expected/init.out +++ b/upgrade_test/expected/init.out @@ -1,5 +1,5 @@ -- start_ignore -\! gpconfig -c shared_preload_libraries -v diskquota > /dev/null +\! gpconfig -c shared_preload_libraries -v $(../../cmake/current_binary_name) > /dev/null -- end_ignore \! echo $? 0 diff --git a/upgrade_test/sql/init.sql b/upgrade_test/sql/init.sql index e8b1d49854f..a00312f2bfc 100644 --- a/upgrade_test/sql/init.sql +++ b/upgrade_test/sql/init.sql @@ -1,5 +1,5 @@ -- start_ignore -\! gpconfig -c shared_preload_libraries -v diskquota > /dev/null +\! gpconfig -c shared_preload_libraries -v $(../../cmake/current_binary_name) > /dev/null -- end_ignore \! echo $? -- start_ignore From 65ef6c83010dd53020a3ae7f7ede3feeb13b560a Mon Sep 17 00:00:00 2001 From: Chen Mulong Date: Thu, 3 Mar 2022 13:56:16 +0800 Subject: [PATCH 142/330] Keep the 1.0 sql the same with 1.x branch (#163) The added 'DISTRIBUTED BY' clause should have no impact on 1.x. --- diskquota--1.0.sql | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/diskquota--1.0.sql b/diskquota--1.0.sql index 827b682734e..d4dd4e79608 100644 --- a/diskquota--1.0.sql +++ b/diskquota--1.0.sql @@ -9,7 +9,7 @@ CREATE TABLE diskquota.quota_config( quotatype int, quotalimitMB int8, PRIMARY KEY(targetOid, quotatype) -) DISTRIBUTED BY (targetOid, quotatype); +); CREATE TABLE diskquota.table_size( tableid oid, @@ -20,7 +20,7 @@ CREATE TABLE diskquota.table_size( CREATE TABLE diskquota.state( state int, PRIMARY KEY(state) -) DISTRIBUTED BY (state); +); From 7f3a2aef9a09fc47a1a27282fe530e770bfa3598 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Xuebin=20Su=20=28=E8=8B=8F=E5=AD=A6=E6=96=8C=29?= <12034000+xuebinsu@users.noreply.github.com> Date: Fri, 4 Mar 2022 08:04:58 +0800 Subject: [PATCH 143/330] Test when table is created before quota is set (#129) * Test when table is created before quota is set This patch adds test cases to ensure that diskquota works when a table is created before a quota is set. Specifically, - When diskquota is preloaded, quota usage is updated automatically for each active table. Therefore, quotas work once they are set and no more action is required from the user. - When diskquota is NOT preloaded, no active tables will be recorded. As a result, when diskquota is preloaded again, the user needs to run SELECT diskquota.init_table_size_table(); and restart GPDB manually in order to update the quota usage. This patch also fixes some minor issues in the existing cases.
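A condensed sketch of the recovery flow described above, drawn from the new test cases (the target database name 'diskquota' follows the tests; run from the coordinator host):

    # re-enable the preload library and restart
    gpconfig -c shared_preload_libraries -v 'diskquota'
    gpstop -far
    # rebuild diskquota.table_size, then restart so the worker reloads it
    psql diskquota -c "SELECT diskquota.init_table_size_table();"
    gpstop -far
    # quotas set from now on take effect as usual
    psql diskquota -c "SELECT diskquota.set_role_quota(current_role, '1MB');"
    psql diskquota -c "SELECT diskquota.wait_for_worker_new_epoch();"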
* Replace wait() with sleep() --- tests/regress/diskquota_schedule | 2 + .../expected/test_ctas_before_set_quota.out | 61 +++++++++++++ .../expected/test_ctas_no_preload_lib.out | 88 +++++++++++++++++++ tests/regress/expected/test_index.out | 51 ++++++++--- tests/regress/expected/test_role.out | 30 +++++-- .../sql/test_ctas_before_set_quota.sql | 32 +++++++ .../regress/sql/test_ctas_no_preload_lib.sql | 56 ++++++++++++ tests/regress/sql/test_index.sql | 25 +++++- tests/regress/sql/test_role.sql | 14 ++- 9 files changed, 336 insertions(+), 23 deletions(-) create mode 100644 tests/regress/expected/test_ctas_before_set_quota.out create mode 100644 tests/regress/expected/test_ctas_no_preload_lib.out create mode 100644 tests/regress/sql/test_ctas_before_set_quota.sql create mode 100644 tests/regress/sql/test_ctas_no_preload_lib.sql diff --git a/tests/regress/diskquota_schedule b/tests/regress/diskquota_schedule index fa252b9a261..e5521064012 100644 --- a/tests/regress/diskquota_schedule +++ b/tests/regress/diskquota_schedule @@ -13,6 +13,8 @@ test: test_update_db_cache test: test_fast_disk_check #test: test_insert_after_drop test: test_role test_schema test_drop_table test_column test_copy test_update test_toast test_truncate test_reschema test_temp_role test_rename test_delete_quota test_mistake test_tablespace_role test_tablespace_schema test_tablespace_role_perseg test_tablespace_schema_perseg test_index test_recreate +test: test_ctas_no_preload_lib +test: test_ctas_before_set_quota test: test_truncate test: test_delete_quota test: test_partition diff --git a/tests/regress/expected/test_ctas_before_set_quota.out b/tests/regress/expected/test_ctas_before_set_quota.out new file mode 100644 index 00000000000..34cd230f1b7 --- /dev/null +++ b/tests/regress/expected/test_ctas_before_set_quota.out @@ -0,0 +1,61 @@ +CREATE ROLE test SUPERUSER; +SET ROLE test; +CREATE TABLE t_before_set_quota (i) AS SELECT generate_series(1, 100000) +DISTRIBUTED BY (i); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size +WHERE tableid = 't_before_set_quota'::regclass ORDER BY segid; + tableid | size | segid +--------------------+---------+------- + t_before_set_quota | 3637248 | -1 + t_before_set_quota | 1212416 | 0 + t_before_set_quota | 1212416 | 1 + t_before_set_quota | 1212416 | 2 +(4 rows) + +-- Ensure that the table is not active +SELECT diskquota.diskquota_fetch_table_stat(0, ARRAY[]::oid[]) +FROM gp_dist_random('gp_id'); + diskquota_fetch_table_stat +---------------------------- +(0 rows) + +SELECT diskquota.set_role_quota(current_role, '1MB'); + set_role_quota +---------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- Expect that current role is in the blackmap +SELECT rolname FROM pg_authid, diskquota.blackmap WHERE oid = target_oid; + rolname +--------- + test +(1 row) + +SELECT diskquota.set_role_quota(current_role, '-1'); + set_role_quota +---------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP TABLE t_before_set_quota; +RESET ROLE; +DROP ROLE test; diff --git a/tests/regress/expected/test_ctas_no_preload_lib.out b/tests/regress/expected/test_ctas_no_preload_lib.out new file mode 100644 index 00000000000..332ee193cd9 --- /dev/null +++ 
b/tests/regress/expected/test_ctas_no_preload_lib.out @@ -0,0 +1,88 @@ +\! gpconfig -c shared_preload_libraries -v '' > /dev/null +\! gpstop -far > /dev/null +\c +CREATE ROLE test SUPERUSER; +SET ROLE test; +-- Create table with diskquota disabled +CREATE TABLE t_without_diskquota (i) AS SELECT generate_series(1, 100000) +DISTRIBUTED BY (i); +\! gpconfig -c shared_preload_libraries -v 'diskquota'> /dev/null +\! gpstop -far > /dev/null +\c +-- Make sure that the worker has started. +-- We cannot use wait_for_worker_new_epoch() here because the worker might not +-- have started yet. +SELECT pg_sleep(1); + pg_sleep +---------- + +(1 row) + +SET ROLE test; +-- Init table_size to include the table +SELECT diskquota.init_table_size_table(); + init_table_size_table +----------------------- + +(1 row) + +-- Restart to load diskquota.table_size to the memory. +\! gpstop -far > /dev/null +\c +SET ROLE test; +SELECT tableid::regclass, size, segid FROM diskquota.table_size +WHERE tableid = 't_without_diskquota'::regclass ORDER BY segid; + tableid | size | segid +---------------------+---------+------- + t_without_diskquota | 3637248 | -1 + t_without_diskquota | 1212416 | 0 + t_without_diskquota | 1212416 | 1 + t_without_diskquota | 1212416 | 2 +(4 rows) + +-- Ensure that the table is not active +SELECT diskquota.diskquota_fetch_table_stat(0, ARRAY[]::oid[]) +FROM gp_dist_random('gp_id'); + diskquota_fetch_table_stat +---------------------------- +(0 rows) + +SELECT diskquota.set_role_quota(current_role, '1MB'); + set_role_quota +---------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- Expect that current role is in the blackmap +SELECT rolname FROM pg_authid, diskquota.blackmap WHERE oid = target_oid; + rolname +--------- + test +(1 row) + +SELECT diskquota.set_role_quota(current_role, '-1'); + set_role_quota +---------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT rolname FROM pg_authid, diskquota.blackmap WHERE oid = target_oid; + rolname +--------- +(0 rows) + +DROP TABLE t_without_diskquota; +RESET ROLE; +DROP ROLE test; diff --git a/tests/regress/expected/test_index.out b/tests/regress/expected/test_index.out index f1077c9643b..2799f93f172 100644 --- a/tests/regress/expected/test_index.out +++ b/tests/regress/expected/test_index.out @@ -8,8 +8,6 @@ NOTICE: tablespace "indexspc" does not exist, skipping CREATE TABLESPACE indexspc LOCATION '/tmp/indexspc'; SET search_path TO indexschema1; CREATE TABLE test_index_a(i int) TABLESPACE indexspc DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
INSERT INTO test_index_a SELECT generate_series(1,20000); SELECT diskquota.set_schema_tablespace_quota('indexschema1', 'indexspc','2 MB'); set_schema_tablespace_quota @@ -23,17 +21,25 @@ SELECT diskquota.wait_for_worker_new_epoch(); t (1 row) -SELECT schema_name,tablespace_name,quota_in_mb,nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name='indexschema1' and tablespace_name='indexspc'; +SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes +FROM diskquota.show_fast_schema_tablespace_quota_view +WHERE schema_name='indexschema1' and tablespace_name='indexspc'; schema_name | tablespace_name | quota_in_mb | nspsize_tablespace_in_bytes --------------+-----------------+-------------+----------------------------- indexschema1 | indexspc | 2 | 1081344 (1 row) -SELECT size, segid FROM diskquota.table_size , pg_class where tableid=oid and relname='test_index_a' and segid=-1; - size | segid ----------+------- - 1081344 | -1 -(1 row) +SELECT tableid::regclass, size, segid +FROM diskquota.table_size +WHERE tableid = 'test_index_a'::regclass +ORDER BY segid; + tableid | size | segid +--------------+---------+------- + test_index_a | 1081344 | -1 + test_index_a | 360448 | 0 + test_index_a | 360448 | 1 + test_index_a | 360448 | 2 +(4 rows) -- create index for the table, index in default tablespace CREATE INDEX a_index ON test_index_a(i); @@ -52,12 +58,29 @@ SELECT schema_name,tablespace_name,quota_in_mb,nspsize_tablespace_in_bytes FROM indexschema1 | indexspc | 2 | 1441792 (1 row) -SELECT size, segid FROM diskquota.table_size , pg_class where tableid=oid and (relname='test_index_a' or relname='a_index') and segid=-1; - size | segid ----------+------- - 1441792 | -1 - 1212416 | -1 -(2 rows) +SELECT tableid::regclass, size, segid +FROM diskquota.table_size +WHERE tableid = 'test_index_a'::regclass +ORDER BY segid; + tableid | size | segid +--------------+---------+------- + test_index_a | 1441792 | -1 + test_index_a | 491520 | 0 + test_index_a | 491520 | 1 + test_index_a | 458752 | 2 +(4 rows) + +SELECT tableid::regclass, size, segid +FROM diskquota.table_size +WHERE tableid = 'a_index'::regclass +ORDER BY segid; + tableid | size | segid +---------+---------+------- + a_index | 1212416 | -1 + a_index | 393216 | 0 + a_index | 393216 | 1 + a_index | 393216 | 2 +(4 rows) -- add index to tablespace indexspc ALTER index a_index SET TABLESPACE indexspc; diff --git a/tests/regress/expected/test_role.out b/tests/regress/expected/test_role.out index 9507912e9b6..d2ed155a7f2 100644 --- a/tests/regress/expected/test_role.out +++ b/tests/regress/expected/test_role.out @@ -6,12 +6,8 @@ NOTICE: resource queue required -- using default resource queue "pg_default" CREATE ROLE u2 NOLOGIN; NOTICE: resource queue required -- using default resource queue "pg_default" CREATE TABLE b (t TEXT) DISTRIBUTED BY (t); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 't' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. ALTER TABLE b OWNER TO u1; CREATE TABLE b2 (t TEXT) DISTRIBUTED BY (t); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 't' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. 
Make sure column(s) chosen are the optimal data distribution key to minimize skew. ALTER TABLE b2 OWNER TO u1; SELECT diskquota.set_role_quota('u1', '1 MB'); set_role_quota @@ -20,7 +16,7 @@ SELECT diskquota.set_role_quota('u1', '1 MB'); (1 row) INSERT INTO b SELECT generate_series(1,100); --- expect insert fail +-- expect insert success INSERT INTO b SELECT generate_series(1,100000); SELECT diskquota.wait_for_worker_new_epoch(); wait_for_worker_new_epoch @@ -71,6 +67,30 @@ SELECT role_name, quota_in_mb, rolsize_in_bytes FROM diskquota.show_fast_role_qu u1 | 1 | 4194304 (1 row) +SELECT tableid::regclass, size, segid +FROM diskquota.table_size +WHERE tableid = 'b'::regclass +ORDER BY segid; + tableid | size | segid +---------+---------+------- + b | 4063232 | -1 + b | 1343488 | 0 + b | 1343488 | 1 + b | 1343488 | 2 +(4 rows) + +SELECT tableid::regclass, size, segid +FROM diskquota.table_size +WHERE tableid = 'b2'::regclass +ORDER BY segid; + tableid | size | segid +---------+--------+------- + b2 | 131072 | -1 + b2 | 32768 | 0 + b2 | 32768 | 1 + b2 | 32768 | 2 +(4 rows) + ALTER TABLE b OWNER TO u2; SELECT diskquota.wait_for_worker_new_epoch(); wait_for_worker_new_epoch diff --git a/tests/regress/sql/test_ctas_before_set_quota.sql b/tests/regress/sql/test_ctas_before_set_quota.sql new file mode 100644 index 00000000000..3263731ecb4 --- /dev/null +++ b/tests/regress/sql/test_ctas_before_set_quota.sql @@ -0,0 +1,32 @@ +CREATE ROLE test SUPERUSER; + +SET ROLE test; + +CREATE TABLE t_before_set_quota (i) AS SELECT generate_series(1, 100000) +DISTRIBUTED BY (i); + +SELECT diskquota.wait_for_worker_new_epoch(); + +SELECT tableid::regclass, size, segid FROM diskquota.table_size +WHERE tableid = 't_before_set_quota'::regclass ORDER BY segid; + +-- Ensure that the table is not active +SELECT diskquota.diskquota_fetch_table_stat(0, ARRAY[]::oid[]) +FROM gp_dist_random('gp_id'); + +SELECT diskquota.set_role_quota(current_role, '1MB'); + +SELECT diskquota.wait_for_worker_new_epoch(); + +-- Expect that current role is in the blackmap +SELECT rolname FROM pg_authid, diskquota.blackmap WHERE oid = target_oid; + +SELECT diskquota.set_role_quota(current_role, '-1'); + +SELECT diskquota.wait_for_worker_new_epoch(); + +DROP TABLE t_before_set_quota; + +RESET ROLE; + +DROP ROLE test; diff --git a/tests/regress/sql/test_ctas_no_preload_lib.sql b/tests/regress/sql/test_ctas_no_preload_lib.sql new file mode 100644 index 00000000000..500e952e929 --- /dev/null +++ b/tests/regress/sql/test_ctas_no_preload_lib.sql @@ -0,0 +1,56 @@ +\! gpconfig -c shared_preload_libraries -v '' > /dev/null +\! gpstop -far > /dev/null +\c + +CREATE ROLE test SUPERUSER; + +SET ROLE test; + +-- Create table with diskquota disabled +CREATE TABLE t_without_diskquota (i) AS SELECT generate_series(1, 100000) +DISTRIBUTED BY (i); + +\! gpconfig -c shared_preload_libraries -v 'diskquota'> /dev/null +\! gpstop -far > /dev/null +\c + +-- Make sure that the worker has started. +-- We cannot use wait_for_worker_new_epoch() here because the worker might not +-- have started yet. +SELECT pg_sleep(1); + +SET ROLE test; + +-- Init table_size to include the table +SELECT diskquota.init_table_size_table(); + +-- Restart to load diskquota.table_size to the memory. +\! 
gpstop -far > /dev/null +\c +SET ROLE test; + +SELECT tableid::regclass, size, segid FROM diskquota.table_size +WHERE tableid = 't_without_diskquota'::regclass ORDER BY segid; + +-- Ensure that the table is not active +SELECT diskquota.diskquota_fetch_table_stat(0, ARRAY[]::oid[]) +FROM gp_dist_random('gp_id'); + +SELECT diskquota.set_role_quota(current_role, '1MB'); + +SELECT diskquota.wait_for_worker_new_epoch(); + +-- Expect that current role is in the blackmap +SELECT rolname FROM pg_authid, diskquota.blackmap WHERE oid = target_oid; + +SELECT diskquota.set_role_quota(current_role, '-1'); + +SELECT diskquota.wait_for_worker_new_epoch(); + +SELECT rolname FROM pg_authid, diskquota.blackmap WHERE oid = target_oid; + +DROP TABLE t_without_diskquota; + +RESET ROLE; + +DROP ROLE test; diff --git a/tests/regress/sql/test_index.sql b/tests/regress/sql/test_index.sql index c2d5f3983d4..9aa3ef02fe9 100644 --- a/tests/regress/sql/test_index.sql +++ b/tests/regress/sql/test_index.sql @@ -9,10 +9,19 @@ SET search_path TO indexschema1; CREATE TABLE test_index_a(i int) TABLESPACE indexspc DISTRIBUTED BY (i); INSERT INTO test_index_a SELECT generate_series(1,20000); + SELECT diskquota.set_schema_tablespace_quota('indexschema1', 'indexspc','2 MB'); SELECT diskquota.wait_for_worker_new_epoch(); -SELECT schema_name,tablespace_name,quota_in_mb,nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name='indexschema1' and tablespace_name='indexspc'; -SELECT size, segid FROM diskquota.table_size , pg_class where tableid=oid and relname='test_index_a' and segid=-1; + +SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes +FROM diskquota.show_fast_schema_tablespace_quota_view +WHERE schema_name='indexschema1' and tablespace_name='indexspc'; + +SELECT tableid::regclass, size, segid +FROM diskquota.table_size +WHERE tableid = 'test_index_a'::regclass +ORDER BY segid; + -- create index for the table, index in default tablespace CREATE INDEX a_index ON test_index_a(i); INSERT INTO test_index_a SELECT generate_series(1,10000); @@ -20,7 +29,17 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert success INSERT INTO test_index_a SELECT generate_series(1,100); SELECT schema_name,tablespace_name,quota_in_mb,nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name ='indexschema1' and tablespace_name='indexspc'; -SELECT size, segid FROM diskquota.table_size , pg_class where tableid=oid and (relname='test_index_a' or relname='a_index') and segid=-1; + +SELECT tableid::regclass, size, segid +FROM diskquota.table_size +WHERE tableid = 'test_index_a'::regclass +ORDER BY segid; + +SELECT tableid::regclass, size, segid +FROM diskquota.table_size +WHERE tableid = 'a_index'::regclass +ORDER BY segid; + -- add index to tablespace indexspc ALTER index a_index SET TABLESPACE indexspc; SELECT diskquota.wait_for_worker_new_epoch(); diff --git a/tests/regress/sql/test_role.sql b/tests/regress/sql/test_role.sql index b2f4fdadcad..8edaa545ac2 100644 --- a/tests/regress/sql/test_role.sql +++ b/tests/regress/sql/test_role.sql @@ -13,7 +13,7 @@ ALTER TABLE b2 OWNER TO u1; SELECT diskquota.set_role_quota('u1', '1 MB'); INSERT INTO b SELECT generate_series(1,100); --- expect insert fail +-- expect insert success INSERT INTO b SELECT generate_series(1,100000); SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail @@ -31,6 +31,18 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail INSERT 
INTO b SELECT generate_series(1,100); SELECT role_name, quota_in_mb, rolsize_in_bytes FROM diskquota.show_fast_role_quota_view WHERE role_name='u1'; + +SELECT tableid::regclass, size, segid +FROM diskquota.table_size +WHERE tableid = 'b'::regclass +ORDER BY segid; + +SELECT tableid::regclass, size, segid +FROM diskquota.table_size +WHERE tableid = 'b2'::regclass +ORDER BY segid; + + ALTER TABLE b OWNER TO u2; SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert succeed From e7fd9cdb62029533377408d79f5c5fe5feb42563 Mon Sep 17 00:00:00 2001 From: Chen Mulong Date: Fri, 4 Mar 2022 10:13:21 +0800 Subject: [PATCH 144/330] Install the latest cmake on CI (#165) We need the archive extract feature from cmake 3.18. Before the new version lands on the test/build images, install it in our CI scripts. A cmake binary has been uploaded to the concourse gcs for faster downloading. --- concourse/pipeline/job_def.lib.yml | 2 ++ concourse/pipeline/res_def.yml | 10 +++++++++- concourse/scripts/build_diskquota.sh | 1 + concourse/scripts/install_dep.sh | 26 ++++++++++++++++++++++++++ concourse/scripts/test_diskquota.sh | 1 + concourse/tasks/build_diskquota.yml | 1 + concourse/tasks/test_diskquota.yml | 1 + 7 files changed, 41 insertions(+), 1 deletion(-) create mode 100755 concourse/scripts/install_dep.sh diff --git a/concourse/pipeline/job_def.lib.yml b/concourse/pipeline/job_def.lib.yml index 815faa0cdaf..f8db0f3f782 100644 --- a/concourse/pipeline/job_def.lib.yml +++ b/concourse/pipeline/job_def.lib.yml @@ -64,6 +64,7 @@ params: #@ trigger = param["trigger"] #@ confs = param["confs"] #@ add_res_by_name(res_map, param["gpdb_src"]) +#@ add_res_by_name(res_map, "bin_cmake") name: build_test max_in_flight: 10 on_success: #@ trigger["on_success"] @@ -76,6 +77,7 @@ plan: - in_parallel: - get: gpdb_src resource: #@ param["gpdb_src"] + - get: bin_cmake #@ for conf in confs: #@ add_res_by_conf(res_map, conf) #@ if conf["res_build_image"] == conf["res_test_image"]: diff --git a/concourse/pipeline/res_def.yml b/concourse/pipeline/res_def.yml index d4375e4b070..c7953d324e4 100644 --- a/concourse/pipeline/res_def.yml +++ b/concourse/pipeline/res_def.yml @@ -102,7 +102,7 @@ resources: repository: gcr.io/data-gpdb-public-images/gpdb6-ubuntu18.04-test tag: latest -#!
gpdb binary on gcs is located as different folder for different version - name: bin_gpdb6_centos6 type: gcs source: @@ -127,3 +127,11 @@ bucket: ((gcs-bucket-intermediates)) json_key: ((concourse-gcs-resources-service-account-key)) versioned_file: 6X_STABLE/bin_gpdb_rhel8/bin_gpdb.tar.gz + +# Other dependencies +- name: bin_cmake + type: gcs + source: + bucket: gpdb-extensions-concourse-resources + json_key: ((extensions-gcs-service-account-key)) + regexp: dependencies/cmake-(.*)-linux-x86_64.sh diff --git a/concourse/scripts/build_diskquota.sh b/concourse/scripts/build_diskquota.sh index d107bd4549c..498f3f3588d 100755 --- a/concourse/scripts/build_diskquota.sh +++ b/concourse/scripts/build_diskquota.sh @@ -5,6 +5,7 @@ set -exo pipefail CWDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" TOP_DIR=${CWDIR}/../../../ +source "${TOP_DIR}/diskquota_src/concourse/scripts/install_dep.sh" source "${TOP_DIR}/gpdb_src/concourse/scripts/common.bash" function pkg() { [ -f /opt/gcc_env.sh ] && source /opt/gcc_env.sh diff --git a/concourse/scripts/install_dep.sh b/concourse/scripts/install_dep.sh new file mode 100755 index 00000000000..5835bd47182 --- /dev/null +++ b/concourse/scripts/install_dep.sh @@ -0,0 +1,26 @@ +#!/bin/bash + +# Script to install build & test dependencies +# Ideally all dependencies should exist in the docker image. Use this script to install them only +# if it is more difficult to change it on the image side. +# Download the dependencies with concourse resources as much as possible, then we could benefit from +# concourse's resource cache system. + +set -eox + +_install_cmake() { + # cmake_new to avoid name collision with the docker image. + local cmake_home="/opt/cmake_new" + if [ -e "${cmake_home}" ]; then + echo "cmake might have been installed in ${cmake_home}" + return + fi + echo "Installing cmake to ${cmake_home}..."
+ pushd bin_cmake + mkdir -p "${cmake_home}" + sh cmake-*-linux-x86_64.sh --skip-license --prefix="${cmake_home}" + popd + export PATH="${cmake_home}/bin":"$PATH" +} + +_install_cmake diff --git a/concourse/scripts/test_diskquota.sh b/concourse/scripts/test_diskquota.sh index eba67ba9c90..b06504a0eb5 100755 --- a/concourse/scripts/test_diskquota.sh +++ b/concourse/scripts/test_diskquota.sh @@ -7,6 +7,7 @@ TOP_DIR=${CWDIR}/../../../ GPDB_CONCOURSE_DIR=${TOP_DIR}/gpdb_src/concourse/scripts CUT_NUMBER=6 +source "${TOP_DIR}/diskquota_src/concourse/scripts/install_dep.sh" source "${GPDB_CONCOURSE_DIR}/common.bash" source "${TOP_DIR}/diskquota_src/concourse/scripts/test_common.sh" diff --git a/concourse/tasks/build_diskquota.yml b/concourse/tasks/build_diskquota.yml index e7c74885727..05b756720cb 100644 --- a/concourse/tasks/build_diskquota.yml +++ b/concourse/tasks/build_diskquota.yml @@ -5,6 +5,7 @@ inputs: - name: bin_gpdb - name: diskquota_src - name: gpdb_src + - name: bin_cmake outputs: - name: diskquota_artifacts diff --git a/concourse/tasks/test_diskquota.yml b/concourse/tasks/test_diskquota.yml index aa622bacc7a..160048398bf 100644 --- a/concourse/tasks/test_diskquota.yml +++ b/concourse/tasks/test_diskquota.yml @@ -6,6 +6,7 @@ inputs: - name: diskquota_src - name: bin_diskquota - name: gpdb_src + - name: bin_cmake run: path: diskquota_src/concourse/scripts/test_diskquota.sh From 1bb54f9fff75828239af542e30e3b6cf87ac0ea5 Mon Sep 17 00:00:00 2001 From: Sasasu Date: Thu, 3 Mar 2022 15:08:41 +0800 Subject: [PATCH 145/330] bump cmake version to 3.18 --- CMakeLists.txt | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index cdbb4667bc0..9846ab06306 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,4 +1,7 @@ -cmake_minimum_required(VERSION 2.8.12) +cmake_minimum_required(VERSION 3.18) +# include_guard() needs 3.10 +# file(ARCHIVE_EXTRACT foo) needs 3.18 + project(diskquota) # generate 'compile_commands.json' From b993a45d614f9a7e861b9d14cccff160d244f52a Mon Sep 17 00:00:00 2001 From: Sasasu Date: Fri, 4 Mar 2022 14:23:40 +0800 Subject: [PATCH 146/330] fix case test_ctas_no_preload_lib --- tests/regress/expected/test_ctas_no_preload_lib.out | 2 +- tests/regress/sql/test_ctas_no_preload_lib.sql | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/regress/expected/test_ctas_no_preload_lib.out b/tests/regress/expected/test_ctas_no_preload_lib.out index 332ee193cd9..38d63bf2e62 100644 --- a/tests/regress/expected/test_ctas_no_preload_lib.out +++ b/tests/regress/expected/test_ctas_no_preload_lib.out @@ -6,7 +6,7 @@ SET ROLE test; -- Create table with diskquota disabled CREATE TABLE t_without_diskquota (i) AS SELECT generate_series(1, 100000) DISTRIBUTED BY (i); -\! gpconfig -c shared_preload_libraries -v 'diskquota'> /dev/null +\! gpconfig -c shared_preload_libraries -v $(../../cmake/current_binary_name) > /dev/null \! gpstop -far > /dev/null \c -- Make sure that the worker has started. diff --git a/tests/regress/sql/test_ctas_no_preload_lib.sql b/tests/regress/sql/test_ctas_no_preload_lib.sql index 500e952e929..37ee1a7886a 100644 --- a/tests/regress/sql/test_ctas_no_preload_lib.sql +++ b/tests/regress/sql/test_ctas_no_preload_lib.sql @@ -10,7 +10,7 @@ SET ROLE test; CREATE TABLE t_without_diskquota (i) AS SELECT generate_series(1, 100000) DISTRIBUTED BY (i); -\! gpconfig -c shared_preload_libraries -v 'diskquota'> /dev/null +\! gpconfig -c shared_preload_libraries -v $(../../cmake/current_binary_name) > /dev/null \!
gpstop -far > /dev/null \c From 2c86d63679c8d2ce26f9f8513ecf221f529323e0 Mon Sep 17 00:00:00 2001 From: Chen Mulong Date: Mon, 7 Mar 2022 13:33:18 +0800 Subject: [PATCH 147/330] Cache CMAKE_INSTALL_PREFIX (#171) CMAKE_INSTALL_PREFIX needs to be set as CACHE FORCE. Otherwise, the next time 'make install' is called without sourcing greenplum_path.sh, it will be reset to '/usr/local'. See the example of CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT https://cmake.org/cmake/help/latest/variable/CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT.html --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 9846ab06306..23b11483420 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -68,7 +68,7 @@ list( add_library(diskquota MODULE ${diskquota_SRC}) if(CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT) - set(CMAKE_INSTALL_PREFIX "${PG_HOME}") + set(CMAKE_INSTALL_PREFIX "${PG_HOME}" CACHE PATH "default install prefix" FORCE) endif() set_target_properties( From 30914f9954f11957c70cb4e40fbd90059b8cbce9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Xuebin=20Su=20=28=E8=8B=8F=E5=AD=A6=E6=96=8C=29?= <12034000+xuebinsu@users.noreply.github.com> Date: Mon, 7 Mar 2022 16:00:58 +0800 Subject: [PATCH 148/330] Fix empty quota view when no table (#167) Currently, quota views are implemented using INNER JOIN. As a result, when no table belongs to a role, schema, or tablespace, the quota view is empty even though there is a quota config defined for that role, schema, or tablespace. This patch fixes this by re-implementing quota views using OUTER JOIN. (A brief sketch of the pattern is included just before the new view definitions below.) The implementation also uses CTEs to modularize the big queries and to enable aggregation pushdown. --- .gitignore | 1 + diskquota--2.0.sql | 184 ++++++++++++++---- tests/regress/diskquota_schedule | 1 + .../expected/test_quota_view_no_table.out | 64 ++++++ .../regress/sql/test_quota_view_no_table.sql | 31 +++ 5 files changed, 247 insertions(+), 34 deletions(-) create mode 100644 tests/regress/expected/test_quota_view_no_table.out create mode 100644 tests/regress/sql/test_quota_view_no_table.sql diff --git a/.gitignore b/.gitignore index ffc330f1fec..332118e5a41 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,6 @@ *.o *.so +build regression.out regression.diffs diff --git a/diskquota--2.0.sql b/diskquota--2.0.sql index 7e2b706b17c..810a9b38a53 100644 --- a/diskquota--2.0.sql +++ b/diskquota--2.0.sql @@ -117,23 +117,58 @@ CREATE FUNCTION diskquota.show_relation_cache_all_seg() RETURNS setof diskquota.
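-- Editor's sketch: a minimal, self-contained illustration of the join
-- pattern this patch adopts, using hypothetical demo tables (NOT the
-- extension's real catalog). An INNER JOIN drops a quota config that has
-- no matching tables, while a LEFT OUTER JOIN plus COALESCE keeps it with
-- a reported usage of 0.
CREATE TEMP TABLE demo_quota_config (target_oid oid, quota_mb int8);
CREATE TEMP TABLE demo_table_size (owner_oid oid, size int8);
INSERT INTO demo_quota_config VALUES (16384, 1);
-- INNER JOIN: returns no rows, so the quota "disappears" from the view.
SELECT c.target_oid, c.quota_mb, sum(t.size) AS used
FROM demo_quota_config c JOIN demo_table_size t ON c.target_oid = t.owner_oid
GROUP BY c.target_oid, c.quota_mb;
-- LEFT OUTER JOIN + COALESCE: the quota row survives with zero usage.
WITH quota_usage AS (
    SELECT owner_oid, sum(size) AS total_size
    FROM demo_table_size
    GROUP BY owner_oid
)
SELECT c.target_oid, c.quota_mb, COALESCE(u.total_size, 0) AS used
FROM demo_quota_config c
LEFT OUTER JOIN quota_usage u ON c.target_oid = u.owner_oid;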
-- view part CREATE VIEW diskquota.show_fast_schema_quota_view AS -SELECT pgns.nspname AS schema_name, pgc.relnamespace AS schema_oid, qc.quotalimitMB AS quota_in_mb, SUM(ts.size) AS nspsize_in_bytes -FROM diskquota.table_size AS ts, - pg_class AS pgc, - diskquota.quota_config AS qc, - pg_namespace AS pgns -WHERE ts.tableid = pgc.oid AND qc.targetoid = pgc.relnamespace AND pgns.oid = pgc.relnamespace AND qc.quotatype=0 AND ts.segid=-1 -group by relnamespace, qc.quotalimitMB, pgns.nspname -order by pgns.nspname; +WITH + quota_usage AS ( + SELECT + relnamespace, + SUM(size) AS total_size + FROM + diskquota.table_size, + pg_class + WHERE + tableid = pg_class.oid AND + segid = -1 + GROUP BY + relnamespace + ) +SELECT + nspname AS schema_name, + targetoid AS schema_oid, + quotalimitMB AS quota_in_mb, + COALESCE(total_size, 0) AS nspsize_in_bytes +FROM + diskquota.quota_config JOIN + pg_namespace ON targetoid = pg_namespace.oid LEFT OUTER JOIN + quota_usage ON pg_namespace.oid = relnamespace +WHERE + quotaType = 0; -- NAMESPACE_QUOTA CREATE VIEW diskquota.show_fast_role_quota_view AS -SELECT pgr.rolname AS role_name, pgc.relowner AS role_oid, qc.quotalimitMB AS quota_in_mb, SUM(ts.size) AS rolsize_in_bytes -FROM diskquota.table_size AS ts, - pg_class AS pgc, - diskquota.quota_config AS qc, - pg_roles AS pgr -WHERE pgc.relowner = qc.targetoid AND pgc.relowner = pgr.oid AND ts.tableid = pgc.oid AND qc.quotatype=1 AND ts.segid=-1 -GROUP BY pgc.relowner, pgr.rolname, qc.quotalimitMB; +WITH + quota_usage AS ( + SELECT + relowner, + SUM(size) AS total_size + FROM + diskquota.table_size, + pg_class + WHERE + tableid = pg_class.oid AND + segid = -1 + GROUP BY + relowner + ) +SELECT + rolname AS role_name, + targetoid AS role_oid, + quotalimitMB AS quota_in_mb, + COALESCE(total_size, 0) AS rolsize_in_bytes +FROM + diskquota.quota_config JOIN + pg_roles ON targetoid = pg_roles.oid LEFT OUTER JOIN + quota_usage ON pg_roles.oid = relowner +WHERE + quotaType = 1; -- ROLE_QUOTA CREATE VIEW diskquota.show_fast_database_size_view AS SELECT ( @@ -145,27 +180,108 @@ SELECT ( CREATE VIEW diskquota.blackmap AS SELECT * FROM diskquota.show_blackmap() AS BM; CREATE VIEW diskquota.show_fast_schema_tablespace_quota_view AS -SELECT pgns.nspname AS schema_name, pgc.relnamespace AS schema_oid, pgsp.spcname AS tablespace_name, pgc.reltablespace AS tablespace_oid, qc.quotalimitMB AS quota_in_mb, SUM(ts.size) AS nspsize_tablespace_in_bytes -FROM diskquota.table_size AS ts, - pg_class AS pgc, - diskquota.quota_config AS qc, - pg_namespace AS pgns, - pg_tablespace AS pgsp, - diskquota.target AS t -WHERE ts.tableid = pgc.oid AND qc.targetoid = pgc.relnamespace AND pgns.oid = pgc.relnamespace AND pgsp.oid = pgc.reltablespace AND qc.quotatype=2 AND qc.targetoid=t.primaryoid AND t.tablespaceoid=pgc.reltablespace AND ts.segid=-1 -group by relnamespace, reltablespace, qc.quotalimitMB, pgns.nspname, pgsp.spcname -order by pgns.nspname, pgsp.spcname; +WITH + default_tablespace AS ( + SELECT dattablespace FROM pg_database + WHERE datname = current_database() + ), + quota_usage AS ( + SELECT + relnamespace, + CASE + WHEN reltablespace = 0 THEN dattablespace + ELSE reltablespace + END AS reltablespace, + SUM(size) AS total_size + FROM + diskquota.table_size, + pg_class, + default_tablespace + WHERE + tableid = pg_class.oid AND + segid = -1 + GROUP BY + relnamespace, + reltablespace, + dattablespace + ), + full_quota_config AS ( + SELECT + targetOid, + tablespaceoid, + quotalimitMB + FROM + diskquota.quota_config AS config, + 
diskquota.target AS target + WHERE + config.targetOid = target.primaryOid AND + config.quotaType = target.quotaType AND + config.quotaType = 2 -- NAMESPACE_TABLESPACE_QUOTA + ) +SELECT + nspname AS schema_name, + targetoid AS schema_oid, + spcname AS tablespace_name, + tablespaceoid AS tablespace_oid, + quotalimitMB AS quota_in_mb, + COALESCE(total_size, 0) AS nspsize_tablespace_in_bytes +FROM + full_quota_config JOIN + pg_namespace ON targetoid = pg_namespace.oid JOIN + pg_tablespace ON tablespaceoid = pg_tablespace.oid LEFT OUTER JOIN + quota_usage ON pg_namespace.oid = relnamespace AND pg_tablespace.oid = reltablespace; CREATE VIEW diskquota.show_fast_role_tablespace_quota_view AS -SELECT pgr.rolname AS role_name, pgc.relowner AS role_oid, pgsp.spcname AS tablespace_name, pgc.reltablespace AS tablespace_oid, qc.quotalimitMB AS quota_in_mb, SUM(ts.size) AS rolsize_tablespace_in_bytes -FROM diskquota.table_size AS ts, - pg_class AS pgc, - diskquota.quota_config AS qc, - pg_roles AS pgr, - pg_tablespace AS pgsp, - diskquota.target AS t -WHERE pgc.relowner = qc.targetoid AND pgc.relowner = pgr.oid AND ts.tableid = pgc.oid AND pgsp.oid = pgc.reltablespace AND qc.quotatype=3 AND qc.targetoid=t.primaryoid AND t.tablespaceoid=pgc.reltablespace AND ts.segid=-1 -GROUP BY pgc.relowner, reltablespace, pgr.rolname, pgsp.spcname, qc.quotalimitMB; +WITH + default_tablespace AS ( + SELECT dattablespace FROM pg_database + WHERE datname = current_database() + ), + quota_usage AS ( + SELECT + relowner, + CASE + WHEN reltablespace = 0 THEN dattablespace + ELSE reltablespace + END AS reltablespace, + SUM(size) AS total_size + FROM + diskquota.table_size, + pg_class, + default_tablespace + WHERE + tableid = pg_class.oid AND + segid = -1 + GROUP BY + relowner, + reltablespace, + dattablespace + ), + full_quota_config AS ( + SELECT + targetOid, + tablespaceoid, + quotalimitMB + FROM + diskquota.quota_config AS config, + diskquota.target AS target + WHERE + config.targetOid = target.primaryOid AND + config.quotaType = target.quotaType AND + config.quotaType = 3 -- ROLE_TABLESPACE_QUOTA + ) +SELECT + rolname AS role_name, + targetoid AS role_oid, + spcname AS tablespace_name, + tablespaceoid AS tablespace_oid, + quotalimitMB AS quota_in_mb, + COALESCE(total_size, 0) AS rolsize_tablespace_in_bytes +FROM + full_quota_config JOIN + pg_roles ON targetoid = pg_roles.oid JOIN + pg_tablespace ON tablespaceoid = pg_tablespace.oid LEFT OUTER JOIN + quota_usage ON pg_roles.oid = relowner AND pg_tablespace.oid = reltablespace; -- view end -- prepare to boot diff --git a/tests/regress/diskquota_schedule b/tests/regress/diskquota_schedule index e5521064012..3598cd751e1 100644 --- a/tests/regress/diskquota_schedule +++ b/tests/regress/diskquota_schedule @@ -8,6 +8,7 @@ test: test_pause_and_resume_multiple_db test: test_drop_after_pause test: test_show_status test: test_update_db_cache +test: test_quota_view_no_table # disable this test due to GPDB behavior change # test: test_table_size test: test_fast_disk_check diff --git a/tests/regress/expected/test_quota_view_no_table.out b/tests/regress/expected/test_quota_view_no_table.out new file mode 100644 index 00000000000..27a0b315f5b --- /dev/null +++ b/tests/regress/expected/test_quota_view_no_table.out @@ -0,0 +1,64 @@ +CREATE ROLE no_table SUPERUSER; +CREATE SCHEMA no_table; +SELECT diskquota.set_schema_quota('no_table', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +SELECT schema_name, quota_in_mb, nspsize_in_bytes +FROM diskquota.show_fast_schema_quota_view; 
+ schema_name | quota_in_mb | nspsize_in_bytes +-------------+-------------+------------------ + no_table | 1 | 0 +(1 row) + +SELECT diskquota.set_role_quota('no_table', '1 MB'); + set_role_quota +---------------- + +(1 row) + +SELECT role_name, quota_in_mb, rolsize_in_bytes +FROM diskquota.show_fast_role_quota_view; + role_name | quota_in_mb | rolsize_in_bytes +-----------+-------------+------------------ + no_table | 1 | 0 +(1 row) + +SELECT diskquota.set_schema_tablespace_quota('no_table', 'pg_default', '1 MB'); + set_schema_tablespace_quota +----------------------------- + +(1 row) + +SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes +FROM diskquota.show_fast_schema_tablespace_quota_view; + schema_name | tablespace_name | quota_in_mb | nspsize_tablespace_in_bytes +-------------+-----------------+-------------+----------------------------- + no_table | pg_default | 1 | 0 +(1 row) + +SELECT diskquota.set_role_tablespace_quota('no_table', 'pg_default', '1 MB'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +SELECT role_name, tablespace_name , quota_in_mb, rolsize_tablespace_in_bytes +FROM diskquota.show_fast_role_tablespace_quota_view; + role_name | tablespace_name | quota_in_mb | rolsize_tablespace_in_bytes +-----------+-----------------+-------------+----------------------------- + no_table | pg_default | 1 | 0 +(1 row) + +DROP ROLE no_table; +DROP SCHEMA no_table; +-- Wait until the quota configs are removed from the memory +-- automatically after DROP. +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + diff --git a/tests/regress/sql/test_quota_view_no_table.sql b/tests/regress/sql/test_quota_view_no_table.sql new file mode 100644 index 00000000000..11c0398bb36 --- /dev/null +++ b/tests/regress/sql/test_quota_view_no_table.sql @@ -0,0 +1,31 @@ +CREATE ROLE no_table SUPERUSER; + +CREATE SCHEMA no_table; + +SELECT diskquota.set_schema_quota('no_table', '1 MB'); + +SELECT schema_name, quota_in_mb, nspsize_in_bytes +FROM diskquota.show_fast_schema_quota_view; + +SELECT diskquota.set_role_quota('no_table', '1 MB'); + +SELECT role_name, quota_in_mb, rolsize_in_bytes +FROM diskquota.show_fast_role_quota_view; + +SELECT diskquota.set_schema_tablespace_quota('no_table', 'pg_default', '1 MB'); + +SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes +FROM diskquota.show_fast_schema_tablespace_quota_view; + +SELECT diskquota.set_role_tablespace_quota('no_table', 'pg_default', '1 MB'); + +SELECT role_name, tablespace_name , quota_in_mb, rolsize_tablespace_in_bytes +FROM diskquota.show_fast_role_tablespace_quota_view; + +DROP ROLE no_table; + +DROP SCHEMA no_table; + +-- Wait until the quota configs are removed from the memory +-- automatically after DROP. +SELECT diskquota.wait_for_worker_new_epoch(); From 881e0f77584b1b25de0738720bb5635d5824ea86 Mon Sep 17 00:00:00 2001 From: Chen Mulong Date: Mon, 7 Mar 2022 17:53:33 +0800 Subject: [PATCH 149/330] mc/cmake installcheck (#169) CMake installcheck: - Regress.cmake is introduced to add some helper functions which can easily create regress/isolation2 test targets in the CMakeLists. - When creating regress targets, cmake will generate a series of diff_<name>_<casename> targets to show the result diff of a specific case. - Porting the isolation2_regress build from Makefile to cmake. - Moved the magic trap regress diff from the concourse script to cmake.
If the test target is called with env `SHOW_REGRESS_DIFF=1`, then the regression diff will be printed when a case fails. - `sql`/`expected` will be linked to the cmake binary directory and the tests run from there, so the `results` won't pollute the source directory. Concourse scripts restructure: This commit also tries to explore how the concourse scripts are structured. Currently, the container is started with the root user, and this cannot be changed in a short time. But we require the gpadmin user for basically everything. An `entry.sh` is introduced as a simple skeleton for the scripts. It makes sure a gpadmin user is created and that everything needed can be accessed by gpadmin via a permanent path. So we don't have too much ENV magic in the task script any more. Most of the setup work should be done in the `entry.sh` and the task script should only do a few simple steps with absolute paths. By doing this, when we hijack into the concourse container and log in as the gpadmin user, the environment should be ready. Simple copy & paste from the task script will just work. `upgrade_extension.sh` may not be needed anymore, and it won't work. It will be cleaned up later when we have the full implementation of upgrade. --- CMakeLists.txt | 3 + Makefile | 4 - README.md | 42 +++- cmake/Gpdb.cmake | 14 ++ cmake/Regress.cmake | 143 ++++++++++++++ cmake/current_binary_name | 9 - cmake/show_regress_diff.sh | 19 ++ concourse/scripts/build_diskquota.sh | 28 +-- concourse/scripts/entry.sh | 181 ++++++++++++++++++ concourse/scripts/install_dep.sh | 26 --- concourse/scripts/test_common.sh | 60 ------ concourse/scripts/test_diskquota.sh | 49 ++--- concourse/tasks/build_diskquota.yml | 4 +- concourse/tasks/test_diskquota.yml | 4 +- tests/CMakeLists.txt | 29 +++ tests/Makefile | 14 -- tests/data/current_binary_name | 9 + tests/isolation2/expected/config.out | 2 +- tests/isolation2/sql/config.sql | 2 +- .../expected/test_ctas_no_preload_lib.out | 2 +- tests/regress/sql/config.sql | 2 +- .../regress/sql/test_ctas_no_preload_lib.sql | 2 +- upgrade_test/expected/init.out | 2 +- upgrade_test/sql/init.sql | 2 +- 24 files changed, 467 insertions(+), 185 deletions(-) delete mode 100644 Makefile create mode 100644 cmake/Regress.cmake delete mode 100755 cmake/current_binary_name create mode 100755 cmake/show_regress_diff.sh create mode 100755 concourse/scripts/entry.sh delete mode 100755 concourse/scripts/install_dep.sh delete mode 100644 concourse/scripts/test_common.sh create mode 100644 tests/CMakeLists.txt delete mode 100644 tests/Makefile create mode 100755 tests/data/current_binary_name diff --git a/CMakeLists.txt b/CMakeLists.txt index 23b11483420..1297f0ecfa4 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -78,6 +78,9 @@ set_target_properties( C_STANDARD 99 LINKER_LANGUAGE "CXX") +# Add installcheck targets +add_subdirectory(tests) + # packing part, move to a separate file if this part is too large include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/Distro.cmake) diff --git a/Makefile b/Makefile deleted file mode 100644 index 08205742d95..00000000000 --- a/Makefile +++ /dev/null @@ -1,4 +0,0 @@ -.PHONY: installcheck -installcheck: - $(MAKE) -C tests installcheck-regress - $(MAKE) -C tests installcheck-isolation2 diff --git a/README.md b/README.md index ea431fc22b3..f5292c187f2 100644 --- a/README.md +++ b/README.md @@ -83,11 +83,33 @@ That is to say, a role may have different quota limit on different databases and their disk usage is isolated between databases. # Install -1. Compile disk quota with pgxs.
+ +[cmake](https://cmake.org) (>= 3.18) needs to be installed. + +1. Build & install disk quota +``` +mkdir -p /build +cd /build +``` + +If the `greenplum_path.sh` has been sourced: + +``` +cmake .. +``` + +Otherwise: + +``` +# Without sourcing greenplum_path.sh +cmake .. -DPG_CONFIG=<GPHOME>/bin/pg_config +# +``` + +Build and install: + ``` -cd $diskquota_src; -make; -make install; +make install ``` 2. Create database to store global information. @@ -171,11 +193,19 @@ select * from diskquota.show_fast_schema_quota_view; # Test -Run regression tests. +Run regression tests: ``` -cd diskquota_src; +cd /build; make installcheck ``` +Show quick diff of regress results: +``` +make diff_<name>_<casename> +``` +Show all build targets: +``` +make help +``` # HA Not implemented yet. One solution would be: start launcher process on standby diff --git a/cmake/Gpdb.cmake b/cmake/Gpdb.cmake index 91f44200af3..25fcfa15b69 100644 --- a/cmake/Gpdb.cmake +++ b/cmake/Gpdb.cmake @@ -17,6 +17,7 @@ # PG_C_FLAGS - CFLAGS value used when PostgreSQL was built # PG_LD_FLAGS - LDFLAGS value used when PostgreSQL was built # PG_HOME - The installation directory of Greenplum +# PG_SRC_DIR - The directory of the postgres/greenplum source code include_guard() find_program(PG_CONFIG pg_config) @@ -37,6 +38,19 @@ exec_program(${PG_CONFIG} ARGS --libs OUTPUT_VARIABLE PG_LIBS) exec_program(${PG_CONFIG} ARGS --libdir OUTPUT_VARIABLE PG_LIB_DIR) exec_program(${PG_CONFIG} ARGS --pgxs OUTPUT_VARIABLE PG_PGXS) get_filename_component(PG_HOME "${PG_BIN_DIR}/.." ABSOLUTE) +if (NOT PG_SRC_DIR) + get_filename_component(pgsx_SRC_DIR ${PG_PGXS} DIRECTORY) + set(makefile_global ${pgsx_SRC_DIR}/../Makefile.global) + # Some magic to find out the source code root from pg's Makefile.global + execute_process( + COMMAND_ECHO STDOUT + COMMAND + grep abs_top_builddir ${makefile_global} + COMMAND + sed s/.*abs_top_builddir.*=\\\(.*\\\)/\\1/ + OUTPUT_VARIABLE PG_SRC_DIR OUTPUT_STRIP_TRAILING_WHITESPACE) + string(STRIP ${PG_SRC_DIR} PG_SRC_DIR) +endif() # Get the GP_MAJOR_VERSION from header file(READ ${PG_INCLUDE_DIR}/pg_config.h config_header) diff --git a/cmake/Regress.cmake b/cmake/Regress.cmake new file mode 100644 index 00000000000..9b026995dbb --- /dev/null +++ b/cmake/Regress.cmake @@ -0,0 +1,143 @@ +# CMake module for creating regress test targets. +# +# Usage: +# RegressTarget_Add(<name> +# SQL_DIR <sql_dir> +# EXPECTED_DIR <expected_dir> +# [INIT_FILE <init_file> ...] +# [SCHEDULE_FILE <schedule_file> ...] +# [REGRESS <test> ...] +# [REGRESS_OPTS <option> ...] +# [REGRESS_TYPE isolation2/regress] +# ) +# All the file paths can be relative to ${CMAKE_CURRENT_SOURCE_DIR}. +# A bunch of diff targets will be created as well for comparing the regress results. The diff +# target names look like diff_<name>_<casename> +# +# NOTE: To use this cmake file in another project, the show_regress_diff.sh needs to be placed +# alongside. +# +# Example: +# RegressTarget_Add(installcheck_avro_fmt +# REGRESS ${avro_regress_TARGETS} +# INIT_FILE init_file +# DATA_DIR data +# SQL_DIR sql +# EXPECTED_DIR expected_${GP_MAJOR_VERSION}) + +# CMAKE_CURRENT_FUNCTION_LIST_DIR - 3.17 cmake_minimum_required(VERSION 3.17) + +# pg_isolation2_regress is not shipped with the GPDB release. It needs to be created from source.
+function(_PGIsolation2Target_Add working_DIR) + if(TARGET pg_isolation2_regress) + return() + endif() + + add_custom_target( + pg_isolation2_regress + COMMAND + make -C ${PG_SRC_DIR}/src/test/isolation2 install + COMMAND + ${CMAKE_COMMAND} -E copy_if_different + ${PG_SRC_DIR}/src/test/isolation2/sql_isolation_testcase.py ${working_DIR} + ) +endfunction() + +function(RegressTarget_Add name) + cmake_parse_arguments( + arg + "" + "SQL_DIR;EXPECTED_DIR;DATA_DIR;REGRESS_TYPE" + "REGRESS;REGRESS_OPTS;INIT_FILE;SCHEDULE_FILE" + ${ARGN}) + if (NOT arg_EXPECTED_DIR) + message(FATAL_ERROR + "'EXPECTED_DIR' needs to be specified.") + endif() + if (NOT arg_SQL_DIR) + message(FATAL_ERROR + "'SQL_DIR' needs to be specified.") + endif() + + set(working_DIR "${CMAKE_CURRENT_BINARY_DIR}/${name}") + file(MAKE_DIRECTORY ${working_DIR}) + + # Isolation2 test has different executable to run + if(arg_REGRESS_TYPE STREQUAL isolation2) + set(regress_BIN ${PG_SRC_DIR}/src/test/isolation2/pg_isolation2_regress) + _PGIsolation2Target_Add(${working_DIR}) + else() + set(regress_BIN ${PG_PKG_LIB_DIR}/pgxs/src/test/regress/pg_regress) + if (NOT EXISTS ${regress_BIN}) + message(FATAL_ERROR + "Cannot find 'pg_regress' executable by path '${regress_BIN}'. Is 'pg_config' in the $PATH?") + endif() + endif() + + # Set REGRESS test cases + foreach(r IN LISTS arg_REGRESS) + set(regress_arg ${regress_arg} ${r}) + endforeach() + + # Set REGRESS options + foreach(o IN LISTS arg_INIT_FILE) + get_filename_component(init_file_PATH ${o} ABSOLUTE) + list(APPEND arg_REGRESS_OPTS "--init=${init_file_PATH}") + endforeach() + foreach(o IN LISTS arg_SCHEDULE_FILE) + get_filename_component(schedule_file_PATH ${o} ABSOLUTE) + list(APPEND arg_REGRESS_OPTS "--schedule=${schedule_file_PATH}") + endforeach() + foreach(o IN LISTS arg_REGRESS_OPTS) + set(regress_opts_arg ${regress_opts_arg} ${o}) + endforeach() + + get_filename_component(sql_DIR ${arg_SQL_DIR} ABSOLUTE) + get_filename_component(expected_DIR ${arg_EXPECTED_DIR} ABSOLUTE) + if (arg_DATA_DIR) + get_filename_component(data_DIR ${arg_DATA_DIR} ABSOLUTE) + set(ln_data_dir_CMD ln -s ${data_DIR} data) + endif() + + # Create the target + add_custom_target( + ${name} + WORKING_DIRECTORY ${working_DIR} + COMMAND rm -f sql + COMMAND ln -s ${sql_DIR} sql + COMMAND rm -f expected + COMMAND ln -s ${expected_DIR} expected + COMMAND rm -f data + COMMAND ${ln_data_dir_CMD} + COMMAND + ${regress_BIN} --psqldir='${PG_BIN_DIR}' ${regress_opts_arg} ${regress_arg} + || + ${CMAKE_CURRENT_FUNCTION_LIST_DIR}/show_regress_diff.sh ${working_DIR} + ) + + if(arg_REGRESS_TYPE STREQUAL isolation2) + add_dependencies(${name} pg_isolation2_regress) + endif() + + # Add targets for easily showing results diffs + FILE(GLOB expected_files ${expected_DIR}/*.out) + foreach(f IN LISTS expected_files) + get_filename_component(casename ${f} NAME_WE) + set(diff_target_name diff_${name}_${casename}) + # Check if the diff target has been created before + if(NOT TARGET ${diff_target_name}) + add_custom_target(${diff_target_name} + COMMAND + diff + ${working_DIR}/expected/${casename}.out + ${working_DIR}/results/${casename}.out || exit 0 + COMMAND + echo ${working_DIR}/expected/${casename}.out + COMMAND + echo ${working_DIR}/results/${casename}.out + ) + endif() + endforeach() +endfunction() + diff --git a/cmake/current_binary_name b/cmake/current_binary_name deleted file mode 100755 index 475789a561d..00000000000 --- a/cmake/current_binary_name +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/bash - -cd "$(dirname "$0")" - -if [ $(grep 
-P '^1.0' ../VERSION) ]; then - echo -n "diskquota.so" -else - echo -n "diskquota-$(grep -o -P '^\d+.\d+' ../VERSION).so" -fi diff --git a/cmake/show_regress_diff.sh b/cmake/show_regress_diff.sh new file mode 100755 index 00000000000..cc1de5c2c8f --- /dev/null +++ b/cmake/show_regress_diff.sh @@ -0,0 +1,19 @@ +#!/bin/bash + +if [ -z "${SHOW_REGRESS_DIFF}" ]; then + exit 1 +fi + +diff_files=$(find "$1" -name regression.diffs) +for diff_file in ${diff_files}; do + if [ -f "${diff_file}" ]; then + cat <<-FEOF +====================================================================== +DIFF FILE: ${diff_file} +====================================================================== + +$(grep -v GP_IGNORE "${diff_file}") +FEOF + fi +done +exit 1 diff --git a/concourse/scripts/build_diskquota.sh b/concourse/scripts/build_diskquota.sh index 498f3f3588d..ea443540612 100755 --- a/concourse/scripts/build_diskquota.sh +++ b/concourse/scripts/build_diskquota.sh @@ -2,11 +2,6 @@ set -exo pipefail -CWDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" -TOP_DIR=${CWDIR}/../../../ - -source "${TOP_DIR}/diskquota_src/concourse/scripts/install_dep.sh" -source "${TOP_DIR}/gpdb_src/concourse/scripts/common.bash" function pkg() { [ -f /opt/gcc_env.sh ] && source /opt/gcc_env.sh source /usr/local/greenplum-db-devel/greenplum_path.sh @@ -15,30 +10,13 @@ function pkg() { export CC="$(which gcc)" fi - export USE_PGXS=1 - pushd diskquota_src/ - DISKQUOTA_VERSION=$(git describe --tags) - mkdir build - cmake -B build . - make -C build install - popd - - pushd /usr/local/greenplum-db-devel/ - echo 'cp -r lib share $GPHOME || exit 1'> install_gpdb_component - chmod a+x install_gpdb_component - tar -czf "$TOP_DIR/diskquota_artifacts/diskquota-${DISKQUOTA_VERSION}-${DISKQUOTA_OS}_x86_64.tar.gz" \ - lib/postgresql/diskquota*.so \ - "share/postgresql/extension/diskquota.control" \ - "share/postgresql/extension/diskquota--1.0.sql" \ - "share/postgresql/extension/diskquota--2.0.sql" \ - "share/postgresql/extension/diskquota--1.0--2.0.sql" \ - "share/postgresql/extension/diskquota--2.0--1.0.sql" \ - "install_gpdb_component" + pushd /home/gpadmin/diskquota_artifacts + cmake /home/gpadmin/diskquota_src + cmake --build . --target package popd } function _main() { - time install_gpdb time pkg } diff --git a/concourse/scripts/entry.sh b/concourse/scripts/entry.sh new file mode 100755 index 00000000000..3a4b2e2411c --- /dev/null +++ b/concourse/scripts/entry.sh @@ -0,0 +1,181 @@ +#!/bin/bash + +# Entry point for GPDB source & cluster related tasks. +# This script sets up the basic build/test environment, including: +# - Create a gpadmin user +# - Copy all files from /tmp/build/xxx (concourse WORKDIR) to /home/gpadmin/ and chown to gpadmin +# - Install dependencies that don't exist in the build/test image. +# - Special setup for individual task which needs root permission. +# - At the end, call the task script with gpadmin permission. +# +# Simple rules: +# 1. Any root operations should happen in this script. +# 2. Task script only requires gpadmin permission. +# 3. Since everything has been copied to the /home/gpadmin directory, use absolute paths as much +# as possible in the task script; this reduces confusion when we fly into the concourse +# container. +# 4. Bash functions should be idempotent as much as possible to make fly hijack debugging easier. + +set -eox + +if [[ ! ${PWD} =~ /tmp/build/[0-9a-z]* ]]; then + echo "This script should always be started from concourse WORKDIR."
+fi + +# Internal utility functions +_determine_os() { + local name version + if [ -f /etc/redhat-release ]; then + name="centos" + version=$(sed /dev/stderr + exit 1 + fi + echo "${name}${version}" +} + +# Global ENV defines +# /tmp/build/xxxxx. It should not be used in normal conditions. Use /home/gpadmin instead. +# Everything has been linked there. +export CONCOURSE_WORK_DIR=${PWD} + + +# Dependency installers +# Ideally all dependencies should exist in the docker image. Use this script to install them only +# if it is more difficult to change it on the image side. +# Download the dependencies with concourse resources as much as possible, then we could benefit from +# concourse's resource cache system. +install_cmake() { + # cmake_new to avoid name collision with the docker image. + local cmake_home="/opt/cmake_new" + if [ -e "${cmake_home}" ]; then + echo "cmake might have been installed in ${cmake_home}" + return + fi + echo "Installing cmake to ${cmake_home}..." + pushd bin_cmake + mkdir -p "${cmake_home}" + sh cmake-*-linux-x86_64.sh --skip-license --prefix="${cmake_home}" + popd + echo "export PATH=${cmake_home}/bin:$PATH" >> /home/gpadmin/.bashrc +} + +# Create gpadmin user and chown all files in the PWD. All files will be linked to /home/gpadmin. +# All of our work should be started from there. +setup_gpadmin() { + # If the gpadmin user exists, quit + if grep -c '^gpadmin:' /etc/passwd; then + return + fi + + # If the image has sshd, then we call gpdb's setup_gpadmin_user.bash to create the gpadmin user + # and set up ssh. + # Otherwise, create the gpadmin user only. + if [ -f /etc/ssh/sshd_config ]; then + local gpdb_concourse_dir="${CONCOURSE_WORK_DIR}/gpdb_src/concourse/scripts" + "${gpdb_concourse_dir}/setup_gpadmin_user.bash" + else + local test_os=$(_determine_os) + # Below is copied from setup_gpadmin_user.bash + groupadd supergroup + case "$test_os" in + centos*) + /usr/sbin/useradd -G supergroup,tty gpadmin + ;; + ubuntu*) + /usr/sbin/useradd -G supergroup,tty gpadmin -s /bin/bash + ;; + sles*) + # create a default group gpadmin, and add user gpadmin to group gpadmin, supergroup, + # tty + /usr/sbin/useradd -U -G supergroup,tty gpadmin + ;; + photon*) + /usr/sbin/useradd -U -G supergroup,tty,root gpadmin + ;; + *) echo "Unknown OS: $test_os"; exit 1 ;; + esac + echo -e "password\npassword" | passwd gpadmin + fi + mkdir -p /home/gpadmin + chown gpadmin:gpadmin /home/gpadmin + + chown -R gpadmin:gpadmin /tmp/build + ln -s "${CONCOURSE_WORK_DIR}"/* /home/gpadmin +} + +# Extract gpdb binary +function install_gpdb() { + [ ! -d /usr/local/greenplum-db-devel ] && mkdir -p /usr/local/greenplum-db-devel + tar -xzf "${CONCOURSE_WORK_DIR}/bin_gpdb/bin_gpdb.tar.gz" -C /usr/local/greenplum-db-devel + chown -R gpadmin:gpadmin /usr/local/greenplum-db-devel +} + +## Currently, the isolation2 testing framework relies on pg_isolation2_regress, so we +## should build it from source. However, in concourse, the gpdb_bin is fetched +## from a remote machine, and the $(abs_top_srcdir) variable points to a non-existing +## location; we fix this issue by creating a symbolic link for it. +function create_fake_gpdb_src() { + local fake_gpdb_src + fake_gpdb_src=/tmp/build/"$(\ + grep -rnw '/usr/local/greenplum-db-devel' -e 'abs_top_srcdir = .*' |\ + head -n 1 | awk -F"/" '{print $(NF-1)}')" + + if [ -d "${fake_gpdb_src}" ]; then + echo "Fake gpdb source directory has been configured."
+ return + fi + + pushd /home/gpadmin/gpdb_src + ./configure --prefix=/usr/local/greenplum-db-devel \ + --without-zstd \ + --disable-orca --disable-gpcloud --enable-debug-extensions + popd + + mkdir -p "${fake_gpdb_src}" + ln -s /home/gpadmin/gpdb_src "${fake_gpdb_src}/gpdb_src" +} + +# Setup common environment +setup_gpadmin +install_cmake +install_gpdb + +# Do the special setup with root permission for each task, then run the real task script with +# gpadmin. bashrc won't be read by 'su', it needs to be sourced explicitly. +case "$1" in + build) + su gpadmin -c \ + "source /home/gpadmin/.bashrc &&\ + /home/gpadmin/diskquota_src/concourse/scripts/build_diskquota.sh" + ;; + test) + # Build task output is diskquota_artifacts, which is different from the test task input + # bin_diskquota. Ideally we can use the same name for input and output. But that will cause + # compatibility issues with the 1.x pipeline script. + ln -s /home/gpadmin/bin_diskquota /home/gpadmin/diskquota_artifacts + create_fake_gpdb_src + # Create GPDB cluster + source "/home/gpadmin/gpdb_src/concourse/scripts/common.bash" + make_cluster + # To make fly debugging easier + echo "source /usr/local/greenplum-db-devel/greenplum_path.sh" >> /home/gpadmin/.bashrc + su gpadmin -c \ + "source /home/gpadmin/.bashrc &&\ + /home/gpadmin/diskquota_src/concourse/scripts/test_diskquota.sh" + ;; + *) + echo "Unknown target task $1" + exit 1 + ;; +esac diff --git a/concourse/scripts/install_dep.sh b/concourse/scripts/install_dep.sh deleted file mode 100755 index 5835bd47182..00000000000 --- a/concourse/scripts/install_dep.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash - -# Script to install build & test dependencies -# Ideally all dependencies should exist in the docker image. Use this script to install them only -# if it is more difficult to change it on the image side. -# Download the dependencies with concourse resources as much as possible, then we could benefit from -# concourse's resource cache system. - -set -eox - -_install_cmake() { - # cmake_new to avoid name collision with the docker image. - local cmake_home="/opt/cmake_new" - if [ -e "${cmake_home}" ]; then - echo "cmake might have been installed in ${cmake_home}" - return - fi - echo "Installing cmake to ${cmake_home}..." - pushd bin_cmake - mkdir -p "${cmake_home}" - sh cmake-*-linux-x86_64.sh --skip-license --prefix="${cmake_home}" - popd - export PATH="${cmake_home}/bin":"$PATH" -} - -_install_cmake diff --git a/concourse/scripts/test_common.sh b/concourse/scripts/test_common.sh deleted file mode 100644 index d800e6132bb..00000000000 --- a/concourse/scripts/test_common.sh +++ /dev/null @@ -1,60 +0,0 @@ -# the directory to run the "make install" as the first param -# the second param is a bool var, used to judge if need to active the standby -# and run the regress test again -function test(){ - chown -R gpadmin:gpadmin ${TOP_DIR}; - cat > /home/gpadmin/test.sh <<-EOF - set -exo pipefail - source gpdb_src/gpAux/gpdemo/gpdemo-env.sh - echo "export MASTER_DATA_DIRECTORY=\$MASTER_DATA_DIRECTORY" >> /usr/local/greenplum-db-devel/greenplum_path.sh - source /usr/local/greenplum-db-devel/greenplum_path.sh - createdb diskquota - gpstop -arf - # the dir to run the "make install" command - pushd $1 - - function look4diffs() { - diff_files=\`find ..
-name regression.diffs\` - for diff_file in \${diff_files}; do - if [ -f "\${diff_file}" ]; then - cat <<-FEOF - ====================================================================== - DIFF FILE: \${diff_file} - ====================================================================== - - \$(grep -v GP_IGNORE "\${diff_file}") - FEOF - fi - done - exit 1 - } - - trap look4diffs ERR - make installcheck - - if $2 ; then - ## Bring down the QD. - gpstop -may -M immediate - export PGPORT=6001 - export MASTER_DATA_DIRECTORY=${TOP_DIR}/gpdb_src/gpAux/gpdemo/datadirs/standby - gpactivatestandby -ad \$MASTER_DATA_DIRECTORY - echo "export PGPORT=\$PGPORT" >> /usr/local/greenplum-db-devel/greenplum_path.sh - echo "export MASTER_DATA_DIRECTORY=\$MASTER_DATA_DIRECTORY" >> /usr/local/greenplum-db-devel/greenplum_path.sh - source /usr/local/greenplum-db-devel/greenplum_path.sh - make installcheck - fi - popd - EOF - export MASTER_DATA_DIRECTORY=${TOP_DIR}/gpdb_src/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 - chown gpadmin:gpadmin /home/gpadmin/test.sh - chmod a+x /home/gpadmin/test.sh - su gpadmin -c "bash /home/gpadmin/test.sh" -} - -function setup_gpadmin_user() { - ${GPDB_CONCOURSE_DIR}/setup_gpadmin_user.bash -} - -function install_diskquota() { - tar -xzf bin_diskquota/*.tar.gz -C /usr/local/greenplum-db-devel -} diff --git a/concourse/scripts/test_diskquota.sh b/concourse/scripts/test_diskquota.sh index b06504a0eb5..42701c38dec 100755 --- a/concourse/scripts/test_diskquota.sh +++ b/concourse/scripts/test_diskquota.sh @@ -2,40 +2,25 @@ set -exo pipefail -CWDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" -TOP_DIR=${CWDIR}/../../../ -GPDB_CONCOURSE_DIR=${TOP_DIR}/gpdb_src/concourse/scripts -CUT_NUMBER=6 - -source "${TOP_DIR}/diskquota_src/concourse/scripts/install_dep.sh" -source "${GPDB_CONCOURSE_DIR}/common.bash" -source "${TOP_DIR}/diskquota_src/concourse/scripts/test_common.sh" - -## Currently, isolation2 testing framework relies on pg_isolation2_regress, we -## should build it from source. However, in concourse, the gpdb_bin is fetched -## from remote machine, the $(abs_top_srcdir) variable points to a non-existing -## location, we fixes this issue by creating a symbolic link for it. -function create_fake_gpdb_src() { - pushd gpdb_src - ./configure --prefix=/usr/local/greenplum-db-devel \ - --without-zstd \ - --disable-orca --disable-gpcloud --enable-debug-extensions - popd - - FAKE_GPDB_SRC=/tmp/build/"$(grep -rnw '/usr/local/greenplum-db-devel' -e 'abs_top_srcdir = .*' | head -n 1 | awk -F"/" '{print $(NF-1)}')" - mkdir -p ${FAKE_GPDB_SRC} - ln -s ${TOP_DIR}/gpdb_src ${FAKE_GPDB_SRC}/gpdb_src +function activate_standby() { + gpstop -may -M immediate + export PGPORT=6001 + export MASTER_DATA_DIRECTORY=/home/gpadmin/gpdb_src/gpAux/gpdemo/datadirs/standby + gpactivatestandby -ad $MASTER_DATA_DIRECTORY } function _main() { - time install_gpdb - create_fake_gpdb_src - time setup_gpadmin_user - - time make_cluster - time install_diskquota - - time test ${TOP_DIR}/diskquota_src/ true + tar -xzf /home/gpadmin/bin_diskquota/*.tar.gz -C /usr/local/greenplum-db-devel + source /home/gpadmin/gpdb_src/gpAux/gpdemo/gpdemo-env.sh + + pushd /home/gpadmin/diskquota_artifacts + # Show regress diff if test fails + export SHOW_REGRESS_DIFF=1 + time cmake --build . --target installcheck + # Run test again with standby master + activate_standby + time cmake --build . 
--target installcheck + popd } -_main "$@" +_main diff --git a/concourse/tasks/build_diskquota.yml b/concourse/tasks/build_diskquota.yml index 05b756720cb..951cf45de7d 100644 --- a/concourse/tasks/build_diskquota.yml +++ b/concourse/tasks/build_diskquota.yml @@ -11,6 +11,8 @@ outputs: - name: diskquota_artifacts run: - path: diskquota_src/concourse/scripts/build_diskquota.sh + path: diskquota_src/concourse/scripts/entry.sh + args: + - build params: DISKQUOTA_OS: diff --git a/concourse/tasks/test_diskquota.yml b/concourse/tasks/test_diskquota.yml index 160048398bf..ed9bfdc2a60 100644 --- a/concourse/tasks/test_diskquota.yml +++ b/concourse/tasks/test_diskquota.yml @@ -9,6 +9,8 @@ inputs: - name: bin_cmake run: - path: diskquota_src/concourse/scripts/test_diskquota.sh + path: diskquota_src/concourse/scripts/entry.sh + args: + - test params: DISKQUOTA_OS: diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt new file mode 100644 index 00000000000..e625ee65915 --- /dev/null +++ b/tests/CMakeLists.txt @@ -0,0 +1,29 @@ +include(${CMAKE_SOURCE_DIR}/cmake/Regress.cmake) + +RegressTarget_Add(regress + INIT_FILE + ${CMAKE_CURRENT_SOURCE_DIR}/init_file + ${CMAKE_CURRENT_SOURCE_DIR}/regress/regress_init_file + SQL_DIR ${CMAKE_CURRENT_SOURCE_DIR}/regress/sql + EXPECTED_DIR ${CMAKE_CURRENT_SOURCE_DIR}/regress/expected + DATA_DIR ${CMAKE_CURRENT_SOURCE_DIR}/data + SCHEDULE_FILE ${CMAKE_CURRENT_SOURCE_DIR}/regress/diskquota_schedule + REGRESS_OPTS + --load-extension=gp_inject_fault + --dbname=contrib_regression) + +RegressTarget_Add(isolation2 + REGRESS_TYPE + isolation2 + INIT_FILE + ${CMAKE_CURRENT_SOURCE_DIR}/init_file + SQL_DIR ${CMAKE_CURRENT_SOURCE_DIR}/isolation2/sql + EXPECTED_DIR ${CMAKE_CURRENT_SOURCE_DIR}/isolation2/expected + DATA_DIR ${CMAKE_CURRENT_SOURCE_DIR}/data + SCHEDULE_FILE ${CMAKE_CURRENT_SOURCE_DIR}/isolation2/isolation2_schedule + REGRESS_OPTS + --load-extension=gp_inject_fault + --dbname=isolation2test) + +add_custom_target(installcheck) +add_dependencies(installcheck isolation2 regress) diff --git a/tests/Makefile b/tests/Makefile deleted file mode 100644 index 0c07a6fea6b..00000000000 --- a/tests/Makefile +++ /dev/null @@ -1,14 +0,0 @@ -.PHONY: installcheck-regress -installcheck-regress: - $(MAKE) -C regress installcheck - -## Check whether we are able to run isolation2 tests. -## If the gp_inject_fault extension is not enabled, we emit a warning message for that. -.PHONY: installcheck-isolation2 -installcheck-isolation2: -ifneq (,$(findstring '--enable-debug-extensions',$(shell pg_config --configure))) - $(MAKE) -C isolation2 installcheck -else - @echo -e "\033[0;33mThe gp_inject_fault extension is not enabled in the current build of Greenplum, isolation2 tests will not run." 
- @echo -e "To enable gp_inject_fault extension, append --enable-debug-extensions option to ./configure\033[0m" -endif diff --git a/tests/data/current_binary_name b/tests/data/current_binary_name new file mode 100755 index 00000000000..6f9daf009c6 --- /dev/null +++ b/tests/data/current_binary_name @@ -0,0 +1,9 @@ +#!/bin/bash + +cd "$(dirname "$0")" + +if [ $(grep -P '^1.0' ../../VERSION) ]; then + echo -n "diskquota.so" +else + echo -n "diskquota-$(grep -o -P '^\d+.\d+' ../../VERSION).so" +fi diff --git a/tests/isolation2/expected/config.out b/tests/isolation2/expected/config.out index c322ad8b37e..d57d72f809a 100644 --- a/tests/isolation2/expected/config.out +++ b/tests/isolation2/expected/config.out @@ -1,5 +1,5 @@ -!\retcode gpconfig -c shared_preload_libraries -v $(../../cmake/current_binary_name); +!\retcode gpconfig -c shared_preload_libraries -v $(./data/current_binary_name); (exited with code 0) !\retcode gpstop -raf; (exited with code 0) diff --git a/tests/isolation2/sql/config.sql b/tests/isolation2/sql/config.sql index c551d847a6e..09111fd8a48 100644 --- a/tests/isolation2/sql/config.sql +++ b/tests/isolation2/sql/config.sql @@ -2,7 +2,7 @@ CREATE DATABASE diskquota; --end_ignore -!\retcode gpconfig -c shared_preload_libraries -v $(../../cmake/current_binary_name); +!\retcode gpconfig -c shared_preload_libraries -v $(./data/current_binary_name); !\retcode gpstop -raf; !\retcode gpconfig -c diskquota.naptime -v 0; diff --git a/tests/regress/expected/test_ctas_no_preload_lib.out b/tests/regress/expected/test_ctas_no_preload_lib.out index 38d63bf2e62..a3541b16c74 100644 --- a/tests/regress/expected/test_ctas_no_preload_lib.out +++ b/tests/regress/expected/test_ctas_no_preload_lib.out @@ -6,7 +6,7 @@ SET ROLE test; -- Create table with diskquota disabled CREATE TABLE t_without_diskquota (i) AS SELECT generate_series(1, 100000) DISTRIBUTED BY (i); -\! gpconfig -c shared_preload_libraries -v $(../../cmake/current_binary_name) > /dev/null +\! gpconfig -c shared_preload_libraries -v $(./data/current_binary_name) > /dev/null \! gpstop -far > /dev/null \c -- Make sure that the worker has started. diff --git a/tests/regress/sql/config.sql b/tests/regress/sql/config.sql index 225ff14603b..fc0deb11e31 100644 --- a/tests/regress/sql/config.sql +++ b/tests/regress/sql/config.sql @@ -1,7 +1,7 @@ --start_ignore CREATE DATABASE diskquota; -\! gpconfig -c shared_preload_libraries -v $(../../cmake/current_binary_name); +\! gpconfig -c shared_preload_libraries -v $(./data/current_binary_name); \! gpstop -raf \! gpconfig -c diskquota.naptime -v 0 diff --git a/tests/regress/sql/test_ctas_no_preload_lib.sql b/tests/regress/sql/test_ctas_no_preload_lib.sql index 37ee1a7886a..717189da5ce 100644 --- a/tests/regress/sql/test_ctas_no_preload_lib.sql +++ b/tests/regress/sql/test_ctas_no_preload_lib.sql @@ -10,7 +10,7 @@ SET ROLE test; CREATE TABLE t_without_diskquota (i) AS SELECT generate_series(1, 100000) DISTRIBUTED BY (i); -\! gpconfig -c shared_preload_libraries -v $(../../cmake/current_binary_name) > /dev/null +\! gpconfig -c shared_preload_libraries -v $(./data/current_binary_name) > /dev/null \! gpstop -far > /dev/null \c diff --git a/upgrade_test/expected/init.out b/upgrade_test/expected/init.out index c82c103ebd8..12069b85663 100644 --- a/upgrade_test/expected/init.out +++ b/upgrade_test/expected/init.out @@ -1,5 +1,5 @@ -- start_ignore -\! gpconfig -c shared_preload_libraries -v $(../../cmake/current_binary_name) > /dev/null +\! 
gpconfig -c shared_preload_libraries -v $(./data/current_binary_name) > /dev/null
 -- end_ignore
 \! echo $?
 0
diff --git a/upgrade_test/sql/init.sql b/upgrade_test/sql/init.sql
index a00312f2bfc..5ee8828d5ac 100644
--- a/upgrade_test/sql/init.sql
+++ b/upgrade_test/sql/init.sql
@@ -1,5 +1,5 @@
 -- start_ignore
-\! gpconfig -c shared_preload_libraries -v $(../../cmake/current_binary_name) > /dev/null
+\! gpconfig -c shared_preload_libraries -v $(./data/current_binary_name) > /dev/null
 -- end_ignore
 \! echo $?
 -- start_ignore

From 504cb950ce596409f1a972c50e3ca81aabb9181d Mon Sep 17 00:00:00 2001
From: Zhang Hao
Date: Mon, 7 Mar 2022 18:58:25 +0800
Subject: [PATCH 150/330] Dispatch only once for all tables when init() (#107)

Add a UDF to accelerate init_table_size_table(). In this UDF we:
traverse pg_class to calculate the relation size of each relation;
parse each auxiliary table's name to get the relevant primary table's
oid; and add the size of the auxiliary table to the primary table's
size. This avoids multiple relation_open() calls and repeated
dispatches of pg_table_size().

Co-authored-by: Xuebin Su
---
 diskquota--2.0.sql                            |  11 +-
 diskquota_utility.c                           | 252 +++++++++++-------
 tests/regress/diskquota_schedule              |   1 +
 .../expected/test_init_table_size_table.out   |  71 +++++
 .../sql/test_init_table_size_table.sql        |  49 ++++
 5 files changed, 286 insertions(+), 98 deletions(-)
 create mode 100644 tests/regress/expected/test_init_table_size_table.out
 create mode 100644 tests/regress/sql/test_init_table_size_table.sql

diff --git a/diskquota--2.0.sql b/diskquota--2.0.sql
index 810a9b38a53..1340a6d9e97 100644
--- a/diskquota--2.0.sql
+++ b/diskquota--2.0.sql
@@ -287,10 +287,13 @@ FROM
 -- prepare to boot
 INSERT INTO diskquota.state SELECT (count(relname) = 0)::int FROM pg_class AS c, pg_namespace AS n WHERE c.oid > 16384 AND relnamespace = n.oid AND nspname != 'diskquota';
-CREATE FUNCTION diskquota.diskquota_start_worker() RETURNS void STRICT AS '$libdir/diskquota-2.0.so' LANGUAGE C;
-SELECT diskquota.diskquota_start_worker();
-DROP FUNCTION diskquota.diskquota_start_worker();
-
 -- re-dispatch pause status to false. in case user pause-drop-recreate.
 -- refer to see test case 'test_drop_after_pause'
 SELECT FROM diskquota.resume();
+
+CREATE OR REPLACE FUNCTION diskquota.pull_all_table_size(OUT tableid oid, OUT size bigint, OUT segid smallint) RETURNS SETOF RECORD AS '$libdir/diskquota-2.0.so', 'pull_all_table_size' LANGUAGE C;
+
+-- Starting the worker has to be the last step.
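-- A sketch for illustration (editorial annotation, not part of the shipped
-- script): with pull_all_table_size() above, init_table_size_table() can
-- collect every table's size from the master and all segments in a single
-- dispatch. The query built for the 1.0 table layout in diskquota_utility.c
-- is roughly:
--
--   INSERT INTO diskquota.table_size
--   WITH all_size AS (
--       SELECT diskquota.pull_all_table_size() AS a FROM gp_dist_random('gp_id')
--       UNION ALL
--       SELECT diskquota.pull_all_table_size())
--   SELECT (a).tableid, sum((a).size) FROM all_size GROUP BY (a).tableid;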
+CREATE FUNCTION diskquota.diskquota_start_worker() RETURNS void STRICT AS '$libdir/diskquota-2.0.so' LANGUAGE C; +SELECT diskquota.diskquota_start_worker(); +DROP FUNCTION diskquota.diskquota_start_worker(); diff --git a/diskquota_utility.c b/diskquota_utility.c index cd8b33fd1a1..0b031c24929 100644 --- a/diskquota_utility.c +++ b/diskquota_utility.c @@ -38,6 +38,7 @@ #include "utils/formatting.h" #include "utils/numeric.h" #include "libpq-fe.h" +#include "funcapi.h" #include #include @@ -58,6 +59,7 @@ PG_FUNCTION_INFO_V1(set_schema_tablespace_quota); PG_FUNCTION_INFO_V1(set_role_tablespace_quota); PG_FUNCTION_INFO_V1(set_per_segment_quota); PG_FUNCTION_INFO_V1(relation_size_local); +PG_FUNCTION_INFO_V1(pull_all_table_size); /* timeout count to wait response from launcher process, in 1/10 sec */ #define WAIT_TIME_COUNT 1200 @@ -70,8 +72,6 @@ static const char *ddl_err_code_to_err_message(MessageResult code); static int64 get_size_in_mb(char *str); static void set_quota_config_internal(Oid targetoid, int64 quota_limit_mb, QuotaType type); static void set_target_internal(Oid primaryoid, Oid spcoid, int64 quota_limit_mb, QuotaType type); -static bool generate_insert_table_size_sql(StringInfoData *buf, int extMajorVersion); -static char *convert_oidlist_to_string(List *oidlist); int get_ext_major_version(void); List *get_rel_oid_list(void); @@ -88,12 +88,10 @@ init_table_size_table(PG_FUNCTION_ARGS) { int ret; StringInfoData buf; - StringInfoData insert_buf; RangeVar *rv; Relation rel; int extMajorVersion; - bool insert_flag; /* * If error happens in init_table_size_table, just return error messages * to the client side. So there is no need to catch the error. @@ -125,51 +123,44 @@ init_table_size_table(PG_FUNCTION_ARGS) */ SPI_connect(); extMajorVersion = get_ext_major_version(); - char *oids = convert_oidlist_to_string(get_rel_oid_list()); /* delete all the table size info in table_size if exist. */ initStringInfo(&buf); - initStringInfo(&insert_buf); - appendStringInfo(&buf, "delete from diskquota.table_size;"); + appendStringInfo(&buf, "truncate table diskquota.table_size;"); ret = SPI_execute(buf.data, false, 0); - if (ret != SPI_OK_DELETE) - elog(ERROR, "cannot delete table_size table: error code %d", ret); + if (ret != SPI_OK_UTILITY) + elog(ERROR, "cannot truncate table_size table: error code %d", ret); - /* fetch table size for master*/ - resetStringInfo(&buf); - appendStringInfo(&buf, - "select oid, pg_table_size(oid), -1" - " from pg_class" - " where oid in (%s);", - oids); - ret = SPI_execute(buf.data, false, 0); - if (ret != SPI_OK_SELECT) - elog(ERROR, "cannot fetch in pg_table_size. error code %d", ret); - - /* fill table_size table with table oid and size info for master. */ - appendStringInfo(&insert_buf, - "insert into diskquota.table_size values"); - insert_flag = generate_insert_table_size_sql(&insert_buf, extMajorVersion); - /* fetch table size on segments*/ - resetStringInfo(&buf); - appendStringInfo(&buf, - "select oid, pg_table_size(oid), gp_segment_id" - " from gp_dist_random('pg_class')" - " where oid in (%s);", - oids); - ret = SPI_execute(buf.data, false, 0); - if (ret != SPI_OK_SELECT) - elog(ERROR, "cannot fetch in pg_table_size. error code %d", ret); - - /* fill table_size table with table oid and size info for segments. 
*/ - insert_flag = insert_flag | generate_insert_table_size_sql(&insert_buf, extMajorVersion); - if (insert_flag) + if (extMajorVersion == 1) + { + resetStringInfo(&buf); + appendStringInfo(&buf, "INSERT INTO diskquota.table_size WITH all_size AS " + "(SELECT diskquota.pull_all_table_size() as a FROM gp_dist_random('gp_id') " + "UNION ALL SELECT diskquota.pull_all_table_size()) " + "SELECT (a).tableid, sum((a).size) FROM all_size GROUP BY (a).tableid;"); + ret = SPI_execute(buf.data, false, 0); + if (ret != SPI_OK_INSERT) + elog(ERROR, "cannot insert into table_size table: error code %d", ret); + } + else { - truncateStringInfo(&insert_buf, insert_buf.len - strlen(",")); - appendStringInfo(&insert_buf, ";"); - ret = SPI_execute(insert_buf.data, false, 0); + resetStringInfo(&buf); + appendStringInfo(&buf, "INSERT INTO diskquota.table_size WITH all_size AS " + "(SELECT diskquota.pull_all_table_size() as a FROM gp_dist_random('gp_id')) " + "SELECT (a).* FROM all_size;"); + ret = SPI_execute(buf.data, false, 0); + if (ret != SPI_OK_INSERT) + elog(ERROR, "cannot insert into table_size table: error code %d", ret); + + resetStringInfo(&buf); + /* size is the sum of size on master and on all segments when segid == -1. */ + appendStringInfo(&buf, "INSERT INTO diskquota.table_size WITH total_size AS " + "(SELECT * from diskquota.pull_all_table_size() " + "UNION ALL SELECT tableid, size, segid FROM diskquota.table_size) " + "SELECT tableid, sum(size) as size, -1 as segid FROM total_size GROUP BY tableid;"); + ret = SPI_execute(buf.data, false, 0); if (ret != SPI_OK_INSERT) - elog(ERROR, "cannot insert table_size_per_segment table: error code %d", ret); + elog(ERROR, "cannot insert into table_size table: error code %d", ret); } /* set diskquota state to ready. 
*/ @@ -185,46 +176,138 @@ init_table_size_table(PG_FUNCTION_ARGS) PG_RETURN_VOID(); } -/* last_part is true means there is no other set of values to be inserted to table_size */ -static bool -generate_insert_table_size_sql(StringInfoData *insert_buf, int extMajorVersion) +static HTAB* +calculate_all_table_size() { - TupleDesc tupdesc = SPI_tuptable->tupdesc; - bool insert_flag = false; - for(int i = 0; i < SPI_processed; i++) + Relation classRel; + HeapTuple tuple; + HeapScanDesc relScan; + Oid relid; + Oid prelid; + Size tablesize; + RelFileNodeBackend rnode; + TableEntryKey keyitem; + HTAB *local_table_size_map; + HASHCTL hashctl; + DiskQuotaActiveTableEntry *entry; + bool found; + + memset(&hashctl, 0, sizeof(hashctl)); + hashctl.keysize = sizeof(TableEntryKey); + hashctl.entrysize = sizeof(DiskQuotaActiveTableEntry); + hashctl.hcxt = CurrentMemoryContext; + hashctl.hash = tag_hash; + + local_table_size_map = hash_create("local_table_size_map", + 1024, &hashctl, + HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); + + classRel = heap_open(RelationRelationId, AccessShareLock); + relScan = heap_beginscan_catalog(classRel, 0, NULL); + while ((tuple = heap_getnext(relScan, ForwardScanDirection)) != NULL) { - HeapTuple tup; - bool isnull; - Oid oid; - int64 sz; - int16 segid; + Form_pg_class classForm = (Form_pg_class) GETSTRUCT(tuple); + if (classForm->relkind != RELKIND_RELATION && + classForm->relkind != RELKIND_MATVIEW && + classForm->relkind != RELKIND_INDEX && + classForm->relkind != RELKIND_AOSEGMENTS && + classForm->relkind != RELKIND_AOBLOCKDIR && + classForm->relkind != RELKIND_AOVISIMAP && + classForm->relkind != RELKIND_TOASTVALUE) + continue; - tup = SPI_tuptable->vals[i]; - oid = SPI_getbinval(tup,tupdesc, 1, &isnull); - sz = SPI_getbinval(tup,tupdesc, 2, &isnull); - segid = SPI_getbinval(tup,tupdesc, 3, &isnull); - switch (extMajorVersion) + relid = HeapTupleGetOid(tuple); + /* ignore system table */ + if (relid < FirstNormalObjectId) + continue; + + rnode.node.dbNode = MyDatabaseId; + rnode.node.relNode = classForm->relfilenode; + rnode.node.spcNode = OidIsValid(classForm->reltablespace) ? classForm->reltablespace : MyDatabaseTableSpace; + rnode.backend = classForm->relpersistence == RELPERSISTENCE_TEMP ? 
TempRelBackendId : InvalidBackendId;
+		tablesize = calculate_relation_size_all_forks(&rnode, classForm->relstorage);
+
+		keyitem.reloid = relid;
+		keyitem.segid = GpIdentity.segindex;
+
+		prelid = diskquota_parse_primary_table_oid(classForm->relnamespace, classForm->relname.data);
+		if (OidIsValid(prelid))
 		{
-			case 1:
-				/* for version 1.0, only insert the values from master */
-				if (segid == -1)
-				{
-					appendStringInfo(insert_buf, " ( %u, %ld),", oid, sz);
-					insert_flag = true;
-				}
-				break;
-			case 2:
-				appendStringInfo(insert_buf, " ( %u, %ld, %d),", oid, sz, segid);
-				insert_flag = true;
-				break;
-			default:
-				ereport(ERROR,
-						(errcode(ERRCODE_INTERNAL_ERROR),
-						 errmsg("[diskquota] unknown diskquota extension version: %d", extMajorVersion)));
+			keyitem.reloid = prelid;
+		}
+		entry = hash_search(local_table_size_map, &keyitem, HASH_ENTER, &found);
+		if (!found)
+		{
+			entry->tablesize = 0;
 		}
+		entry->tablesize += tablesize;
+	}
+	heap_endscan(relScan);
+	heap_close(classRel, AccessShareLock);
+
+	return local_table_size_map;
+}
+
+Datum
+pull_all_table_size(PG_FUNCTION_ARGS)
+{
+	DiskQuotaActiveTableEntry *entry;
+	FuncCallContext *funcctx;
+	struct PullAllTableSizeCtx {
+		HASH_SEQ_STATUS iter;
+		HTAB *local_table_size_map;
+	} *table_size_ctx;
+
+	if (SRF_IS_FIRSTCALL())
+	{
+		TupleDesc tupdesc;
+		MemoryContext oldcontext;
+
+		/* Create a function context for cross-call persistence. */
+		funcctx = SRF_FIRSTCALL_INIT();
+
+		/* Switch to memory context appropriate for multiple function calls */
+		oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
+
+		tupdesc = CreateTemplateTupleDesc(3, false /*hasoid*/);
+		TupleDescInitEntry(tupdesc, (AttrNumber) 1, "TABLEID", OIDOID, -1 /*typmod*/, 0 /*attdim*/);
+		TupleDescInitEntry(tupdesc, (AttrNumber) 2, "SIZE", INT8OID, -1 /*typmod*/, 0 /*attdim*/);
+		TupleDescInitEntry(tupdesc, (AttrNumber) 3, "SEGID", INT2OID, -1 /*typmod*/, 0 /*attdim*/);
+		funcctx->tuple_desc = BlessTupleDesc(tupdesc);
+
+		/* Create a local hash table and fill it by scanning pg_class and
+		 * computing the size of each relation on this node. */
+		table_size_ctx = (struct PullAllTableSizeCtx *) palloc(sizeof(struct PullAllTableSizeCtx));
+		table_size_ctx->local_table_size_map = calculate_all_table_size();
+
+		/* Setup first calling context. */
+		hash_seq_init(&(table_size_ctx->iter), table_size_ctx->local_table_size_map);
+		funcctx->user_fctx = (void *) table_size_ctx;
+		MemoryContextSwitchTo(oldcontext);
+	}
+
+	funcctx = SRF_PERCALL_SETUP();
+	table_size_ctx = (struct PullAllTableSizeCtx *) funcctx->user_fctx;
+
+	while ((entry = hash_seq_search(&(table_size_ctx->iter))) != NULL)
+	{
+		Datum result;
+		Datum values[3];
+		bool nulls[3];
+		HeapTuple tuple;
+
+		values[0] = ObjectIdGetDatum(entry->reloid);
+		values[1] = Int64GetDatum(entry->tablesize);
+		values[2] = Int16GetDatum(entry->segid);
+
+		memset(nulls, false, sizeof(nulls));
+		tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls);
+		result = HeapTupleGetDatum(tuple);
+
+		SRF_RETURN_NEXT(funcctx, result);
 	}
-	return insert_flag;
+
+	SRF_RETURN_DONE(funcctx);
 }
 
 /*
  * Trigger to start diskquota worker when create extension diskquota. 
@@ -1167,25 +1250,6 @@ get_ext_major_version(void) return 0; } -static char * -convert_oidlist_to_string(List *oidlist) -{ - StringInfoData buf; - bool hasOid = false; - ListCell *l; - initStringInfo(&buf); - - foreach(l, oidlist) - { - Oid oid = lfirst_oid(l); - appendStringInfo(&buf, "%u, ", oid); - hasOid = true; - } - if (hasOid) - truncateStringInfo(&buf, buf.len - strlen(", ")); - return buf.data; -} - /* * Get the list of oids of the tables which diskquota * needs to care about in the database. diff --git a/tests/regress/diskquota_schedule b/tests/regress/diskquota_schedule index 3598cd751e1..e1350e694db 100644 --- a/tests/regress/diskquota_schedule +++ b/tests/regress/diskquota_schedule @@ -1,5 +1,6 @@ test: config test: test_create_extension +test: test_init_table_size_table test: test_relation_size test: test_relation_cache test: test_uncommitted_table_size diff --git a/tests/regress/expected/test_init_table_size_table.out b/tests/regress/expected/test_init_table_size_table.out new file mode 100644 index 00000000000..30744e0f6f5 --- /dev/null +++ b/tests/regress/expected/test_init_table_size_table.out @@ -0,0 +1,71 @@ +-- heap table +CREATE TABLE t(i int) DISTRIBUTED BY (i); +INSERT INTO t SELECT generate_series(1, 100000); +-- heap table index +CREATE INDEX idx on t(i); +-- toast table +CREATE TABLE toast(t text) DISTRIBUTED BY (t); +INSERT INTO toast SELECT repeat('a', 10000) FROM generate_series(1, 1000); +-- toast table index +CREATE INDEX toast_idx on toast(t); +-- AO table +CREATE TABLE ao (i int) WITH (appendonly=true) DISTRIBUTED BY (i); +INSERT INTO ao SELECT generate_series(1, 100000); +-- AO table index +CREATE INDEX ao_idx on ao(i); +-- AOCS table +CREATE TABLE aocs (i int, t text) WITH (appendonly=true, orientation=column) DISTRIBUTED BY (i); +INSERT INTO aocs SELECT i, repeat('a', 1000) FROM generate_series(1, 10000) AS i; +-- AOCS table index +CREATE INDEX aocs_idx on aocs(i); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- Tables here are fetched by diskquota_fetch_table_stat() +SELECT tableid::regclass, size, segid +FROM diskquota.table_size +WHERE segid = -1 AND tableid::regclass::name NOT LIKE '%.%' +ORDER BY tableid; + tableid | size | segid +-----------+----------+------- + t | 3932160 | -1 + idx | 2490368 | -1 + toast | 393216 | -1 + toast_idx | 327680 | -1 + ao | 1591464 | -1 + ao_idx | 2490368 | -1 + aocs | 10813592 | -1 + aocs_idx | 524288 | -1 +(8 rows) + +-- init diskquota.table_size +SELECT diskquota.init_table_size_table(); + init_table_size_table +----------------------- + +(1 row) + +-- diskquota.table_size should not change after init_table_size_table() +SELECT tableid::regclass, size, segid +FROM diskquota.table_size +WHERE segid = -1 AND tableid::regclass::name NOT LIKE '%.%' +ORDER BY tableid; + tableid | size | segid +-----------+----------+------- + t | 3932160 | -1 + idx | 2490368 | -1 + toast | 393216 | -1 + toast_idx | 327680 | -1 + ao | 1591464 | -1 + ao_idx | 2490368 | -1 + aocs | 10813592 | -1 + aocs_idx | 524288 | -1 +(8 rows) + +DROP TABLE t; +DROP TABLE toast; +DROP TABLE ao; +DROP TABLE aocs; diff --git a/tests/regress/sql/test_init_table_size_table.sql b/tests/regress/sql/test_init_table_size_table.sql new file mode 100644 index 00000000000..9acc928f995 --- /dev/null +++ b/tests/regress/sql/test_init_table_size_table.sql @@ -0,0 +1,49 @@ +-- heap table +CREATE TABLE t(i int) DISTRIBUTED BY (i); +INSERT INTO t SELECT generate_series(1, 100000); + +-- heap 
table index
+CREATE INDEX idx on t(i);
+
+-- toast table
+CREATE TABLE toast(t text) DISTRIBUTED BY (t);
+INSERT INTO toast SELECT repeat('a', 10000) FROM generate_series(1, 1000);
+
+-- toast table index
+CREATE INDEX toast_idx on toast(t);
+
+-- AO table
+CREATE TABLE ao (i int) WITH (appendonly=true) DISTRIBUTED BY (i);
+INSERT INTO ao SELECT generate_series(1, 100000);
+
+-- AO table index
+CREATE INDEX ao_idx on ao(i);
+
+-- AOCS table
+CREATE TABLE aocs (i int, t text) WITH (appendonly=true, orientation=column) DISTRIBUTED BY (i);
+INSERT INTO aocs SELECT i, repeat('a', 1000) FROM generate_series(1, 10000) AS i;
+
+-- AOCS table index
+CREATE INDEX aocs_idx on aocs(i);
+
+SELECT diskquota.wait_for_worker_new_epoch();
+
+-- Tables here are fetched by diskquota_fetch_table_stat()
+SELECT tableid::regclass, size, segid
+FROM diskquota.table_size
+WHERE segid = -1 AND tableid::regclass::name NOT LIKE '%.%'
+ORDER BY tableid;
+
+-- init diskquota.table_size
+SELECT diskquota.init_table_size_table();
+
+-- diskquota.table_size should not change after init_table_size_table()
+SELECT tableid::regclass, size, segid
+FROM diskquota.table_size
+WHERE segid = -1 AND tableid::regclass::name NOT LIKE '%.%'
+ORDER BY tableid;
+
+DROP TABLE t;
+DROP TABLE toast;
+DROP TABLE ao;
+DROP TABLE aocs;

From 240a6b7959a21426fe33a41b3024040d5fd65c58 Mon Sep 17 00:00:00 2001
From: Chen Mulong
Date: Tue, 8 Mar 2022 11:21:02 +0800
Subject: [PATCH 151/330] Pack the last released so in the pipeline (#172)

* Pack the last released so in the pipeline

Due to our upgrade design, the newly released diskquota should contain
the .so files of every latest minor release. The packaging system has
been implemented via the cmake option DISKQUOTA_PREVIOUS_INSTALLER.
This commit gets the latest release through a GCS regexp and passes it
to the build script.

The pipeline has been split into different jobs based on the OS type
because:
- It is easier to use a common resource name for build_diskquota.yml.
- job_def.lib.yml becomes shorter.
- Jobs can be spread across different Concourse workers, which may
  improve performance.
- Re-running a flaky test is easier.

Also, a gate job has been added, which is the main entrance for all
pipelines. In the future, we may want to have different gate jobs for
different pipelines, like a clang-format checking gate for the PR
pipeline.
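A minimal sketch of the resulting job graph, for illustration only (the
real jobs are generated from job_def.lib.yml below, and the names follow
the confs used by each pipeline): the gate job fetches the triggering
resource, and every per-OS build_test job declares "passed: [gate]", so
it only runs for commits that have already passed the gate:

    jobs:
    - name: gate
      plan:
      - get: diskquota_src
        trigger: true
    - name: build_test_rhel8
      plan:
      - get: diskquota_src
        trigger: true
        passed: [gate]
      # ... per-OS build and test tasks follow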
---
 CMakeLists.txt                         |  6 +--
 concourse/fly.sh                       |  2 +-
 concourse/pipeline/commit.yml          | 29 +++++++----
 concourse/pipeline/dev.yml             | 22 +++++---
 concourse/pipeline/job_def.lib.yml     | 70 ++++++++++++++------------
 concourse/pipeline/pr.yml              | 30 ++++++-----
 concourse/pipeline/res_def.yml         | 29 +++++++++++
 concourse/pipeline/trigger_def.lib.yml | 28 +++++------
 concourse/scripts/build_diskquota.sh   |  4 +-
 concourse/tasks/build_diskquota.yml    |  1 +
 10 files changed, 139 insertions(+), 82 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 1297f0ecfa4..0ce232b2540 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -84,9 +84,9 @@ add_subdirectory(tests)
 # packing part, move to a separate file if this part is too large
 include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/Distro.cmake)
-if(DEFINED DISKQUOTA_PREVIOUS_INSTALLER)
-  message(STATUS "Copy pervious installer from ${DISKQUOTA_PREVIOUS_INSTALLER}")
-  file(ARCHIVE_EXTRACT INPUT ${DISKQUOTA_PREVIOUS_INSTALLER} PATTERNS "*.so")
+if(DEFINED DISKQUOTA_LAST_RELEASE_PATH)
+  message(STATUS "Copy previous installer from ${DISKQUOTA_LAST_RELEASE_PATH}")
+  file(ARCHIVE_EXTRACT INPUT ${DISKQUOTA_LAST_RELEASE_PATH} PATTERNS "*.so")
   file(GLOB DISKQUOTA_PREVIOUS_LIBRARY "${CMAKE_BINARY_DIR}/lib/postgresql/*.so")
   install(PROGRAMS ${DISKQUOTA_PREVIOUS_LIBRARY}
           DESTINATION "lib/postgresql/")
diff --git a/concourse/fly.sh b/concourse/fly.sh
index efa59701211..d4c6e6c42ad 100755
--- a/concourse/fly.sh
+++ b/concourse/fly.sh
@@ -99,7 +99,7 @@ set -v
   -c "${yml_path}" \
   -l "${workspace}/gp-continuous-integration/secrets/gpdb_common-ci-secrets.yml" \
   -l "${workspace}/gp-continuous-integration/secrets/gp-extensions-common.yml" \
-  -l "${workspace}/gp-continuous-integration/secrets/gpdb_common-ci-secrets.yml" \
+  -l "${workspace}/gp-continuous-integration/secrets/gpdb_6X_STABLE-ci-secrets.prod.yml" \
   -v "diskquota-branch=${branch}"
 set +v
diff --git a/concourse/pipeline/commit.yml b/concourse/pipeline/commit.yml
index d6875d1839e..df5efa78a5e 100644
--- a/concourse/pipeline/commit.yml
+++ b/concourse/pipeline/commit.yml
@@ -1,4 +1,5 @@
 #@ load("job_def.lib.yml",
+#@   "gate_job",
 #@   "build_test_job",
 #@   "centos6_gpdb6_conf",
 #@   "centos7_gpdb6_conf",
@@ -11,18 +12,24 @@
 #@ load("base.lib.yml", "declare_res", "declare_res_type")
 #@ res_map = {}
 #@ res_type_map = {}
-#@ job_param = {
-#@   "res_map": res_map,
-#@   "trigger": commit_trigger(res_map),
-#@   "gpdb_src": "gpdb6_src",
-#@   "confs": [
-#@     centos6_gpdb6_conf(),
-#@     centos7_gpdb6_conf(),
-#@     rhel8_gpdb6_conf(),
-#@     ubuntu18_gpdb6_conf()]
-#@ }
+#@ trigger = commit_trigger(res_map)
+#@ confs = [
+#@   centos6_gpdb6_conf(),
+#@   centos7_gpdb6_conf(),
+#@   rhel8_gpdb6_conf(),
+#@   ubuntu18_gpdb6_conf()
+#@ ]
 jobs:
-- #@ build_test_job(job_param)
+- #@ gate_job(trigger)
+#@ for conf in confs:
+#@ param = {
+#@   "res_map": res_map,
+#@   "trigger": trigger,
+#@   "gpdb_src": "gpdb6_src",
+#@   "conf": conf
+#@ }
+- #@ build_test_job(param)
+#@ end
 resources: #@ declare_res(res_type_map, res_map)
diff --git a/concourse/pipeline/dev.yml b/concourse/pipeline/dev.yml
index a1979284981..83fd6b17aa5 100644
--- a/concourse/pipeline/dev.yml
+++ b/concourse/pipeline/dev.yml
@@ -1,4 +1,5 @@
 #@ load("job_def.lib.yml",
+#@   "gate_job",
 #@   "build_test_job",
 #@   "centos6_gpdb6_conf",
 #@   "centos7_gpdb6_conf",
@@ -11,15 +12,20 @@
 #@ load("base.lib.yml", "declare_res", "declare_res_type")
 #@ res_map = {}
 #@ res_type_map = {}
-#@ job_param = {
-#@   "res_map": res_map,
-#@   "trigger": commit_dev_trigger(res_map),
-#@   "gpdb_src": "gpdb6_src",
-#@   "confs": [
-#@ 
ubuntu18_gpdb6_conf()] -#@ } +#@ trigger = commit_dev_trigger(res_map) +#@ confs= [ +#@ ubuntu18_gpdb6_conf()] jobs: -- #@ build_test_job(job_param) +- #@ gate_job(trigger) +#@ for conf in confs: +#@ param = { +#@ "res_map": res_map, +#@ "trigger": trigger, +#@ "gpdb_src": "gpdb6_src", +#@ "conf": conf +#@ } +- #@ build_test_job(param) +#@ end resources: #@ declare_res(res_type_map, res_map) diff --git a/concourse/pipeline/job_def.lib.yml b/concourse/pipeline/job_def.lib.yml index f8db0f3f782..f7edc0f2501 100644 --- a/concourse/pipeline/job_def.lib.yml +++ b/concourse/pipeline/job_def.lib.yml @@ -1,11 +1,13 @@ #@ load("base.lib.yml", "add_res_by_conf", "add_res_by_name") +#@ load("@ytt:template", "template") #! Job config for centos7 #@ def centos6_gpdb6_conf(): res_build_image: centos6-gpdb6-image-build res_test_image: centos6-gpdb6-image-test res_gpdb_bin: bin_gpdb6_centos6 -diskquota_os: rhel6 +res_diskquota_bin: bin_diskquota_gpdb6_rhel6 +os: rhel6 #@ end #! Job config for centos7 @@ -13,7 +15,8 @@ diskquota_os: rhel6 res_build_image: centos7-gpdb6-image-build res_test_image: centos7-gpdb6-image-test res_gpdb_bin: bin_gpdb6_centos7 -diskquota_os: rhel7 +res_diskquota_bin: bin_diskquota_gpdb6_rhel7 +os: rhel7 #@ end #! Job config for rhel8 @@ -21,7 +24,8 @@ diskquota_os: rhel7 res_build_image: rhel8-gpdb6-image-build res_test_image: rhel8-gpdb6-image-test res_gpdb_bin: bin_gpdb6_rhel8 -diskquota_os: rhel8 +res_diskquota_bin: bin_diskquota_gpdb6_rhel8 +os: rhel8 #@ end #! Job config for ubuntu18 @@ -29,11 +33,23 @@ diskquota_os: rhel8 res_build_image: ubuntu18-gpdb6-image-build res_test_image: ubuntu18-gpdb6-image-test res_gpdb_bin: bin_gpdb6_ubuntu18 -diskquota_os: ubuntu18.04 +res_diskquota_bin: bin_diskquota_gpdb6_ubuntu18 +os: ubuntu18.04 +#@ end + +#@ def gate_job(trigger): +name: gate +on_failure: #@ trigger["on_failure"] +on_error: #@ trigger["on_error"] +plan: +#@ for to_get in trigger["to_get"]: +- trigger: true + _: #@ template.replace(to_get) +#@ end #@ end #@ def _build_task(conf): -task: #@ "build_" + conf["diskquota_os"] +task: #@ "build_" + conf["os"] file: diskquota_src/concourse/tasks/build_diskquota.yml image: #@ conf["res_build_image"] input_mapping: @@ -42,63 +58,53 @@ input_mapping: #! output_mapping is necessary. Otherwise we may use a wrong #! diskquota_bin in the test task. 
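#! For illustration (editorial annotation): with os=rhel8 the artifact flow
#! between the two tasks is roughly
#!   build task: diskquota_artifacts       -> diskquota_artifacts_rhel8 (output_mapping)
#!   test task:  diskquota_artifacts_rhel8 -> bin_diskquota             (input_mapping)
#! so each OS's test always consumes the artifacts built for that same OS.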
output_mapping: - "diskquota_artifacts": #@ "diskquota_artifacts_" + conf["diskquota_os"] + "diskquota_artifacts": #@ "diskquota_artifacts_" + conf["os"] params: - DISKQUOTA_OS: #@ conf["diskquota_os"] + DISKQUOTA_OS: #@ conf["os"] #@ end #@ def _test_task(conf): -task: #@ "test_" + conf["diskquota_os"] +task: #@ "test_" + conf["os"] timeout: 1h file: diskquota_src/concourse/tasks/test_diskquota.yml image: #@ conf["res_test_image"] input_mapping: bin_gpdb: #@ conf["res_gpdb_bin"] - bin_diskquota: #@ "diskquota_artifacts_" + conf["diskquota_os"] + bin_diskquota: #@ "diskquota_artifacts_" + conf["os"] params: - DISKQUOTA_OS: #@ conf["diskquota_os"] + DISKQUOTA_OS: #@ conf["os"] #@ end #@ def build_test_job(param): #@ res_map = param["res_map"] #@ trigger = param["trigger"] -#@ confs = param["confs"] +#@ conf = param["conf"] #@ add_res_by_name(res_map, param["gpdb_src"]) #@ add_res_by_name(res_map, "bin_cmake") -name: build_test +#@ add_res_by_conf(res_map, conf) +name: #@ "build_test_" + conf["os"] max_in_flight: 10 on_success: #@ trigger["on_success"] on_failure: #@ trigger["on_failure"] on_error: #@ trigger["on_error"] plan: -#@ for trigger_plan in trigger["plans"]: -- #@ trigger_plan +#@ for to_get in trigger["to_get"]: +- passed: [gate] + trigger: true + _: #@ template.replace(to_get) +#@ end +#@ for to_put in trigger["to_put"]: +- #@ to_put #@ end - in_parallel: - get: gpdb_src resource: #@ param["gpdb_src"] - get: bin_cmake -#@ for conf in confs: -#@ add_res_by_conf(res_map, conf) -#@ if conf["res_build_image"] == conf["res_test_image"]: - - get: #@ conf["res_build_image"] -#@ else: - get: #@ conf["res_build_image"] - get: #@ conf["res_test_image"] -#@ end - get: #@ conf["res_gpdb_bin"] -#@ end -#@ if len(confs) == 1: -#@ conf = confs[0] + - get: last_released_diskquota_bin + resource: #@ conf["res_diskquota_bin"] - #@ _build_task(conf) - #@ _test_task(conf) -#@ else: -- in_parallel: - steps: -#@ for conf in confs: - - do: - - #@ _build_task(conf) - - #@ _test_task(conf) -#@ end -#@ end #@ end diff --git a/concourse/pipeline/pr.yml b/concourse/pipeline/pr.yml index 3cd4a55a5b6..a145109ff1f 100644 --- a/concourse/pipeline/pr.yml +++ b/concourse/pipeline/pr.yml @@ -1,4 +1,5 @@ #@ load("job_def.lib.yml", +#@ "gate_job", #@ "build_test_job", #@ "centos6_gpdb6_conf", #@ "centos7_gpdb6_conf", @@ -13,19 +14,24 @@ #@ "declare_res_type") #@ res_map = {} #@ res_type_map = {} -#@ job_param = { -#@ "res_map": res_map, -#@ "gpdb_src": "gpdb6_src", -#@ "trigger": pr_trigger(res_map), -#@ "confs": [ -#@ centos6_gpdb6_conf(), -#@ centos7_gpdb6_conf(), -#@ rhel8_gpdb6_conf(), -#@ ubuntu18_gpdb6_conf() -#@ ] -#@ } +#@ trigger = pr_trigger(res_map) +#@ confs = [ +#@ centos6_gpdb6_conf(), +#@ centos7_gpdb6_conf(), +#@ rhel8_gpdb6_conf(), +#@ ubuntu18_gpdb6_conf() +#@ ] jobs: -- #@ build_test_job(job_param) +- #@ gate_job(trigger) +#@ for conf in confs: +#@ param = { +#@ "res_map": res_map, +#@ "trigger": trigger, +#@ "gpdb_src": "gpdb6_src", +#@ "conf": conf +#@ } +- #@ build_test_job(param) +#@ end resources: #@ declare_res(res_type_map, res_map) diff --git a/concourse/pipeline/res_def.yml b/concourse/pipeline/res_def.yml index c7953d324e4..1d4f3e9ae3c 100644 --- a/concourse/pipeline/res_def.yml +++ b/concourse/pipeline/res_def.yml @@ -128,6 +128,35 @@ resources: json_key: ((concourse-gcs-resources-service-account-key)) versioned_file: 6X_STABLE/bin_gpdb_rhel8/bin_gpdb.tar.gz +# Diskquota releases +- name: bin_diskquota_gpdb6_rhel6 + type: gcs + source: + bucket: ((gcs-bucket)) + json_key: 
((concourse-gcs-resources-service-account-key)) + regexp: diskquota/released/gpdb6/diskquota-(.*)-rhel6_x86_64.tar.gz + +- name: bin_diskquota_gpdb6_rhel7 + type: gcs + source: + bucket: ((gcs-bucket)) + json_key: ((concourse-gcs-resources-service-account-key)) + regexp: diskquota/released/gpdb6/diskquota-(.*)-rhel7_x86_64.tar.gz + +- name: bin_diskquota_gpdb6_rhel8 + type: gcs + source: + bucket: ((gcs-bucket)) + json_key: ((concourse-gcs-resources-service-account-key)) + regexp: diskquota/released/gpdb6/diskquota-(.*)-rhel8_x86_64.tar.gz + +- name: bin_diskquota_gpdb6_ubuntu18 + type: gcs + source: + bucket: ((gcs-bucket)) + json_key: ((concourse-gcs-resources-service-account-key)) + regexp: diskquota/released/gpdb6/diskquota-(.*)-ubuntu18.04_x86_64.tar.gz + # Other dependencies - name: bin_cmake type: gcs diff --git a/concourse/pipeline/trigger_def.lib.yml b/concourse/pipeline/trigger_def.lib.yml index 92e03df5421..3a1f12b6d2b 100644 --- a/concourse/pipeline/trigger_def.lib.yml +++ b/concourse/pipeline/trigger_def.lib.yml @@ -3,16 +3,16 @@ #! PR trigger. For pull request pipelines #@ def pr_trigger(res_map): #@ add_res_by_name(res_map, "diskquota_pr") -plans: -- get: diskquota_src - resource: diskquota_pr - params: - fetch_tags: true - trigger: true -- put: diskquota_pr - params: - path: diskquota_src - status: pending +to_get: + - get: diskquota_src + resource: diskquota_pr + params: + fetch_tags: true +to_put: + - put: diskquota_pr + params: + path: diskquota_src + status: pending on_failure: put: diskquota_pr params: @@ -33,10 +33,10 @@ on_success: #! Commit trigger. For master pipelines #@ def commit_trigger(res_map): #@ add_res_by_name(res_map, "diskquota_commit") -plans: +to_get: - get: diskquota_src resource: diskquota_commit - trigger: true +to_put: #@ [] #! To set the github commit status, https://github.com/Pix4D/cogito is a good choice. #! Unfortunately it doesn't work with Concourse 5. on_success: @@ -47,10 +47,10 @@ on_error: #! Commit trigger. For dev pipelines. No webhook #@ def commit_dev_trigger(res_map): #@ add_res_by_name(res_map, "diskquota_commit_dev") -plans: +to_get: - get: diskquota_src resource: diskquota_commit_dev - trigger: true +to_put: #@ [] #! To set the github commit status, https://github.com/Pix4D/cogito is a good choice. #! Unfortunately it doesn't work with Concourse 5. on_success: diff --git a/concourse/scripts/build_diskquota.sh b/concourse/scripts/build_diskquota.sh index ea443540612..cadb9285dce 100755 --- a/concourse/scripts/build_diskquota.sh +++ b/concourse/scripts/build_diskquota.sh @@ -11,7 +11,9 @@ function pkg() { fi pushd /home/gpadmin/diskquota_artifacts - cmake /home/gpadmin/diskquota_src + local last_release_path + last_release_path=$(readlink -e /home/gpadmin/last_released_diskquota_bin/diskquota-*.tar.gz) + cmake /home/gpadmin/diskquota_src -DDISKQUOTA_LAST_RELEASE_PATH="${last_release_path}" cmake --build . 
--target package popd } diff --git a/concourse/tasks/build_diskquota.yml b/concourse/tasks/build_diskquota.yml index 951cf45de7d..5fe11afa0c4 100644 --- a/concourse/tasks/build_diskquota.yml +++ b/concourse/tasks/build_diskquota.yml @@ -6,6 +6,7 @@ inputs: - name: diskquota_src - name: gpdb_src - name: bin_cmake + - name: last_released_diskquota_bin outputs: - name: diskquota_artifacts From 283775f69f77b1c6bb8e7d6a050d3b89c07ced9c Mon Sep 17 00:00:00 2001 From: Sasasu Date: Tue, 8 Mar 2022 15:53:12 +0800 Subject: [PATCH 152/330] add upgrade and downgrade test (#166) --- CMakeLists.txt | 7 +- concourse/scripts/test_diskquota.sh | 2 + diskquota--1.0--2.0.sql | 185 +++++++++--- diskquota--2.0--1.0.sql | 1 + diskquota--2.0.sql | 84 +++--- diskquota.c | 88 ++++-- diskquota.h | 3 + diskquota_utility.c | 104 +++++-- upgrade_test/CMakeLists.txt | 17 ++ upgrade_test/Makefile | 5 - upgrade_test/README.md | 36 ++- upgrade_test/diskquota_schedule_downgrade | 23 -- upgrade_test/diskquota_schedule_upgrade | 22 -- upgrade_test/expected/1.0_catalog.out | 135 +++++++++ upgrade_test/expected/1.0_cleanup_quota.out | 1 + upgrade_test/expected/1.0_install.out | 14 + .../expected/1.0_migrate_to_version_1.0.out | 12 + upgrade_test/expected/1.0_set_quota.out | 35 +++ .../1.0_test_in_2.0_quota_create_in_1.0.out | 10 + upgrade_test/expected/2.0_catalog.out | 264 ++++++++++++++++++ upgrade_test/expected/2.0_cleanup_quota.out | 1 + upgrade_test/expected/2.0_install.out | 14 + .../expected/2.0_migrate_to_version_2.0.out | 10 + upgrade_test/expected/2.0_set_quota.out | 63 +++++ .../2.0_test_in_1.0_quota_create_in_2.0.out | 14 + upgrade_test/expected/clean.out | 44 --- upgrade_test/expected/downgrade_extension.out | 2 - upgrade_test/expected/init.out | 21 -- upgrade_test/expected/install_new_version.out | 2 - upgrade_test/expected/install_old_version.out | 2 - upgrade_test/expected/prepare.out | 76 ----- upgrade_test/expected/set_config.out | 94 ------- upgrade_test/expected/test_delete_quota.out | 34 --- upgrade_test/expected/test_rename.out | 64 ----- upgrade_test/expected/test_reschema.out | 36 --- upgrade_test/expected/test_role.out | 46 --- upgrade_test/expected/test_schema.out | 57 ---- .../expected/test_tablespace_role.out | 1 - .../expected/test_tablespace_role_perseg.out | 1 - .../expected/test_tablespace_schema.out | 1 - .../test_tablespace_schema_perseg.out | 1 - upgrade_test/expected/test_temp_role.out | 36 --- upgrade_test/expected/upgrade_extension.out | 2 - upgrade_test/schedule_1.0--2.0 | 8 + upgrade_test/schedule_2.0--1.0 | 8 + upgrade_test/sql/1.0_catalog.sql | 80 ++++++ upgrade_test/sql/1.0_cleanup_quota.sql | 1 + upgrade_test/sql/1.0_install.sql | 17 ++ .../sql/1.0_migrate_to_version_1.0.sql | 10 + .../sql/1.0_not_work_using_2.x_binary.sql | 21 ++ upgrade_test/sql/1.0_set_quota.sql | 25 ++ .../1.0_test_in_2.0_quota_create_in_1.0.sql | 11 + upgrade_test/sql/2.0_catalog.sql | 81 ++++++ upgrade_test/sql/2.0_cleanup_quota.sql | 1 + upgrade_test/sql/2.0_install.sql | 17 ++ .../sql/2.0_migrate_to_version_2.0.sql | 8 + .../sql/2.0_not_work_using_1.x_binary.sql | 23 ++ upgrade_test/sql/2.0_set_quota.sql | 44 +++ .../2.0_test_in_1.0_quota_create_in_2.0.sql | 16 ++ upgrade_test/sql/clean.sql | 28 -- upgrade_test/sql/init.sql | 19 -- upgrade_test/sql/install_new_version.sql | 2 - upgrade_test/sql/install_old_version.sql | 2 - upgrade_test/sql/prepare.sql | 32 --- upgrade_test/sql/set_config.sql | 41 --- upgrade_test/sql/test.sh | 4 - upgrade_test/sql/test_delete_quota.sql | 19 -- 
upgrade_test/sql/test_manytable.sql | 30 -- upgrade_test/sql/test_rename.sql | 50 ---- upgrade_test/sql/test_reschema.sql | 20 -- upgrade_test/sql/test_role.sql | 37 --- upgrade_test/sql/test_schema.sql | 36 --- upgrade_test/sql/test_tablespace_role.sql | 1 - .../sql/test_tablespace_role_perseg.sql | 1 - upgrade_test/sql/test_tablespace_schema.sql | 1 - .../sql/test_tablespace_schema_perseg.sql | 1 - upgrade_test/sql/test_temp_role.sql | 24 -- 77 files changed, 1347 insertions(+), 1042 deletions(-) create mode 100644 upgrade_test/CMakeLists.txt delete mode 100644 upgrade_test/Makefile delete mode 100644 upgrade_test/diskquota_schedule_downgrade delete mode 100644 upgrade_test/diskquota_schedule_upgrade create mode 100644 upgrade_test/expected/1.0_catalog.out create mode 100644 upgrade_test/expected/1.0_cleanup_quota.out create mode 100644 upgrade_test/expected/1.0_install.out create mode 100644 upgrade_test/expected/1.0_migrate_to_version_1.0.out create mode 100644 upgrade_test/expected/1.0_set_quota.out create mode 100644 upgrade_test/expected/1.0_test_in_2.0_quota_create_in_1.0.out create mode 100644 upgrade_test/expected/2.0_catalog.out create mode 100644 upgrade_test/expected/2.0_cleanup_quota.out create mode 100644 upgrade_test/expected/2.0_install.out create mode 100644 upgrade_test/expected/2.0_migrate_to_version_2.0.out create mode 100644 upgrade_test/expected/2.0_set_quota.out create mode 100644 upgrade_test/expected/2.0_test_in_1.0_quota_create_in_2.0.out delete mode 100644 upgrade_test/expected/clean.out delete mode 100644 upgrade_test/expected/downgrade_extension.out delete mode 100644 upgrade_test/expected/init.out delete mode 100644 upgrade_test/expected/install_new_version.out delete mode 100644 upgrade_test/expected/install_old_version.out delete mode 100644 upgrade_test/expected/prepare.out delete mode 100644 upgrade_test/expected/set_config.out delete mode 100644 upgrade_test/expected/test_delete_quota.out delete mode 100644 upgrade_test/expected/test_rename.out delete mode 100644 upgrade_test/expected/test_reschema.out delete mode 100644 upgrade_test/expected/test_role.out delete mode 100644 upgrade_test/expected/test_schema.out delete mode 120000 upgrade_test/expected/test_tablespace_role.out delete mode 120000 upgrade_test/expected/test_tablespace_role_perseg.out delete mode 120000 upgrade_test/expected/test_tablespace_schema.out delete mode 120000 upgrade_test/expected/test_tablespace_schema_perseg.out delete mode 100644 upgrade_test/expected/test_temp_role.out delete mode 100644 upgrade_test/expected/upgrade_extension.out create mode 100644 upgrade_test/schedule_1.0--2.0 create mode 100644 upgrade_test/schedule_2.0--1.0 create mode 100644 upgrade_test/sql/1.0_catalog.sql create mode 100644 upgrade_test/sql/1.0_cleanup_quota.sql create mode 100644 upgrade_test/sql/1.0_install.sql create mode 100644 upgrade_test/sql/1.0_migrate_to_version_1.0.sql create mode 100644 upgrade_test/sql/1.0_not_work_using_2.x_binary.sql create mode 100644 upgrade_test/sql/1.0_set_quota.sql create mode 100644 upgrade_test/sql/1.0_test_in_2.0_quota_create_in_1.0.sql create mode 100644 upgrade_test/sql/2.0_catalog.sql create mode 100644 upgrade_test/sql/2.0_cleanup_quota.sql create mode 100644 upgrade_test/sql/2.0_install.sql create mode 100644 upgrade_test/sql/2.0_migrate_to_version_2.0.sql create mode 100644 upgrade_test/sql/2.0_not_work_using_1.x_binary.sql create mode 100644 upgrade_test/sql/2.0_set_quota.sql create mode 100644 upgrade_test/sql/2.0_test_in_1.0_quota_create_in_2.0.sql 
delete mode 100644 upgrade_test/sql/clean.sql delete mode 100644 upgrade_test/sql/init.sql delete mode 100644 upgrade_test/sql/install_new_version.sql delete mode 100644 upgrade_test/sql/install_old_version.sql delete mode 100644 upgrade_test/sql/prepare.sql delete mode 100644 upgrade_test/sql/set_config.sql delete mode 100644 upgrade_test/sql/test.sh delete mode 100644 upgrade_test/sql/test_delete_quota.sql delete mode 100644 upgrade_test/sql/test_manytable.sql delete mode 100644 upgrade_test/sql/test_rename.sql delete mode 100644 upgrade_test/sql/test_reschema.sql delete mode 100644 upgrade_test/sql/test_role.sql delete mode 100644 upgrade_test/sql/test_schema.sql delete mode 120000 upgrade_test/sql/test_tablespace_role.sql delete mode 120000 upgrade_test/sql/test_tablespace_role_perseg.sql delete mode 120000 upgrade_test/sql/test_tablespace_schema.sql delete mode 120000 upgrade_test/sql/test_tablespace_schema_perseg.sql delete mode 100644 upgrade_test/sql/test_temp_role.sql diff --git a/CMakeLists.txt b/CMakeLists.txt index 0ce232b2540..e0a2994d602 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -41,9 +41,9 @@ endif() set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} \ -DDISKQUOTA_VERSION='\"${DISKQUOTA_VERSION}\"' \ - -DDISKQUOTA_MAJOR_VERSION='\"${DISKQUOTA_MAJOR_VERSION}\"' \ - -DDISKQUOTA_MINOR_VERSION='\"${DISKQUOTA_MINOR_VERSION}\"' \ - -DDISKQUOTA_PATCH_VERSION='\"${DISKQUOTA_PATCH_VERSION}\"' \ + -DDISKQUOTA_MAJOR_VERSION=${DISKQUOTA_MAJOR_VERSION} \ + -DDISKQUOTA_MINOR_VERSION=${DISKQUOTA_MINOR_VERSION} \ + -DDISKQUOTA_PATCH_VERSION=${DISKQUOTA_PATCH_VERSION} \ -DDISKQUOTA_BINARY_NAME='\"${DISKQUOTA_BINARY_NAME}\"'") list( @@ -80,6 +80,7 @@ set_target_properties( # Add installcheck targets add_subdirectory(tests) +add_subdirectory(upgrade_test) # packing part, move to a separate file if this part is too large include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/Distro.cmake) diff --git a/concourse/scripts/test_diskquota.sh b/concourse/scripts/test_diskquota.sh index 42701c38dec..c34fa656507 100755 --- a/concourse/scripts/test_diskquota.sh +++ b/concourse/scripts/test_diskquota.sh @@ -20,6 +20,8 @@ function _main() { # Run test again with standby master activate_standby time cmake --build . --target installcheck + # Run upgrade test (with standby master) + time cmake --build . 
--target upgradecheck popd } diff --git a/diskquota--1.0--2.0.sql b/diskquota--1.0--2.0.sql index 74fa0d410e2..40fd5f81060 100644 --- a/diskquota--1.0--2.0.sql +++ b/diskquota--1.0--2.0.sql @@ -83,6 +83,7 @@ CREATE FUNCTION diskquota.relation_size_local( relpersistence "char", relstorage "char") RETURNS bigint STRICT AS '$libdir/diskquota-2.0.so', 'relation_size_local' LANGUAGE C; +CREATE FUNCTION diskquota.pull_all_table_size(OUT tableid oid, OUT size bigint, OUT segid smallint) RETURNS SETOF RECORD AS '$libdir/diskquota-2.0.so', 'pull_all_table_size' LANGUAGE C; CREATE FUNCTION diskquota.relation_size(relation regclass) RETURNS bigint STRICT AS $$ SELECT SUM(size)::bigint FROM ( SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, relstorage) AS size @@ -111,45 +112,161 @@ SELECT ( ) AS dbsize; /* ALTER */ CREATE OR REPLACE VIEW diskquota.show_fast_schema_quota_view AS -SELECT pgns.nspname AS schema_name, pgc.relnamespace AS schema_oid, qc.quotalimitMB AS quota_in_mb, SUM(ts.size) AS nspsize_in_bytes -FROM diskquota.table_size AS ts, - pg_class AS pgc, - diskquota.quota_config AS qc, - pg_namespace AS pgns -WHERE ts.tableid = pgc.oid AND qc.targetoid = pgc.relnamespace AND pgns.oid = pgc.relnamespace AND qc.quotatype = 0 AND ts.segid = -1 -GROUP BY relnamespace, qc.quotalimitMB, pgns.nspname -ORDER BY pgns.nspname; +WITH + quota_usage AS ( + SELECT + relnamespace, + SUM(size) AS total_size + FROM + diskquota.table_size, + pg_class + WHERE + tableid = pg_class.oid AND + segid = -1 + GROUP BY + relnamespace + ) +SELECT + nspname AS schema_name, + targetoid AS schema_oid, + quotalimitMB AS quota_in_mb, + COALESCE(total_size, 0) AS nspsize_in_bytes +FROM + diskquota.quota_config JOIN + pg_namespace ON targetoid = pg_namespace.oid LEFT OUTER JOIN + quota_usage ON pg_namespace.oid = relnamespace +WHERE + quotaType = 0; -- NAMESPACE_QUOTA /* ALTER */ CREATE OR REPLACE VIEW diskquota.show_fast_role_quota_view AS -SELECT pgr.rolname AS role_name, pgc.relowner AS role_oid, qc.quotalimitMB AS quota_in_mb, SUM(ts.size) AS rolsize_in_bytes -FROM diskquota.table_size AS ts, - pg_class AS pgc, - diskquota.quota_config AS qc, - pg_roles AS pgr -WHERE pgc.relowner = qc.targetoid AND pgc.relowner = pgr.oid AND ts.tableid = pgc.oid AND qc.quotatype = 1 AND ts.segid = -1 -GROUP BY pgc.relowner, pgr.rolname, qc.quotalimitMB; +WITH + quota_usage AS ( + SELECT + relowner, + SUM(size) AS total_size + FROM + diskquota.table_size, + pg_class + WHERE + tableid = pg_class.oid AND + segid = -1 + GROUP BY + relowner + ) +SELECT + rolname AS role_name, + targetoid AS role_oid, + quotalimitMB AS quota_in_mb, + COALESCE(total_size, 0) AS rolsize_in_bytes +FROM + diskquota.quota_config JOIN + pg_roles ON targetoid = pg_roles.oid LEFT OUTER JOIN + quota_usage ON pg_roles.oid = relowner +WHERE + quotaType = 1; -- ROLE_QUOTA CREATE VIEW diskquota.show_fast_schema_tablespace_quota_view AS -SELECT pgns.nspname AS schema_name, pgc.relnamespace AS schema_oid, pgsp.spcname AS tablespace_name, pgc.reltablespace AS tablespace_oid, qc.quotalimitMB AS quota_in_mb, SUM(ts.size) AS nspsize_tablespace_in_bytes -FROM diskquota.table_size AS ts, - pg_class AS pgc, - diskquota.quota_config AS qc, - pg_namespace AS pgns, - pg_tablespace AS pgsp, - diskquota.target AS t -WHERE ts.tableid = pgc.oid AND qc.targetoid = pgc.relnamespace AND pgns.oid = pgc.relnamespace AND pgsp.oid = pgc.reltablespace AND qc.quotatype = 2 AND qc.targetoid=t.primaryoid AND t.tablespaceoid=pgc.reltablespace AND ts.segid = -1 -GROUP BY 
relnamespace, reltablespace, qc.quotalimitMB, pgns.nspname, pgsp.spcname -ORDER BY pgns.nspname, pgsp.spcname; +WITH + default_tablespace AS ( + SELECT dattablespace FROM pg_database + WHERE datname = current_database() + ), + quota_usage AS ( + SELECT + relnamespace, + CASE + WHEN reltablespace = 0 THEN dattablespace + ELSE reltablespace + END AS reltablespace, + SUM(size) AS total_size + FROM + diskquota.table_size, + pg_class, + default_tablespace + WHERE + tableid = pg_class.oid AND + segid = -1 + GROUP BY + relnamespace, + reltablespace, + dattablespace + ), + full_quota_config AS ( + SELECT + targetOid, + tablespaceoid, + quotalimitMB + FROM + diskquota.quota_config AS config, + diskquota.target AS target + WHERE + config.targetOid = target.primaryOid AND + config.quotaType = target.quotaType AND + config.quotaType = 2 -- NAMESPACE_TABLESPACE_QUOTA + ) +SELECT + nspname AS schema_name, + targetoid AS schema_oid, + spcname AS tablespace_name, + tablespaceoid AS tablespace_oid, + quotalimitMB AS quota_in_mb, + COALESCE(total_size, 0) AS nspsize_tablespace_in_bytes +FROM + full_quota_config JOIN + pg_namespace ON targetoid = pg_namespace.oid JOIN + pg_tablespace ON tablespaceoid = pg_tablespace.oid LEFT OUTER JOIN + quota_usage ON pg_namespace.oid = relnamespace AND pg_tablespace.oid = reltablespace; CREATE VIEW diskquota.show_fast_role_tablespace_quota_view AS -SELECT pgr.rolname AS role_name, pgc.relowner AS role_oid, pgsp.spcname AS tablespace_name, pgc.reltablespace AS tablespace_oid, qc.quotalimitMB AS quota_in_mb, SUM(ts.size) AS rolsize_tablespace_in_bytes -FROM diskquota.table_size AS ts, - pg_class AS pgc, - diskquota.quota_config AS qc, - pg_roles AS pgr, - pg_tablespace AS pgsp, - diskquota.target AS t -WHERE pgc.relowner = qc.targetoid AND pgc.relowner = pgr.oid AND ts.tableid = pgc.oid AND pgsp.oid = pgc.reltablespace AND qc.quotatype = 3 AND qc.targetoid=t.primaryoid AND t.tablespaceoid=pgc.reltablespace AND ts.segid = -1 -GROUP BY pgc.relowner, reltablespace, pgr.rolname, pgsp.spcname, qc.quotalimitMB; +WITH + default_tablespace AS ( + SELECT dattablespace FROM pg_database + WHERE datname = current_database() + ), + quota_usage AS ( + SELECT + relowner, + CASE + WHEN reltablespace = 0 THEN dattablespace + ELSE reltablespace + END AS reltablespace, + SUM(size) AS total_size + FROM + diskquota.table_size, + pg_class, + default_tablespace + WHERE + tableid = pg_class.oid AND + segid = -1 + GROUP BY + relowner, + reltablespace, + dattablespace + ), + full_quota_config AS ( + SELECT + targetOid, + tablespaceoid, + quotalimitMB + FROM + diskquota.quota_config AS config, + diskquota.target AS target + WHERE + config.targetOid = target.primaryOid AND + config.quotaType = target.quotaType AND + config.quotaType = 3 -- ROLE_TABLESPACE_QUOTA + ) +SELECT + rolname AS role_name, + targetoid AS role_oid, + spcname AS tablespace_name, + tablespaceoid AS tablespace_oid, + quotalimitMB AS quota_in_mb, + COALESCE(total_size, 0) AS rolsize_tablespace_in_bytes +FROM + full_quota_config JOIN + pg_roles ON targetoid = pg_roles.oid JOIN + pg_tablespace ON tablespaceoid = pg_tablespace.oid LEFT OUTER JOIN + quota_usage ON pg_roles.oid = relowner AND pg_tablespace.oid = reltablespace; -- views end diff --git a/diskquota--2.0--1.0.sql b/diskquota--2.0--1.0.sql index fbde6e98a82..cba56dcaf26 100644 --- a/diskquota--2.0--1.0.sql +++ b/diskquota--2.0--1.0.sql @@ -57,6 +57,7 @@ DROP FUNCTION diskquota.relation_size_local( relfilenode oid, relpersistence "char", relstorage "char"); +DROP FUNCTION 
diskquota.pull_all_table_size(OUT tableid oid, OUT size bigint, OUT segid smallint); DROP FUNCTION diskquota.relation_size(relation regclass); DROP FUNCTION diskquota.show_relation_cache_all_seg(); -- UDF end diff --git a/diskquota--2.0.sql b/diskquota--2.0.sql index 1340a6d9e97..6b11d24f1a2 100644 --- a/diskquota--2.0.sql +++ b/diskquota--2.0.sql @@ -97,6 +97,7 @@ CREATE FUNCTION diskquota.relation_size_local( relpersistence "char", relstorage "char") RETURNS bigint STRICT AS '$libdir/diskquota-2.0.so', 'relation_size_local' LANGUAGE C; +CREATE FUNCTION diskquota.pull_all_table_size(OUT tableid oid, OUT size bigint, OUT segid smallint) RETURNS SETOF RECORD AS '$libdir/diskquota-2.0.so', 'pull_all_table_size' LANGUAGE C; CREATE FUNCTION diskquota.relation_size(relation regclass) RETURNS bigint STRICT AS $$ SELECT SUM(size)::bigint FROM ( @@ -117,12 +118,12 @@ CREATE FUNCTION diskquota.show_relation_cache_all_seg() RETURNS setof diskquota. -- view part CREATE VIEW diskquota.show_fast_schema_quota_view AS -WITH +WITH quota_usage AS ( - SELECT - relnamespace, + SELECT + relnamespace, SUM(size) AS total_size - FROM + FROM diskquota.table_size, pg_class WHERE @@ -131,14 +132,14 @@ WITH GROUP BY relnamespace ) -SELECT - nspname AS schema_name, - targetoid AS schema_oid, - quotalimitMB AS quota_in_mb, +SELECT + nspname AS schema_name, + targetoid AS schema_oid, + quotalimitMB AS quota_in_mb, COALESCE(total_size, 0) AS nspsize_in_bytes -FROM - diskquota.quota_config JOIN - pg_namespace ON targetoid = pg_namespace.oid LEFT OUTER JOIN +FROM + diskquota.quota_config JOIN + pg_namespace ON targetoid = pg_namespace.oid LEFT OUTER JOIN quota_usage ON pg_namespace.oid = relnamespace WHERE quotaType = 0; -- NAMESPACE_QUOTA @@ -149,7 +150,7 @@ WITH SELECT relowner, SUM(size) AS total_size - FROM + FROM diskquota.table_size, pg_class WHERE @@ -158,14 +159,14 @@ WITH GROUP BY relowner ) -SELECT +SELECT rolname AS role_name, targetoid AS role_oid, quotalimitMB AS quota_in_mb, COALESCE(total_size, 0) AS rolsize_in_bytes -FROM - diskquota.quota_config JOIN - pg_roles ON targetoid = pg_roles.oid LEFT OUTER JOIN +FROM + diskquota.quota_config JOIN + pg_roles ON targetoid = pg_roles.oid LEFT OUTER JOIN quota_usage ON pg_roles.oid = relowner WHERE quotaType = 1; -- ROLE_QUOTA @@ -188,12 +189,12 @@ WITH quota_usage AS ( SELECT relnamespace, - CASE + CASE WHEN reltablespace = 0 THEN dattablespace ELSE reltablespace END AS reltablespace, SUM(size) AS total_size - FROM + FROM diskquota.table_size, pg_class, default_tablespace @@ -206,29 +207,29 @@ WITH dattablespace ), full_quota_config AS ( - SELECT + SELECT targetOid, tablespaceoid, quotalimitMB - FROM - diskquota.quota_config AS config, + FROM + diskquota.quota_config AS config, diskquota.target AS target - WHERE - config.targetOid = target.primaryOid AND + WHERE + config.targetOid = target.primaryOid AND config.quotaType = target.quotaType AND config.quotaType = 2 -- NAMESPACE_TABLESPACE_QUOTA ) -SELECT +SELECT nspname AS schema_name, targetoid AS schema_oid, - spcname AS tablespace_name, + spcname AS tablespace_name, tablespaceoid AS tablespace_oid, quotalimitMB AS quota_in_mb, COALESCE(total_size, 0) AS nspsize_tablespace_in_bytes -FROM - full_quota_config JOIN - pg_namespace ON targetoid = pg_namespace.oid JOIN - pg_tablespace ON tablespaceoid = pg_tablespace.oid LEFT OUTER JOIN +FROM + full_quota_config JOIN + pg_namespace ON targetoid = pg_namespace.oid JOIN + pg_tablespace ON tablespaceoid = pg_tablespace.oid LEFT OUTER JOIN quota_usage ON pg_namespace.oid = 
relnamespace AND pg_tablespace.oid = reltablespace; CREATE VIEW diskquota.show_fast_role_tablespace_quota_view AS @@ -240,12 +241,12 @@ WITH quota_usage AS ( SELECT relowner, - CASE + CASE WHEN reltablespace = 0 THEN dattablespace ELSE reltablespace END AS reltablespace, SUM(size) AS total_size - FROM + FROM diskquota.table_size, pg_class, default_tablespace @@ -258,29 +259,29 @@ WITH dattablespace ), full_quota_config AS ( - SELECT + SELECT targetOid, tablespaceoid, quotalimitMB - FROM - diskquota.quota_config AS config, + FROM + diskquota.quota_config AS config, diskquota.target AS target WHERE - config.targetOid = target.primaryOid AND + config.targetOid = target.primaryOid AND config.quotaType = target.quotaType AND config.quotaType = 3 -- ROLE_TABLESPACE_QUOTA ) -SELECT +SELECT rolname AS role_name, targetoid AS role_oid, - spcname AS tablespace_name, + spcname AS tablespace_name, tablespaceoid AS tablespace_oid, quotalimitMB AS quota_in_mb, COALESCE(total_size, 0) AS rolsize_tablespace_in_bytes -FROM - full_quota_config JOIN - pg_roles ON targetoid = pg_roles.oid JOIN - pg_tablespace ON tablespaceoid = pg_tablespace.oid LEFT OUTER JOIN +FROM + full_quota_config JOIN + pg_roles ON targetoid = pg_roles.oid JOIN + pg_tablespace ON tablespaceoid = pg_tablespace.oid LEFT OUTER JOIN quota_usage ON pg_roles.oid = relowner AND pg_tablespace.oid = reltablespace; -- view end @@ -291,7 +292,6 @@ INSERT INTO diskquota.state SELECT (count(relname) = 0)::int FROM pg_class AS c, -- refer to see test case 'test_drop_after_pause' SELECT FROM diskquota.resume(); -CREATE OR REPLACE FUNCTION diskquota.pull_all_table_size(OUT tableid oid, OUT size bigint, OUT segid smallint) RETURNS SETOF RECORD AS '$libdir/diskquota-2.0.so', 'pull_all_table_size' LANGUAGE C; -- Starting the worker has to be the last step. CREATE FUNCTION diskquota.diskquota_start_worker() RETURNS void STRICT AS '$libdir/diskquota-2.0.so' LANGUAGE C; diff --git a/diskquota.c b/diskquota.c index 5c61a9423c4..d7da502594a 100644 --- a/diskquota.c +++ b/diskquota.c @@ -41,8 +41,12 @@ PG_MODULE_MAGIC; #define DISKQUOTA_DB "diskquota" #define DISKQUOTA_APPLICATION_NAME "gp_reserved_gpdiskquota" -#ifndef DISKQUOTA_BINARY_NAME - #error DISKQUOTA_BINARY_NAME should be defined by build system +#if !defined(DISKQUOTA_VERSION) || \ + !defined(DISKQUOTA_MAJOR_VERSION) || \ + !defined(DISKQUOTA_PATCH_VERSION) || \ + !defined(DISKQUOTA_MINOR_VERSION) || \ + !defined(DISKQUOTA_BINARY_NAME) + #error Version not found. Please check if the VERSION file exists. #endif #include // for useconds_t @@ -122,14 +126,19 @@ extern void invalidate_database_blackmap(Oid dbid); void _PG_init(void) { - BackgroundWorker worker; + /* diskquota.so must be in shared_preload_libraries to init SHM. */ + if (!process_shared_preload_libraries_in_progress) { + ereport(ERROR, ( + errmsg("booting diskquota-" DISKQUOTA_VERSION ", but " + DISKQUOTA_BINARY_NAME " not in shared_preload_libraries. abort.") + )); + } else { + ereport(INFO, (errmsg("booting diskquota-"DISKQUOTA_VERSION))); + } + BackgroundWorker worker; memset(&worker, 0, sizeof(BackgroundWorker)); - /* diskquota.so must be in shared_preload_libraries to init SHM. 
*/ - if (!process_shared_preload_libraries_in_progress) - ereport(ERROR, (errmsg(DISKQUOTA_BINARY_NAME " not in shared_preload_libraries."))); - /* values are used in later calls */ define_guc_variables(); @@ -303,14 +312,6 @@ disk_quota_worker_main(Datum main_arg) PGC_USERSET,PGC_S_SESSION, GUC_ACTION_SAVE, true, 0); - /* - * Set ps display name of the worker process of diskquota, so we can - * distinguish them quickly. Note: never mind parameter name of the - * function `init_ps_display`, we only want the ps name looks like - * 'bgworker: [diskquota] ...' - */ - init_ps_display("bgworker:", "[diskquota]", dbname, ""); - /* diskquota worker should has Gp_role as dispatcher */ Gp_role = GP_ROLE_DISPATCH; @@ -320,6 +321,55 @@ disk_quota_worker_main(Datum main_arg) */ init_disk_quota_model(); + // check that the current binary version and the installed SQL DDL version match + int times = 0; + while (!got_sigterm) { + CHECK_FOR_INTERRUPTS(); + + int major = -1, minor = -1; + int has_error = worker_spi_get_extension_version(&major, &minor) != 0; + + if (major == DISKQUOTA_MAJOR_VERSION && minor == DISKQUOTA_MINOR_VERSION) + break; + + if (has_error) { + static char _errfmt[] = "found issues in pg_extension; check the server log. waited %d seconds", + _errmsg[sizeof(_errfmt) + sizeof("2147483647" /* INT_MAX */) + 1] = {}; + snprintf(_errmsg, sizeof(_errmsg), _errfmt, times * diskquota_naptime); + + init_ps_display("bgworker:", "[diskquota]", dbname, _errmsg); + } else { + init_ps_display("bgworker:", "[diskquota]", dbname, + "v" DISKQUOTA_VERSION " does not match the installed SQL version. stop working"); + } + + ereportif( + !has_error && times == 0, + WARNING, + (errmsg("[diskquota] worker for '%s' detected the installed version is %d.%d, " + "but the current version is %s. abort due to version mismatch", dbname, major, minor, DISKQUOTA_VERSION), + errhint("run ALTER EXTENSION diskquota UPDATE TO '%d.%d'", + DISKQUOTA_MAJOR_VERSION, DISKQUOTA_MINOR_VERSION))); + + int rc = WaitLatch(&MyProc->procLatch, WL_LATCH_SET|WL_TIMEOUT|WL_POSTMASTER_DEATH, diskquota_naptime * 1000L); + ResetLatch(&MyProc->procLatch); + if (rc & WL_POSTMASTER_DEATH) { + ereport(LOG, + (errmsg("[diskquota] bgworker for '%s' is being terminated by postmaster death.", dbname))); + proc_exit(-1); + } + + times++; + } + + /* + * Set ps display name of the worker process of diskquota, so we can + * distinguish them quickly. Note: never mind the parameter names of the + * function `init_ps_display`; we only want the ps name to look like + * 'bgworker: [diskquota] ...' + */ + init_ps_display("bgworker:", "[diskquota]", dbname, ""); + /* Waiting for diskquota state become ready */ while (!got_sigterm) { @@ -462,7 +512,7 @@ disk_quota_launcher_main(Datum main_arg) /* diskquota launcher should has Gp_role as dispatcher */ Gp_role = GP_ROLE_DISPATCH; - + /* * use table diskquota_namespace.database_list to store diskquota enabled * database. @@ -825,7 +875,7 @@ on_add_db(Oid dbid, MessageResult * code) /* * Handle message: drop extension diskquota - * do our best to: + * do: * 1. kill the associated worker process * 2. delete dbid from diskquota_namespace.database_list * 3.
invalidate black-map entries and monitoring_dbid_cache from shared memory @@ -1010,13 +1060,13 @@ worker_set_handle(Oid dbid, BackgroundWorkerHandle *handle) if (found) { workerentry->handle = handle; - } + } LWLockRelease(diskquota_locks.worker_map_lock); if (!found) { ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), errmsg("[diskquota] worker not found for database \"%s\"", - get_database_name(dbid)))); + get_database_name(dbid)))); } return found; } diff --git a/diskquota.h b/diskquota.h index d9e596e7556..470bd35a5d3 100644 --- a/diskquota.h +++ b/diskquota.h @@ -117,6 +117,8 @@ struct DiskQuotaWorkerEntry Oid dbid; pg_atomic_uint32 epoch; /* this counter will be increased after each worker loop */ bool is_paused; /* true if this worker is paused */ + + // NOTE: this field can only be accessed from the diskquota launcher; in any other process it is a dangling pointer BackgroundWorkerHandle *handle; }; @@ -145,6 +147,7 @@ extern int diskquota_max_active_tables; extern bool diskquota_hardlimit; extern int SEGCOUNT; +extern int worker_spi_get_extension_version(int *major, int *minor); extern int get_ext_major_version(void); extern void truncateStringInfo(StringInfo str, int nchars); extern List *get_rel_oid_list(void); diff --git a/diskquota_utility.c b/diskquota_utility.c index 0b031c24929..6ac10d9e6a6 100644 --- a/diskquota_utility.c +++ b/diskquota_utility.c @@ -20,6 +20,7 @@ #include #include "access/aomd.h" +#include "access/xact.h" #include "catalog/namespace.h" #include "catalog/objectaccess.h" #include "catalog/pg_collation.h" @@ -32,6 +33,7 @@ #include "executor/spi.h" #include "nodes/makefuncs.h" #include "storage/proc.h" +#include "utils/snapmgr.h" #include "utils/builtins.h" #include "utils/faultinjector.h" #include "utils/fmgroids.h" @@ -563,27 +565,13 @@ register_diskquota_object_access_hook(void) object_access_hook = dq_object_access_hook; } -/* - * This hook is used to handle drop extension diskquota event - * It will send CMD_DROP_EXTENSION message to diskquota laucher. - * Laucher will terminate the corresponding worker process and - * remove the dbOid from the database_list table. - */ -static void -dq_object_access_hook(ObjectAccessType access, Oid classId, - Oid objectId, int subId, void *arg) +static void dq_object_access_hook_on_drop(void) { - Oid oid; - int rc, launcher_pid; - - if (access != OAT_DROP || classId != ExtensionRelationId) - goto out; - oid = get_extension_oid("diskquota", true); - if (oid != objectId) - goto out; + int rc, launcher_pid; - /* - * Remove the current database from monitored db cache + /* + * Remove the current database from monitored db cache * on all segments and on coordinator.
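+ * (The removal itself is done by the update_diskquota_db_list() call just below.)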
*/ update_diskquota_db_list(MyDatabaseId, HASH_REMOVE); @@ -642,6 +629,33 @@ dq_object_access_hook(ObjectAccessType access, Oid classId, } LWLockRelease(diskquota_locks.extension_ddl_message_lock); LWLockRelease(diskquota_locks.extension_ddl_lock); +} + +/* + * Listen for modifications of the pg_extension catalog: + * DROP: send CMD_DROP_EXTENSION to the diskquota launcher + */ +static void +dq_object_access_hook(ObjectAccessType access, Oid classId, + Oid objectId, int subId, void *arg) +{ + if (classId != ExtensionRelationId) + goto out; + + if (get_extension_oid("diskquota", true) != objectId) + goto out; + + switch(access) { + case OAT_DROP: + dq_object_access_hook_on_drop(); + break; + case OAT_POST_ALTER: + case OAT_FUNCTION_EXECUTE: + case OAT_POST_CREATE: + case OAT_NAMESPACE_SEARCH: + break; + } + out: if (next_object_access_hook) (*next_object_access_hook) (access, classId, objectId, @@ -1207,6 +1221,61 @@ set_per_segment_quota(PG_FUNCTION_ARGS) PG_RETURN_VOID(); } +int worker_spi_get_extension_version(int *major, int *minor) +{ + StartTransactionCommand(); + int ret = SPI_connect(); + Assert(ret == SPI_OK_CONNECT); + PushActiveSnapshot(GetTransactionSnapshot()); + + ret = SPI_execute("select extversion from pg_extension where extname = 'diskquota'", true, 0); + + if (SPI_processed == 0) { + ret = -1; + goto out; + } + + if (ret != SPI_OK_SELECT || SPI_processed != 1) { + ereport(WARNING, + (errmsg("[diskquota] failed to read the installed version: got %ld rows, SPI return code = %d", + SPI_processed, ret))); + ret = -1; + goto out; + } + + bool is_null = false; + Datum v = SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 1, &is_null); + Assert(is_null == false); + + char *version = TextDatumGetCString(v); + if (version == NULL) { + ereport(WARNING, + (errmsg("[diskquota] 'extversion' is empty in pg_extension. catalog might be corrupted"))); + ret = -1; + goto out; + } + + ret = sscanf(version, "%d.%d", major, minor); + + if (ret != 2) { + ereport(WARNING, + (errmsg("[diskquota] 'extversion' is '%s' in pg_extension, which is not a valid version format. " + "catalog might be corrupted", version))); + ret = -1; + goto out; + } + + ret = 0; + +out: + SPI_finish(); + PopActiveSnapshot(); + CommitTransactionCommand(); + + return ret; +} + /* * Get major version from extversion, and convert it to int * 0 means an invalid major version. diff --git a/upgrade_test/CMakeLists.txt b/upgrade_test/CMakeLists.txt new file mode 100644 index 00000000000..8bf81ca2f8f --- /dev/null +++ b/upgrade_test/CMakeLists.txt @@ -0,0 +1,22 @@ +include(${CMAKE_SOURCE_DIR}/cmake/Regress.cmake) + +RegressTarget_Add(upgrade + INIT_FILE + ${CMAKE_CURRENT_SOURCE_DIR}/init_file + SQL_DIR ${CMAKE_CURRENT_SOURCE_DIR}/sql + EXPECTED_DIR ${CMAKE_CURRENT_SOURCE_DIR}/expected + SCHEDULE_FILE + ${CMAKE_CURRENT_SOURCE_DIR}/schedule_1.0--2.0 + ${CMAKE_CURRENT_SOURCE_DIR}/schedule_2.0--1.0 + REGRESS_OPTS --dbname=contrib_regression) + +# The `installcheck` target is deliberately not used here;
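+# upgrade tests are run via the dedicated `upgradecheck` target defined below. +# A sketch of the invocation (assuming a standard out-of-tree CMake build directory): +# +#     cmake --build <build-dir> --target upgradecheck +#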
+# The upgrade test is not needed during routine feature development. +add_custom_target(upgradecheck) + +add_dependencies(upgradecheck upgrade) diff --git a/upgrade_test/Makefile b/upgrade_test/Makefile deleted file mode 100644 index 5089f2d09ef..00000000000 --- a/upgrade_test/Makefile +++ /dev/null @@ -1,5 +0,0 @@ -REGRESS = dummy - -REGRESS_OPTS = --schedule=diskquota_schedule_upgrade --schedule=diskquota_schedule_downgrade --init-file=init_file -PGXS := $(shell pg_config --pgxs) -include $(PGXS) diff --git a/upgrade_test/README.md b/upgrade_test/README.md index fc8c1b23033..deab7da6058 100644 --- a/upgrade_test/README.md +++ b/upgrade_test/README.md @@ -1,7 +1,29 @@ -# upgrade_extension test -The upgrade_extension test case will fail if -run it locally. Because it calls -"install_new_version_diskquota" function which is -defined in concourse/scripts/upgrade_extension.sh. -You can write this function by yourself and -export it locally if you want to run it successfully. +# Add a new version to the upgrade or downgrade tests + +- Add a new `schedule` file, e.g. `schedule_1.0--2.0`. +- Write the new tests: + +``` +test: 1.0_install # Install diskquota version 1.0 +test: 1.0_set_quota # Create some quota configs under the "1.0" diskquota schema +test: 1.0_catalog # Check that the diskquota DDL is as expected +test: 2.0_migrate_to_version_2.0 # Migrate the 1.0 diskquota DDL to 2.0 +test: 2.0_catalog # Check that the migration result matches a freshly created 2.0 diskquota schema +test: 1.0_test_in_2.0_quota_create_in_1.0 # Check that the quota configs created by the 1.0 extension still work +test: 1.0_cleanup_quota # Drop extension +``` + +The file name means this is an upgrade test from 1.0 to 2.0. + +For a downgrade test, just reverse the schedule file. + +--- + +`10.1_test_in_10.0_quota_create_in_10.1` means: + +- the file is for version 10.1 +- this is a test file +- the test runs in 10.0, using the 10.0 binary and the 10.0 SQL +- the objects under test were created in 10.1 + +---- diff --git a/upgrade_test/diskquota_schedule_downgrade b/upgrade_test/diskquota_schedule_downgrade deleted file mode 100644 index a030ec15160..00000000000 --- a/upgrade_test/diskquota_schedule_downgrade +++ /dev/null @@ -1,23 +0,0 @@ -# Test new version diskquota with old extension script -test: install_new_version -test: init -test: prepare -test: set_config -# execute downgrade scripts -test: downgrade_extension -test: test_role test_schema test_reschema test_temp_role test_rename test_delete_quota -test: clean - -# Test downgrade extension to old version -# run by old version diskquota -test: install_old_version -test: install_new_version -test: init -test: prepare -test: set_config -test: downgrade_extension -# downgrade diskquota to old version -test: install_old_version -# run by old version diskquota -test: test_role test_schema test_reschema test_temp_role test_rename test_delete_quota -test: clean diff --git a/upgrade_test/diskquota_schedule_upgrade b/upgrade_test/diskquota_schedule_upgrade deleted file mode 100644 index c650f177a4f..00000000000 --- a/upgrade_test/diskquota_schedule_upgrade +++ /dev/null @@ -1,22 +0,0 @@ -# Test new version diskquota with old extension script -test: install_old_version -test: init -test: prepare -test: set_config -# upgrade diskquota.so to new version -test: install_new_version -test: test_role test_schema test_reschema test_temp_role test_rename test_delete_quota -test: clean - -# Test upgrade extension to new version -# run by old version diskquota -test: install_old_version -test: init -test: prepare -test:
set_config -# upgrade diskquota to new version -test: install_new_version -test: upgrade_extension -# run by new version diskquota -test: test_role test_schema test_reschema test_temp_role test_rename test_delete_quota test_tablespace_schema test_tablespace_role test_tablespace_schema_perseg test_tablespace_role_perseg -test: clean diff --git a/upgrade_test/expected/1.0_catalog.out b/upgrade_test/expected/1.0_catalog.out new file mode 100644 index 00000000000..93b7e0ab6ab --- /dev/null +++ b/upgrade_test/expected/1.0_catalog.out @@ -0,0 +1,135 @@ +CREATE FUNCTION typeid_to_name(oid[]) RETURNS name[] AS ' + WITH io AS ( + SELECT x.i AS index, x.o AS type_id FROM ( + SELECT generate_series(1, array_length($1, 1)) AS i, unnest($1) AS o + ) AS x + ) SELECT array_agg(typname order by io.index) FROM io, pg_type t WHERE io.type_id = t.oid; +' LANGUAGE sql STABLE; +-- types +SELECT + t1.typname, + array_agg(t2.typname order by a.atttypid) typname +FROM + pg_namespace n, + pg_class c, + pg_type t1, + pg_type t2, + pg_attribute a +WHERE + n.nspname = 'diskquota' + AND c.oid = t1.typrelid + AND n.oid = t1.typnamespace + AND a.attrelid = c.oid + AND t2.oid = a.atttypid +GROUP BY + t1.typname +ORDER BY + t1.typname; + typname | typname +------------------------------+---------------------------------------------- + diskquota_active_table_type | {int8,oid} + quota_config | {int8,int4,int4,oid,oid,tid,xid,xid,cid,cid} + show_fast_database_size_view | {numeric} + show_fast_role_quota_view | {name,int8,oid,numeric} + show_fast_schema_quota_view | {name,int8,oid,numeric} + state | {int4,int4,oid,tid,xid,xid,cid,cid} + table_size | {int8,int4,oid,oid,tid,xid,xid,cid,cid} +(7 rows) + +-- types end +-- tables +SELECT + relname, + typeid_to_name(ARRAY[c.reltype]::oid[]) AS reltype, + typeid_to_name(ARRAY[c.reloftype]::oid[]) AS reloftype +FROM + pg_class c, + pg_namespace n +WHERE + c.relnamespace = n.oid + AND n.nspname = 'diskquota' + and c.relkind != 'v' +ORDER BY + relname; + relname | reltype | reloftype +-----------------------------+-------------------------------+----------- + diskquota_active_table_type | {diskquota_active_table_type} | + quota_config | {quota_config} | + quota_config_pkey | | + state | {state} | + state_pkey | | + table_size | {table_size} | + table_size_pkey | | +(7 rows) + +-- tables end +-- UDF +SELECT + proname, + typeid_to_name(ARRAY[prorettype]::oid[]) AS prorettype, + typeid_to_name(proargtypes) AS proargtypes, + typeid_to_name(proallargtypes) AS proallargtypes, + proargmodes, + prosrc, + probin, + proacl +FROM + pg_namespace n, + pg_proc p +WHERE + n.nspname = 'diskquota' + AND n.oid = p.pronamespace +ORDER BY + proname; + proname | prorettype | proargtypes | proallargtypes | proargmodes | prosrc | probin | proacl +----------------------------+-------------------------------+-------------+----------------+-------------+----------------------------+----------------------+-------- + diskquota_fetch_table_stat | {diskquota_active_table_type} | {int4,_oid} | | | diskquota_fetch_table_stat | $libdir/diskquota.so | + init_table_size_table | {void} | | | | init_table_size_table | $libdir/diskquota.so | + set_role_quota | {void} | {text,text} | | | set_role_quota | $libdir/diskquota.so | + set_schema_quota | {void} | {text,text} | | | set_schema_quota | $libdir/diskquota.so | + update_diskquota_db_list | {void} | {oid,int4} | | | update_diskquota_db_list | $libdir/diskquota.so | +(5 rows) + +-- UDF end +-- views +SELECT + schemaname, + viewname, + definition +FROM + pg_views +WHERE + 
schemaname = 'diskquota' +ORDER BY + schemaname, viewname; + schemaname | viewname | definition +------------+------------------------------+------------------------------------------------------------------------------------------------------------ + diskquota | show_fast_database_size_view | SELECT (( SELECT sum(pg_relation_size((pg_class.oid)::regclass)) AS sum + + | | FROM pg_class + + | | WHERE (pg_class.oid <= (16384)::oid)) + ( SELECT sum(table_size.size) AS sum + + | | FROM diskquota.table_size)) AS dbsize; + diskquota | show_fast_role_quota_view | SELECT pgr.rolname AS role_name, + + | | pgc.relowner AS role_oid, + + | | qc.quotalimitmb AS quota_in_mb, + + | | sum(ts.size) AS rolsize_in_bytes + + | | FROM diskquota.table_size ts, + + | | pg_class pgc, + + | | diskquota.quota_config qc, + + | | pg_roles pgr + + | | WHERE (((pgc.relowner = qc.targetoid) AND (pgc.relowner = pgr.oid)) AND (ts.tableid = pgc.oid)) + + | | GROUP BY pgc.relowner, pgr.rolname, qc.quotalimitmb; + diskquota | show_fast_schema_quota_view | SELECT pgns.nspname AS schema_name, + + | | pgc.relnamespace AS schema_oid, + + | | qc.quotalimitmb AS quota_in_mb, + + | | sum(ts.size) AS nspsize_in_bytes + + | | FROM diskquota.table_size ts, + + | | pg_class pgc, + + | | diskquota.quota_config qc, + + | | pg_namespace pgns + + | | WHERE (((ts.tableid = pgc.oid) AND (qc.targetoid = pgc.relnamespace)) AND (pgns.oid = pgc.relnamespace))+ + | | GROUP BY pgns.nspname, pgc.relnamespace, qc.quotalimitmb + + | | ORDER BY pgns.nspname; +(3 rows) + +-- views end +DROP FUNCTION typeid_to_name(oid[]); diff --git a/upgrade_test/expected/1.0_cleanup_quota.out b/upgrade_test/expected/1.0_cleanup_quota.out new file mode 100644 index 00000000000..3935d709fd9 --- /dev/null +++ b/upgrade_test/expected/1.0_cleanup_quota.out @@ -0,0 +1 @@ +drop extension diskquota; diff --git a/upgrade_test/expected/1.0_install.out b/upgrade_test/expected/1.0_install.out new file mode 100644 index 00000000000..54f9f94a86a --- /dev/null +++ b/upgrade_test/expected/1.0_install.out @@ -0,0 +1,14 @@ +-- cleanup previous diskquota installation +\! gpconfig -c shared_preload_libraries -v '' > /dev/null +\! gpstop -raf > /dev/null +\! dropdb --if-exists diskquota +NOTICE: database "diskquota" does not exist, skipping +-- TODO reset all diskquota GUC +\! gpstop -raf > /dev/null +-- setup basic environment +\! createdb diskquota +\! gpconfig -c shared_preload_libraries -v 'diskquota.so' > /dev/null +\! gpstop -raf > /dev/null +-- TODO setup GUC +\! gpconfig -c diskquota.naptime -v '1' > /dev/null +\! gpstop -raf > /dev/null diff --git a/upgrade_test/expected/1.0_migrate_to_version_1.0.out b/upgrade_test/expected/1.0_migrate_to_version_1.0.out new file mode 100644 index 00000000000..21ffc1e2e12 --- /dev/null +++ b/upgrade_test/expected/1.0_migrate_to_version_1.0.out @@ -0,0 +1,12 @@ +\! gpconfig -c shared_preload_libraries -v 'diskquota.so' > /dev/null +\! gpstop -raf > /dev/null +\! gpconfig -s 'shared_preload_libraries' +Values on all segments are consistent +GUC : shared_preload_libraries +Master value: diskquota.so +Segment value: diskquota.so +\c +alter extension diskquota update to '1.0'; +-- downgrade to 1.0 need reboot, the version check is not in 1.0 +-- worker status is undefined at just downgrade +\! 
gpstop -arf > /dev/null diff --git a/upgrade_test/expected/1.0_set_quota.out b/upgrade_test/expected/1.0_set_quota.out new file mode 100644 index 00000000000..d8d661fc412 --- /dev/null +++ b/upgrade_test/expected/1.0_set_quota.out @@ -0,0 +1,35 @@ +\!gpconfig -s 'shared_preload_libraries' +Values on all segments are consistent +GUC : shared_preload_libraries +Master value: diskquota.so +Segment value: diskquota.so +create extension diskquota with version '1.0'; +\!sleep 5 +-- schema quota +create schema s1; +select diskquota.set_schema_quota('s1', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +create table s1.a(i int) distributed by (i); +insert into s1.a select generate_series(1, 10000000); -- ok, but should fail after upgrade +-- role quota +create schema srole; +create role u1 nologin; +NOTICE: resource queue required -- using default resource queue "pg_default" +create table srole.b (t text) distributed by (t); +alter table srole.b owner to u1; +select diskquota.set_role_quota('u1', '1 MB'); + set_role_quota +---------------- + +(1 row) + +insert into srole.b select generate_series(1,100000); -- ok, but should fail after upgrade +\!sleep 5 +-- leaked resource: +-- role u1 +-- table s1.a, srole.b +-- schema s1, srole diff --git a/upgrade_test/expected/1.0_test_in_2.0_quota_create_in_1.0.out b/upgrade_test/expected/1.0_test_in_2.0_quota_create_in_1.0.out new file mode 100644 index 00000000000..57da8569dc1 --- /dev/null +++ b/upgrade_test/expected/1.0_test_in_2.0_quota_create_in_1.0.out @@ -0,0 +1,10 @@ +-- need run 1.0_set_quota before run this test +-- FIXME add version check here +\!sleep 5 +insert into s1.a select generate_series(1, 100); -- fail +ERROR: schema's disk space quota exceeded with name:s1 +insert into srole.b select generate_series(1, 100); -- fail +ERROR: role's disk space quota exceeded with name:u1 +drop table s1.a, srole.b; +drop schema s1, srole; +drop role u1; diff --git a/upgrade_test/expected/2.0_catalog.out b/upgrade_test/expected/2.0_catalog.out new file mode 100644 index 00000000000..67d66fc8319 --- /dev/null +++ b/upgrade_test/expected/2.0_catalog.out @@ -0,0 +1,264 @@ +CREATE FUNCTION typeid_to_name(oid[]) RETURNS name[] AS ' + WITH io AS ( + SELECT x.i AS index, x.o AS type_id FROM ( + SELECT generate_series(1, array_length($1, 1)) AS i, unnest($1) AS o + ) AS x + ) SELECT array_agg(typname order by io.index) FROM io, pg_type t WHERE io.type_id = t.oid; +' LANGUAGE sql STABLE; +-- types +SELECT + t1.typname, + array_agg(t2.typname order by a.atttypid) typname +FROM + pg_namespace n, + pg_class c, + pg_type t1, + pg_type t2, + pg_attribute a +WHERE + n.nspname = 'diskquota' + AND c.oid = t1.typrelid + AND n.oid = t1.typnamespace + AND a.attrelid = c.oid + AND t2.oid = a.atttypid +GROUP BY + t1.typname +ORDER BY + t1.typname; + typname | typname +----------------------------------------+----------------------------------------------------- + blackmap | {bool,int4,text,oid,oid,oid,oid,oid,oid} + blackmap_entry | {bool,int4,oid,oid,oid} + blackmap_entry_detail | {bool,int4,text,oid,oid,oid,oid,oid,oid} + diskquota_active_table_type | {int8,int2,oid} + quota_config | {int8,int4,int4,oid,oid,tid,xid,xid,cid,cid,float4} + relation_cache_detail | {char,int4,int4,oid,oid,oid,oid,oid,oid,oid,_oid} + show_fast_database_size_view | {numeric} + show_fast_role_quota_view | {name,int8,oid,numeric} + show_fast_role_tablespace_quota_view | {name,name,int8,oid,oid,numeric} + show_fast_schema_quota_view | {name,int8,oid,numeric} + 
show_fast_schema_tablespace_quota_view | {name,name,int8,oid,oid,numeric} + state | {int4,int4,oid,tid,xid,xid,cid,cid} + table_size | {int8,int2,int4,oid,oid,tid,xid,xid,cid,cid} + target | {int4,int4,oid,oid,oid,tid,xid,xid,cid,cid} +(14 rows) + +-- types end +-- tables +SELECT + relname, + typeid_to_name(ARRAY[c.reltype]::oid[]) as reltype, + typeid_to_name(ARRAY[c.reloftype]::oid[]) as reloftype +FROM + pg_class c, + pg_namespace n +WHERE + c.relnamespace = n.oid + AND n.nspname = 'diskquota' + and c.relkind != 'v' +ORDER BY + relname; + relname | reltype | reloftype +-----------------------------+-------------------------------+----------- + blackmap_entry | {blackmap_entry} | + blackmap_entry_detail | {blackmap_entry_detail} | + diskquota_active_table_type | {diskquota_active_table_type} | + quota_config | {quota_config} | + quota_config_pkey | | + relation_cache_detail | {relation_cache_detail} | + state | {state} | + state_pkey | | + table_size | {table_size} | + table_size_pkey | | + target | {target} | + target_pkey | | +(12 rows) + +-- tables end +-- UDF +SELECT + proname, + typeid_to_name(ARRAY[prorettype]::oid[]) as prorettype, + typeid_to_name(proargtypes) as proargtypes, + typeid_to_name(proallargtypes) as proallargtypes, + proargmodes, + prosrc, + probin, + proacl +FROM + pg_namespace n, + pg_proc p +WHERE + n.nspname = 'diskquota' + AND n.oid = p.pronamespace + AND p.proname != 'update_diskquota_db_list' -- update_diskquota_db_list in 1.0 can not be dropd, this is acceptable +ORDER BY + proname; + proname | prorettype | proargtypes | proallargtypes | proargmodes | prosrc | probin | proacl +-----------------------------+-------------------------------+------------------------+-----------------+-------------+----------------------------------------------------------------------------------------------------------------------+--------------------------+-------- + diskquota_fetch_table_stat | {diskquota_active_table_type} | {int4,_oid} | | | diskquota_fetch_table_stat | $libdir/diskquota-2.0.so | + init_table_size_table | {void} | | | | init_table_size_table | $libdir/diskquota-2.0.so | + pause | {void} | | | | diskquota_pause | $libdir/diskquota-2.0.so | + pull_all_table_size | {record} | | {oid,int8,int2} | {o,o,o} | pull_all_table_size | $libdir/diskquota-2.0.so | + refresh_blackmap | {void} | {_blackmap_entry,_oid} | | | refresh_blackmap | $libdir/diskquota-2.0.so | + relation_size | {int8} | {regclass} | | | +| | + | | | | | SELECT SUM(size)::bigint FROM ( +| | + | | | | | SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, relstorage) AS size+| | + | | | | | FROM gp_dist_random('pg_class') WHERE oid = relation +| | + | | | | | UNION ALL +| | + | | | | | SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, relstorage) AS size+| | + | | | | | FROM pg_class WHERE oid = relation +| | + | | | | | ) AS t | | + relation_size_local | {int8} | {oid,oid,char,char} | | | relation_size_local | $libdir/diskquota-2.0.so | + resume | {void} | | | | diskquota_resume | $libdir/diskquota-2.0.so | + set_per_segment_quota | {void} | {text,float4} | | | set_per_segment_quota | $libdir/diskquota-2.0.so | + set_role_quota | {void} | {text,text} | | | set_role_quota | $libdir/diskquota-2.0.so | + set_role_tablespace_quota | {void} | {text,text,text} | | | set_role_tablespace_quota | $libdir/diskquota-2.0.so | + set_schema_quota | {void} | {text,text} | | | set_schema_quota | $libdir/diskquota-2.0.so | + set_schema_tablespace_quota | 
{void} | {text,text,text} | | | set_schema_tablespace_quota | $libdir/diskquota-2.0.so | + show_blackmap | {blackmap_entry_detail} | | | | show_blackmap | $libdir/diskquota-2.0.so | + show_relation_cache | {relation_cache_detail} | | | | show_relation_cache | $libdir/diskquota-2.0.so | + show_relation_cache_all_seg | {relation_cache_detail} | | | | +| | + | | | | | WITH relation_cache AS ( +| | + | | | | | SELECT diskquota.show_relation_cache() AS a +| | + | | | | | FROM gp_dist_random('gp_id') +| | + | | | | | ) +| | + | | | | | SELECT (a).* FROM relation_cache; | | + show_worker_epoch | {int8} | | | | show_worker_epoch | $libdir/diskquota-2.0.so | + status | {record} | | {text,text} | {t,t} | diskquota_status | $libdir/diskquota-2.0.so | + wait_for_worker_new_epoch | {bool} | | | | wait_for_worker_new_epoch | $libdir/diskquota-2.0.so | +(19 rows) + +-- UDF end +-- views +SELECT + schemaname, + viewname, + definition +FROM + pg_views +WHERE + schemaname = 'diskquota' +ORDER by + schemaname, viewname; + schemaname | viewname | definition +------------+----------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------- + diskquota | blackmap | SELECT bm.target_type, + + | | bm.target_oid, + + | | bm.database_oid, + + | | bm.tablespace_oid, + + | | bm.seg_exceeded, + + | | bm.dbnode, + + | | bm.spcnode, + + | | bm.relnode, + + | | bm.segid + + | | FROM diskquota.show_blackmap() bm(target_type, target_oid, database_oid, tablespace_oid, seg_exceeded, dbnode, spcnode, relnode, segid); + diskquota | show_fast_database_size_view | SELECT (( SELECT sum(pg_relation_size((pg_class.oid)::regclass)) AS sum + + | | FROM pg_class + + | | WHERE (pg_class.oid <= (16384)::oid)) + ( SELECT sum(table_size.size) AS sum + + | | FROM diskquota.table_size + + | | WHERE (table_size.segid = (-1)))) AS dbsize; + diskquota | show_fast_role_quota_view | WITH quota_usage AS ( + + | | SELECT pg_class.relowner, + + | | sum(table_size.size) AS total_size + + | | FROM diskquota.table_size, + + | | pg_class + + | | WHERE ((table_size.tableid = pg_class.oid) AND (table_size.segid = (-1))) + + | | GROUP BY pg_class.relowner + + | | ) + + | | SELECT pg_roles.rolname AS role_name, + + | | quota_config.targetoid AS role_oid, + + | | quota_config.quotalimitmb AS quota_in_mb, + + | | COALESCE(quota_usage.total_size, (0)::numeric) AS rolsize_in_bytes + + | | FROM ((diskquota.quota_config + + | | JOIN pg_roles ON ((quota_config.targetoid = pg_roles.oid))) + + | | LEFT JOIN quota_usage ON ((pg_roles.oid = quota_usage.relowner))) + + | | WHERE (quota_config.quotatype = 1); + diskquota | show_fast_role_tablespace_quota_view | WITH default_tablespace AS ( + + | | SELECT pg_database.dattablespace + + | | FROM pg_database + + | | WHERE (pg_database.datname = current_database()) + + | | ), quota_usage AS ( + + | | SELECT pg_class.relowner, + + | | CASE + + | | WHEN (pg_class.reltablespace = (0)::oid) THEN default_tablespace.dattablespace + + | | ELSE pg_class.reltablespace + + | | END AS reltablespace, + + | | sum(table_size.size) AS total_size + + | | FROM diskquota.table_size, + + | | pg_class, + + | | default_tablespace + + | | WHERE ((table_size.tableid = pg_class.oid) AND (table_size.segid = (-1))) + + | | GROUP BY pg_class.relowner, pg_class.reltablespace, default_tablespace.dattablespace + + | | ), full_quota_config AS ( + + | | SELECT config.targetoid, + + | | target.tablespaceoid, + + | | config.quotalimitmb + + | | 
FROM diskquota.quota_config config, + + | | diskquota.target target + + | | WHERE (((config.targetoid = target.primaryoid) AND (config.quotatype = target.quotatype)) AND (config.quotatype = 3)) + + | | ) + + | | SELECT pg_roles.rolname AS role_name, + + | | full_quota_config.targetoid AS role_oid, + + | | pg_tablespace.spcname AS tablespace_name, + + | | full_quota_config.tablespaceoid AS tablespace_oid, + + | | full_quota_config.quotalimitmb AS quota_in_mb, + + | | COALESCE(quota_usage.total_size, (0)::numeric) AS rolsize_tablespace_in_bytes + + | | FROM (((full_quota_config + + | | JOIN pg_roles ON ((full_quota_config.targetoid = pg_roles.oid))) + + | | JOIN pg_tablespace ON ((full_quota_config.tablespaceoid = pg_tablespace.oid))) + + | | LEFT JOIN quota_usage ON (((pg_roles.oid = quota_usage.relowner) AND (pg_tablespace.oid = quota_usage.reltablespace)))); + diskquota | show_fast_schema_quota_view | WITH quota_usage AS ( + + | | SELECT pg_class.relnamespace, + + | | sum(table_size.size) AS total_size + + | | FROM diskquota.table_size, + + | | pg_class + + | | WHERE ((table_size.tableid = pg_class.oid) AND (table_size.segid = (-1))) + + | | GROUP BY pg_class.relnamespace + + | | ) + + | | SELECT pg_namespace.nspname AS schema_name, + + | | quota_config.targetoid AS schema_oid, + + | | quota_config.quotalimitmb AS quota_in_mb, + + | | COALESCE(quota_usage.total_size, (0)::numeric) AS nspsize_in_bytes + + | | FROM ((diskquota.quota_config + + | | JOIN pg_namespace ON ((quota_config.targetoid = pg_namespace.oid))) + + | | LEFT JOIN quota_usage ON ((pg_namespace.oid = quota_usage.relnamespace))) + + | | WHERE (quota_config.quotatype = 0); + diskquota | show_fast_schema_tablespace_quota_view | WITH default_tablespace AS ( + + | | SELECT pg_database.dattablespace + + | | FROM pg_database + + | | WHERE (pg_database.datname = current_database()) + + | | ), quota_usage AS ( + + | | SELECT pg_class.relnamespace, + + | | CASE + + | | WHEN (pg_class.reltablespace = (0)::oid) THEN default_tablespace.dattablespace + + | | ELSE pg_class.reltablespace + + | | END AS reltablespace, + + | | sum(table_size.size) AS total_size + + | | FROM diskquota.table_size, + + | | pg_class, + + | | default_tablespace + + | | WHERE ((table_size.tableid = pg_class.oid) AND (table_size.segid = (-1))) + + | | GROUP BY pg_class.relnamespace, pg_class.reltablespace, default_tablespace.dattablespace + + | | ), full_quota_config AS ( + + | | SELECT config.targetoid, + + | | target.tablespaceoid, + + | | config.quotalimitmb + + | | FROM diskquota.quota_config config, + + | | diskquota.target target + + | | WHERE (((config.targetoid = target.primaryoid) AND (config.quotatype = target.quotatype)) AND (config.quotatype = 2)) + + | | ) + + | | SELECT pg_namespace.nspname AS schema_name, + + | | full_quota_config.targetoid AS schema_oid, + + | | pg_tablespace.spcname AS tablespace_name, + + | | full_quota_config.tablespaceoid AS tablespace_oid, + + | | full_quota_config.quotalimitmb AS quota_in_mb, + + | | COALESCE(quota_usage.total_size, (0)::numeric) AS nspsize_tablespace_in_bytes + + | | FROM (((full_quota_config + + | | JOIN pg_namespace ON ((full_quota_config.targetoid = pg_namespace.oid))) + + | | JOIN pg_tablespace ON ((full_quota_config.tablespaceoid = pg_tablespace.oid))) + + | | LEFT JOIN quota_usage ON (((pg_namespace.oid = quota_usage.relnamespace) AND (pg_tablespace.oid = quota_usage.reltablespace)))); +(6 rows) + +-- views end +DROP FUNCTION typeid_to_name (oid[]); diff --git 
a/upgrade_test/expected/2.0_cleanup_quota.out b/upgrade_test/expected/2.0_cleanup_quota.out new file mode 100644 index 00000000000..3935d709fd9 --- /dev/null +++ b/upgrade_test/expected/2.0_cleanup_quota.out @@ -0,0 +1 @@ +drop extension diskquota; diff --git a/upgrade_test/expected/2.0_install.out b/upgrade_test/expected/2.0_install.out new file mode 100644 index 00000000000..97593816666 --- /dev/null +++ b/upgrade_test/expected/2.0_install.out @@ -0,0 +1,14 @@ +-- cleanup previous diskquota installation +\! gpconfig -c shared_preload_libraries -v '' > /dev/null +\! gpstop -raf > /dev/null +\! dropdb --if-exists diskquota +NOTICE: database "diskquota" does not exist, skipping +-- TODO reset all diskquota GUC +\! gpstop -raf > /dev/null +-- setup basic environment +\! createdb diskquota +\! gpconfig -c shared_preload_libraries -v 'diskquota-2.0.so' > /dev/null +\! gpstop -raf > /dev/null +-- TODO setup GUC +\! gpconfig -c diskquota.naptime -v '1' > /dev/null +\! gpstop -raf > /dev/null diff --git a/upgrade_test/expected/2.0_migrate_to_version_2.0.out b/upgrade_test/expected/2.0_migrate_to_version_2.0.out new file mode 100644 index 00000000000..ff9b417aea7 --- /dev/null +++ b/upgrade_test/expected/2.0_migrate_to_version_2.0.out @@ -0,0 +1,10 @@ +\! gpconfig -c shared_preload_libraries -v 'diskquota-2.0.so' > /dev/null +\! gpstop -raf > /dev/null +\! gpconfig -s 'shared_preload_libraries' +Values on all segments are consistent +GUC : shared_preload_libraries +Master value: diskquota-2.0.so +Segment value: diskquota-2.0.so +\c +alter extension diskquota update to '2.0'; +\! sleep 5 diff --git a/upgrade_test/expected/2.0_set_quota.out b/upgrade_test/expected/2.0_set_quota.out new file mode 100644 index 00000000000..ef0f2b04c0f --- /dev/null +++ b/upgrade_test/expected/2.0_set_quota.out @@ -0,0 +1,63 @@ +\!gpconfig -s 'shared_preload_libraries' +Values on all segments are consistent +GUC : shared_preload_libraries +Master value: diskquota-2.0.so +Segment value: diskquota-2.0.so +create extension diskquota with version '2.0'; +\!sleep 5 +-- schema quota +create schema s1; +select diskquota.set_schema_quota('s1', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +create table s1.a(i int) distributed by (i); +insert into s1.a select generate_series(1, 10000000); -- ok. +-- role quota +create schema srole; +create role u1 nologin; +NOTICE: resource queue required -- using default resource queue "pg_default" +create table srole.b (t text) distributed by (t); +alter table srole.b owner to u1; +select diskquota.set_role_quota('u1', '1 MB'); + set_role_quota +---------------- + +(1 row) + +insert into srole.b select generate_series(1,100000); -- ok. +-- schema tablespace quota +\! mkdir -p /tmp/schemaspc +create schema spcs1; +create tablespace schemaspc location '/tmp/schemaspc'; +select diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc','1 MB'); + set_schema_tablespace_quota +----------------------------- + +(1 row) + +create table spcs1.a(i int) tablespace schemaspc distributed by (i); +insert into spcs1.a select generate_series(1,100000); -- ok. +-- role tablespace quota +\! 
mkdir -p /tmp/rolespc +create tablespace rolespc location '/tmp/rolespc'; +create role rolespcu1 nologin; +NOTICE: resource queue required -- using default resource queue "pg_default" +create schema rolespcrole; +create table rolespcrole.b (t text) tablespace rolespc distributed by (t); +alter table rolespcrole.b owner to rolespcu1; +select diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '1 MB'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +insert into rolespcrole.b select generate_series(1,100000); -- ok. +\!sleep 5 +-- leaked resource: +-- role u1, rolespcu1 +-- table s1.a, srole.b spcs1.a, rolespcrole.b +-- schema s1, srole, spcs1, rolespcrole +-- tablespace schemaspc, rolespc diff --git a/upgrade_test/expected/2.0_test_in_1.0_quota_create_in_2.0.out b/upgrade_test/expected/2.0_test_in_1.0_quota_create_in_2.0.out new file mode 100644 index 00000000000..2f9b5714cf8 --- /dev/null +++ b/upgrade_test/expected/2.0_test_in_1.0_quota_create_in_2.0.out @@ -0,0 +1,14 @@ +-- need run 1.0_set_quota before run this test +-- FIXME add version check here +\! sleep 5 +insert into s1.a select generate_series(1, 10000000); -- fail. +ERROR: schema's disk space quota exceeded with name:s1 +insert into srole.b select generate_series(1, 100000); -- fail. +ERROR: role's disk space quota exceeded with name:u1 +insert into rolespcrole.b select generate_series(1, 100000); -- ok. +insert into spcs1.a select generate_series(1, 100000); -- ok. +drop table s1.a, srole.b, spcs1.a, rolespcrole.b; +drop schema s1, srole, spcs1, rolespcrole; +drop tablespace rolespc; +drop tablespace schemaspc; +drop role u1, rolespcu1; diff --git a/upgrade_test/expected/clean.out b/upgrade_test/expected/clean.out deleted file mode 100644 index 1090a18ac34..00000000000 --- a/upgrade_test/expected/clean.out +++ /dev/null @@ -1,44 +0,0 @@ -DROP TABLE IF EXISTS badquota.t1; -DROP TABLE IF EXISTS badbody_schema.t2; -DROP ROLE IF EXISTS testbody; -DROP SCHEMA IF EXISTS badquota; -DROP ROLE IF EXISTS badbody; -DROP SCHEMA IF EXISTS badbody_scehma; -NOTICE: schema "badbody_scehma" does not exist, skipping -DROP SCHEMA IF EXISTS deleteschema; -NOTICE: schema "deleteschema" does not exist, skipping -DROP SCHEMA IF EXISTS srs1; -NOTICE: schema "srs1" does not exist, skipping -DROP SCHEMA IF EXISTS srr1; -NOTICE: schema "srr1" does not exist, skipping -DROP SCHEMA IF EXISTS srE; -NOTICE: schema "sre" does not exist, skipping -DROP SCHEMA IF EXISTS s1; -NOTICE: schema "s1" does not exist, skipping -DROP SCHEMA IF EXISTS s2; -NOTICE: schema "s2" does not exist, skipping -DROP SCHEMA IF EXISTS s3; -NOTICE: schema "s3" does not exist, skipping -DROP SCHEMA IF EXISTS s4; -NOTICE: schema "s4" does not exist, skipping -DROP SCHEMA IF EXISTS s5; -DROP SCHEMA IF EXISTS s6; -NOTICE: schema "s6" does not exist, skipping -DROP SCHEMA IF EXISTS s7; -DROP TABLE IF EXISTS b; -NOTICE: table "b" does not exist, skipping -DROP TABLE IF EXISTS b2; -NOTICE: table "b2" does not exist, skipping -DROP ROLE IF EXISTS srerole; -NOTICE: role "srerole" does not exist, skipping -DROP ROLE IF EXISTS srole; -NOTICE: role "srole" does not exist, skipping -DROP ROLE IF EXISTS strole; -NOTICE: role "strole" does not exist, skipping -DROP ROLE IF EXISTS u1; -NOTICE: role "u1" does not exist, skipping -DROP ROLE IF EXISTS u2; -NOTICE: role "u2" does not exist, skipping -DROP ROLE IF EXISTS u3temp; -NOTICE: role "u3temp" does not exist, skipping -DROP EXTENSION diskquota; diff --git a/upgrade_test/expected/downgrade_extension.out 
b/upgrade_test/expected/downgrade_extension.out deleted file mode 100644 index d10f1216c50..00000000000 --- a/upgrade_test/expected/downgrade_extension.out +++ /dev/null @@ -1,2 +0,0 @@ -\set old_version `echo $OLD_VERSION` -alter extension diskquota update to :'old_version'; diff --git a/upgrade_test/expected/init.out b/upgrade_test/expected/init.out deleted file mode 100644 index 12069b85663..00000000000 --- a/upgrade_test/expected/init.out +++ /dev/null @@ -1,21 +0,0 @@ --- start_ignore -\! gpconfig -c shared_preload_libraries -v $(./data/current_binary_name) > /dev/null --- end_ignore -\! echo $? -0 --- start_ignore -\! gpconfig -c diskquota.naptime -v 2 > /dev/null --- end_ignore -\! echo $? -0 --- start_ignore -\! gpconfig -c max_worker_processes -v 20 > /dev/null --- end_ignore -\! echo $? -0 --- start_ignore -\! gpstop -raf > /dev/null --- end_ignore -\! echo $? -0 -\! sleep 10 diff --git a/upgrade_test/expected/install_new_version.out b/upgrade_test/expected/install_new_version.out deleted file mode 100644 index cbc8a21ca0b..00000000000 --- a/upgrade_test/expected/install_new_version.out +++ /dev/null @@ -1,2 +0,0 @@ -\! install_new_version_diskquota -\! gpstop -raf > /dev/null diff --git a/upgrade_test/expected/install_old_version.out b/upgrade_test/expected/install_old_version.out deleted file mode 100644 index 1622491202c..00000000000 --- a/upgrade_test/expected/install_old_version.out +++ /dev/null @@ -1,2 +0,0 @@ -\! install_old_version_diskquota -\! gpstop -raf > /dev/null diff --git a/upgrade_test/expected/prepare.out b/upgrade_test/expected/prepare.out deleted file mode 100644 index 84afcfabf60..00000000000 --- a/upgrade_test/expected/prepare.out +++ /dev/null @@ -1,76 +0,0 @@ -CREATE EXTENSION diskquota; --- start_ignore -\! gpstop -u -20190319:07:07:05:020219 gpstop:df38f510da4b:gpadmin-[INFO]:-Starting gpstop with args: -u -20190319:07:07:05:020219 gpstop:df38f510da4b:gpadmin-[INFO]:-Gathering information and validating the environment... -20190319:07:07:05:020219 gpstop:df38f510da4b:gpadmin-[INFO]:-Obtaining Greenplum Master catalog information -20190319:07:07:05:020219 gpstop:df38f510da4b:gpadmin-[INFO]:-Obtaining Segment details from master... -20190319:07:07:05:020219 gpstop:df38f510da4b:gpadmin-[INFO]:-Greenplum Version: 'postgres (Greenplum Database) 6.0.0-alpha.0+dev.16105.gdfbfc2b build dev' -20190319:07:07:05:020219 gpstop:df38f510da4b:gpadmin-[INFO]:-Signalling all postmaster processes to reload -SELECT diskquota.init_table_size_table(); - init_table_size_table ------------------------ - -(1 row) - --- end_ignore -SELECT pg_sleep(15); - pg_sleep ----------- - -(1 row) - --- prepare a schema that has reached quota limit -CREATE SCHEMA badquota; -SELECT diskquota.set_schema_quota('badquota', '1 MB'); - set_schema_quota ------------------- - -(1 row) - -DROP ROLE IF EXISTS testbody; -NOTICE: role "testbody" does not exist, skipping -CREATE ROLE testbody; -NOTICE: resource queue required -- using default resource queue "pg_default" -CREATE TABLE badquota.t1(i INT) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
-ALTER TABLE badquota.t1 OWNER TO testbody; -INSERT INTO badquota.t1 SELECT generate_series(0, 100000); -SELECT pg_sleep(10); - pg_sleep ----------- - -(1 row) - --- expect fail -INSERT INTO badquota.t1 SELECT generate_series(0, 10); -ERROR: schema's disk space quota exceeded with name:badquota --- prepare a role that has reached quota limit -DROP SCHEMA IF EXISTS badbody_schema; -NOTICE: schema "badbody_schema" does not exist, skipping -CREATE SCHEMA badbody_schema; -DROP ROLE IF EXISTS badbody; -NOTICE: role "badbody" does not exist, skipping -CREATE ROLE badbody; -NOTICE: resource queue required -- using default resource queue "pg_default" -SELECT diskquota.set_role_quota('badbody', '2 MB'); - set_role_quota ----------------- - -(1 row) - -CREATE TABLE badbody_schema.t2(i INT) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. -ALTER TABLE badbody_schema.t2 OWNER TO badbody; -INSERT INTO badbody_schema.t2 SELECT generate_series(0, 100000); -SELECT pg_sleep(10); - pg_sleep ----------- - -(1 row) - --- expect fail -INSERT INTO badbody_schema.t2 SELECT generate_series(0, 10); -ERROR: role's disk space quota exceeded with name:badbody diff --git a/upgrade_test/expected/set_config.out b/upgrade_test/expected/set_config.out deleted file mode 100644 index da44d8fb407..00000000000 --- a/upgrade_test/expected/set_config.out +++ /dev/null @@ -1,94 +0,0 @@ --- Test schema -CREATE SCHEMA s1; -SELECT diskquota.set_schema_quota('s1', '1 MB'); - set_schema_quota ------------------- - -(1 row) - --- Test delete disk quota -CREATE SCHEMA deleteschema; -SELECT diskquota.set_schema_quota('deleteschema', '1 MB'); - set_schema_quota ------------------- - -(1 row) - --- test rename schema -CREATE SCHEMA srs1; -SELECT diskquota.set_schema_quota('srs1', '1 MB'); - set_schema_quota ------------------- - -(1 row) - --- test rename role -CREATE SCHEMA srr1; -DROP ROLE IF EXISTS srerole; -CREATE ROLE srerole NOLOGIN; -NOTICE: resource queue required -- using default resource queue "pg_default" -SELECT diskquota.set_role_quota('srerole', '1MB'); - set_role_quota ----------------- - -(1 row) - --- Test re-set_schema_quota -CREATE SCHEMA srE; -SELECT diskquota.set_schema_quota('srE', '1 MB'); - set_schema_quota ------------------- - -(1 row) - --- Test role quota -CREATE SCHEMA srole; -DROP ROLE IF EXISTS u1; -NOTICE: role "u1" does not exist, skipping -DROP ROLE IF EXISTS u2; -NOTICE: role "u2" does not exist, skipping -CREATE ROLE u1 NOLOGIN; -NOTICE: resource queue required -- using default resource queue "pg_default" -CREATE ROLE u2 NOLOGIN; -NOTICE: resource queue required -- using default resource queue "pg_default" -CREATE TABLE b (t TEXT) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 't' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. -ALTER TABLE b OWNER TO u1; -CREATE TABLE b2 (t TEXT) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 't' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. 
Make sure column(s) chosen are the optimal data distribution key to minimize skew. -ALTER TABLE b2 OWNER TO u1; -SELECT diskquota.set_role_quota('u1', '1 MB'); - set_role_quota ----------------- - -(1 row) - --- Test temp table restrained by role id -CREATE SCHEMA strole; -DROP ROLE IF EXISTS u3temp; -CREATE ROLE u3temp NOLOGIN; -NOTICE: resource queue required -- using default resource queue "pg_default" -SELECT diskquota.set_role_quota('u3temp', '1MB'); - set_role_quota ----------------- - -(1 row) - --- Test toast -CREATE SCHEMA s5; -SELECT diskquota.set_schema_quota('s5', '1 MB'); - set_schema_quota ------------------- - -(1 row) - --- Test truncate -CREATE SCHEMA s7; -SELECT diskquota.set_schema_quota('s7', '1 MB'); - set_schema_quota ------------------- - -(1 row) - diff --git a/upgrade_test/expected/test_delete_quota.out b/upgrade_test/expected/test_delete_quota.out deleted file mode 100644 index cfa97bab5b0..00000000000 --- a/upgrade_test/expected/test_delete_quota.out +++ /dev/null @@ -1,34 +0,0 @@ --- Test delete disk quota --- CREATE SCHEMA deleteschema; --- SELECT diskquota.set_schema_quota('deleteschema', '1 MB'); -SET search_path TO deleteschema; -CREATE TABLE c (i INT) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. --- expect failed -INSERT INTO c SELECT generate_series(1,100000); -SELECT pg_sleep(10); - pg_sleep ----------- - -(1 row) - --- expect fail -INSERT INTO c SELECT generate_series(1,100); -ERROR: schema's disk space quota exceeded with name:deleteschema -SELECT diskquota.set_schema_quota('deleteschema', '-1 MB'); - set_schema_quota ------------------- - -(1 row) - -SELECT pg_sleep(20); - pg_sleep ----------- - -(1 row) - -INSERT INTO c SELECT generate_series(1,100); -DROP TABLE c; -RESET search_path; -DROP SCHEMA deleteschema; diff --git a/upgrade_test/expected/test_rename.out b/upgrade_test/expected/test_rename.out deleted file mode 100644 index c91de17882b..00000000000 --- a/upgrade_test/expected/test_rename.out +++ /dev/null @@ -1,64 +0,0 @@ --- test rename schema --- CREATE SCHEMA srs1; --- SELECT diskquota.set_schema_quota('srs1', '1 MB'); -set search_path to srs1; -CREATE TABLE a(i int) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
--- expect insert fail -INSERT INTO a SELECT generate_series(1,100000); -SELECT pg_sleep(5); - pg_sleep ----------- - -(1 row) - --- expect insert fail -INSERT INTO a SELECT generate_series(1,10); -ERROR: schema's disk space quota exceeded with name:srs1 -ALTER SCHEMA srs1 RENAME TO srs2; -SET search_path TO srs2; --- expect insert fail -INSERT INTO a SELECT generate_series(1,10); -ERROR: schema's disk space quota exceeded with name:srs2 --- test rename table -ALTER TABLE a RENAME TO a2; --- expect insert fail -INSERT INTO a2 SELECT generate_series(1,10); -ERROR: schema's disk space quota exceeded with name:srs2 -DROP TABLE a2; -RESET search_path; -DROP SCHEMA srs2; --- test rename role --- CREATE SCHEMA srr1; --- CREATE ROLE srerole NOLOGIN; --- SELECT diskquota.set_role_quota('srerole', '1MB'); -SET search_path TO srr1; -CREATE TABLE a(i int) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. -ALTER TABLE a OWNER TO srerole; --- expect insert fail -INSERT INTO a SELECT generate_series(1,100000); -SELECT pg_sleep(5); - pg_sleep ----------- - -(1 row) - --- expect insert fail -INSERT INTO a SELECT generate_series(1,10); -ERROR: role's disk space quota exceeded with name:srerole -ALTER ROLE srerole RENAME TO srerole2; --- expect insert fail -INSERT INTO a SELECT generate_series(1,10); -ERROR: role's disk space quota exceeded with name:srerole2 --- test rename table -ALTER TABLE a RENAME TO a2; --- expect insert fail -INSERT INTO a2 SELECT generate_series(1,10); -ERROR: role's disk space quota exceeded with name:srerole2 -DROP TABLE a2; -DROP ROLE srerole2; -RESET search_path; -DROP SCHEMA srr1; diff --git a/upgrade_test/expected/test_reschema.out b/upgrade_test/expected/test_reschema.out deleted file mode 100644 index 10d280dd0ee..00000000000 --- a/upgrade_test/expected/test_reschema.out +++ /dev/null @@ -1,36 +0,0 @@ --- Test re-set_schema_quota --- CREATE SCHEMA srE; --- SELECT diskquota.set_schema_quota('srE', '1 MB'); -SET search_path TO srE; -CREATE TABLE a(i int) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
--- expect insert fail -INSERT INTO a SELECT generate_series(1,100000); -SELECT pg_sleep(5); - pg_sleep ----------- - -(1 row) - --- expect insert fail when exceed quota limit -INSERT INTO a SELECT generate_series(1,1000); -ERROR: schema's disk space quota exceeded with name:sre --- set schema quota larger -SELECT diskquota.set_schema_quota('srE', '1 GB'); - set_schema_quota ------------------- - -(1 row) - -SELECT pg_sleep(20); - pg_sleep ----------- - -(1 row) - --- expect insert succeed -INSERT INTO a SELECT generate_series(1,1000); -DROP TABLE a; -RESET search_path; -DROP SCHEMA srE; diff --git a/upgrade_test/expected/test_role.out b/upgrade_test/expected/test_role.out deleted file mode 100644 index df883066aa0..00000000000 --- a/upgrade_test/expected/test_role.out +++ /dev/null @@ -1,46 +0,0 @@ --- Test role quota --- CREATE SCHEMA srole; --- SET search_path TO srole; --- --- CREATE ROLE u1 NOLOGIN; --- CREATE ROLE u2 NOLOGIN; --- CREATE TABLE b (t TEXT) DISTRIBUTED BY (t); --- ALTER TABLE b OWNER TO u1; --- CREATE TABLE b2 (t TEXT) DISTRIBUTED BY (t); --- ALTER TABLE b2 OWNER TO u1; --- --- SELECT diskquota.set_role_quota('u1', '1 MB'); -INSERT INTO b SELECT generate_series(1,100); --- expect insert fail -INSERT INTO b SELECT generate_series(1,100000); -SELECT pg_sleep(5); - pg_sleep ----------- - -(1 row) - --- expect insert fail -INSERT INTO b SELECT generate_series(1,100); -ERROR: role's disk space quota exceeded with name:u1 --- expect insert fail -INSERT INTO b2 SELECT generate_series(1,100); -ERROR: role's disk space quota exceeded with name:u1 -ALTER TABLE b OWNER TO u2; -SELECT pg_sleep(20); - pg_sleep ----------- - -(1 row) - --- expect insert succeed -INSERT INTO b SELECT generate_series(1,100); --- expect insert succeed -INSERT INTO b2 SELECT generate_series(1,100); -ALTER TABLE b OWNER TO badbody; --- expect insert fail -INSERT INTO b SELECT generate_series(1,100); -ERROR: role's disk space quota exceeded with name:badbody -DROP TABLE b, b2; -DROP ROLE u1, u2; -RESET search_path; -DROP SCHEMA srole; diff --git a/upgrade_test/expected/test_schema.out b/upgrade_test/expected/test_schema.out deleted file mode 100644 index ab4864ad472..00000000000 --- a/upgrade_test/expected/test_schema.out +++ /dev/null @@ -1,57 +0,0 @@ --- Test schema --- CREATE SCHEMA s1; --- SELECT diskquota.set_schema_quota('s1', '1 MB'); -SET search_path TO s1; -CREATE TABLE a(i int) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. -INSERT INTO a SELECT generate_series(1,100); --- expect insert fail -INSERT INTO a SELECT generate_series(1,100000); -SELECT pg_sleep(5); - pg_sleep ----------- - -(1 row) - --- expect insert fail -INSERT INTO a SELECT generate_series(1,100); -ERROR: schema's disk space quota exceeded with name:s1 -CREATE TABLE a2(i int) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
--- expect insert fail -INSERT INTO a2 SELECT generate_series(1,100); -ERROR: schema's disk space quota exceeded with name:s1 --- Test alter table set schema -CREATE SCHEMA s2; -ALTER TABLE s1.a SET SCHEMA s2; -SELECT pg_sleep(20); - pg_sleep ----------- - -(1 row) - --- expect insert succeed -INSERT INTO a2 SELECT generate_series(1,200); --- expect insert succeed -INSERT INTO s2.a SELECT generate_series(1,200); -ALTER TABLE s2.a SET SCHEMA badquota; --- expect failed -INSERT INTO badquota.a SELECT generate_series(0, 100); -ERROR: schema's disk space quota exceeded with name:badquota -SELECT pg_sleep(10); - pg_sleep ----------- - -(1 row) - -SELECT schema_name, quota_in_mb FROM diskquota.show_fast_schema_quota_view WHERE schema_name = 's1'; - schema_name | quota_in_mb --------------+------------- - s1 | 1 -(1 row) - -RESET search_path; -DROP TABLE s1.a2, badquota.a; -DROP SCHEMA s1, s2; diff --git a/upgrade_test/expected/test_tablespace_role.out b/upgrade_test/expected/test_tablespace_role.out deleted file mode 120000 index f7b0a38658d..00000000000 --- a/upgrade_test/expected/test_tablespace_role.out +++ /dev/null @@ -1 +0,0 @@ -../../expected/test_tablespace_role.out \ No newline at end of file diff --git a/upgrade_test/expected/test_tablespace_role_perseg.out b/upgrade_test/expected/test_tablespace_role_perseg.out deleted file mode 120000 index d95b77ecb94..00000000000 --- a/upgrade_test/expected/test_tablespace_role_perseg.out +++ /dev/null @@ -1 +0,0 @@ -../../expected/test_tablespace_role_perseg.out \ No newline at end of file diff --git a/upgrade_test/expected/test_tablespace_schema.out b/upgrade_test/expected/test_tablespace_schema.out deleted file mode 120000 index 13dffba2095..00000000000 --- a/upgrade_test/expected/test_tablespace_schema.out +++ /dev/null @@ -1 +0,0 @@ -../../expected/test_tablespace_schema.out \ No newline at end of file diff --git a/upgrade_test/expected/test_tablespace_schema_perseg.out b/upgrade_test/expected/test_tablespace_schema_perseg.out deleted file mode 120000 index 90dff77a254..00000000000 --- a/upgrade_test/expected/test_tablespace_schema_perseg.out +++ /dev/null @@ -1 +0,0 @@ -../../expected/test_tablespace_schema_perseg.out \ No newline at end of file diff --git a/upgrade_test/expected/test_temp_role.out b/upgrade_test/expected/test_temp_role.out deleted file mode 100644 index f3a415a54b4..00000000000 --- a/upgrade_test/expected/test_temp_role.out +++ /dev/null @@ -1,36 +0,0 @@ --- Test temp table restrained by role id --- CREATE SCHEMA strole; --- CREATE ROLE u3temp NOLOGIN; -SET search_path TO strole; --- SELECT diskquota.set_role_quota('u3temp', '1MB'); -CREATE TABLE a(i int) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. -ALTER TABLE a OWNER TO u3temp; -CREATE TEMP TABLE ta(i int); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
-ALTER TABLE ta OWNER TO u3temp; --- expected failed: fill temp table -INSERT INTO ta SELECT generate_series(1,100000); -SELECT pg_sleep(5); - pg_sleep ----------- - -(1 row) - --- expected failed: -INSERT INTO a SELECT generate_series(1,100); -ERROR: role's disk space quota exceeded with name:u3temp -DROP TABLE ta; -SELECT pg_sleep(20); - pg_sleep ----------- - -(1 row) - -INSERT INTO a SELECT generate_series(1,100); -DROP TABLE a; -DROP ROLE u3temp; -RESET search_path; -DROP SCHEMA strole; diff --git a/upgrade_test/expected/upgrade_extension.out b/upgrade_test/expected/upgrade_extension.out deleted file mode 100644 index 4f3982d3c80..00000000000 --- a/upgrade_test/expected/upgrade_extension.out +++ /dev/null @@ -1,2 +0,0 @@ -\set new_version `echo $NEW_VERSION` -alter extension diskquota update to :'new_version'; diff --git a/upgrade_test/schedule_1.0--2.0 b/upgrade_test/schedule_1.0--2.0 new file mode 100644 index 00000000000..2b40ab590f8 --- /dev/null +++ b/upgrade_test/schedule_1.0--2.0 @@ -0,0 +1,8 @@ +test: 1.0_install +test: 1.0_set_quota +test: 1.0_catalog +test: 2.0_migrate_to_version_2.0 +test: 2.0_catalog +# run 1.0 behavior test using 2.0 DDL and binary +test: 1.0_test_in_2.0_quota_create_in_1.0 +test: 1.0_cleanup_quota diff --git a/upgrade_test/schedule_2.0--1.0 b/upgrade_test/schedule_2.0--1.0 new file mode 100644 index 00000000000..55a959bad72 --- /dev/null +++ b/upgrade_test/schedule_2.0--1.0 @@ -0,0 +1,8 @@ +test: 2.0_install +test: 2.0_set_quota +test: 2.0_catalog +test: 1.0_migrate_to_version_1.0 +test: 1.0_catalog +# run 2.0 behavior test using 1.0 DDL and binary +test: 2.0_test_in_1.0_quota_create_in_2.0 +test: 2.0_cleanup_quota diff --git a/upgrade_test/sql/1.0_catalog.sql b/upgrade_test/sql/1.0_catalog.sql new file mode 100644 index 00000000000..e376725ef83 --- /dev/null +++ b/upgrade_test/sql/1.0_catalog.sql @@ -0,0 +1,80 @@ +CREATE FUNCTION typeid_to_name(oid[]) RETURNS name[] AS ' + WITH io AS ( + SELECT x.i AS index, x.o AS type_id FROM ( + SELECT generate_series(1, array_length($1, 1)) AS i, unnest($1) AS o + ) AS x + ) SELECT array_agg(typname order by io.index) FROM io, pg_type t WHERE io.type_id = t.oid; +' LANGUAGE sql STABLE; + +-- types +SELECT + t1.typname, + array_agg(t2.typname order by a.atttypid) typname +FROM + pg_namespace n, + pg_class c, + pg_type t1, + pg_type t2, + pg_attribute a +WHERE + n.nspname = 'diskquota' + AND c.oid = t1.typrelid + AND n.oid = t1.typnamespace + AND a.attrelid = c.oid + AND t2.oid = a.atttypid +GROUP BY + t1.typname +ORDER BY + t1.typname; +-- types end + +-- tables +SELECT + relname, + typeid_to_name(ARRAY[c.reltype]::oid[]) AS reltype, + typeid_to_name(ARRAY[c.reloftype]::oid[]) AS reloftype +FROM + pg_class c, + pg_namespace n +WHERE + c.relnamespace = n.oid + AND n.nspname = 'diskquota' + and c.relkind != 'v' +ORDER BY + relname; +-- tables end + +-- UDF +SELECT + proname, + typeid_to_name(ARRAY[prorettype]::oid[]) AS prorettype, + typeid_to_name(proargtypes) AS proargtypes, + typeid_to_name(proallargtypes) AS proallargtypes, + proargmodes, + prosrc, + probin, + proacl +FROM + pg_namespace n, + pg_proc p +WHERE + n.nspname = 'diskquota' + AND n.oid = p.pronamespace +ORDER BY + proname; +-- UDF end + +-- views +SELECT + schemaname, + viewname, + definition +FROM + pg_views +WHERE + schemaname = 'diskquota' +ORDER BY + schemaname, viewname; +-- views end + +DROP FUNCTION typeid_to_name(oid[]); diff --git a/upgrade_test/sql/1.0_cleanup_quota.sql b/upgrade_test/sql/1.0_cleanup_quota.sql new file mode 100644 index 
00000000000..3935d709fd9 --- /dev/null +++ b/upgrade_test/sql/1.0_cleanup_quota.sql @@ -0,0 +1 @@ +drop extension diskquota; diff --git a/upgrade_test/sql/1.0_install.sql b/upgrade_test/sql/1.0_install.sql new file mode 100644 index 00000000000..95f758a89ed --- /dev/null +++ b/upgrade_test/sql/1.0_install.sql @@ -0,0 +1,17 @@ +-- cleanup previous diskquota installation +\! gpconfig -c shared_preload_libraries -v '' > /dev/null +\! gpstop -raf > /dev/null +\! dropdb --if-exists diskquota + +-- TODO reset all diskquota GUC +\! gpstop -raf > /dev/null + +-- setup basic environment +\! createdb diskquota + +\! gpconfig -c shared_preload_libraries -v 'diskquota.so' > /dev/null +\! gpstop -raf > /dev/null + +-- TODO setup GUC +\! gpconfig -c diskquota.naptime -v '1' > /dev/null +\! gpstop -raf > /dev/null diff --git a/upgrade_test/sql/1.0_migrate_to_version_1.0.sql b/upgrade_test/sql/1.0_migrate_to_version_1.0.sql new file mode 100644 index 00000000000..6d9763ca66b --- /dev/null +++ b/upgrade_test/sql/1.0_migrate_to_version_1.0.sql @@ -0,0 +1,10 @@ +\! gpconfig -c shared_preload_libraries -v 'diskquota.so' > /dev/null +\! gpstop -raf > /dev/null + +\! gpconfig -s 'shared_preload_libraries' + +\c +alter extension diskquota update to '1.0'; +-- downgrading to 1.0 needs a reboot, the version check is not in 1.0 +-- worker status is undefined right after the downgrade +\! gpstop -arf > /dev/null diff --git a/upgrade_test/sql/1.0_not_work_using_2.x_binary.sql b/upgrade_test/sql/1.0_not_work_using_2.x_binary.sql new file mode 100644 index 00000000000..cc4420dd082 --- /dev/null +++ b/upgrade_test/sql/1.0_not_work_using_2.x_binary.sql @@ -0,0 +1,21 @@ +-- cleanup previous diskquota installation +\! gpconfig -c shared_preload_libraries -v '' +\! gpstop -raf > /dev/null +drop database if exists diskquota; + +-- TODO reset all diskquota GUC +\! gpstop -raf > /dev/null + +-- setup basic environment +create database diskquota; + +\! gpconfig -c shared_preload_libraries -v 'diskquota-1.0.so' +\! gpconfig -c diskquota.naptime -v '1' +\! gpstop -raf > /dev/null + +create extension diskquota version '1.0'; -- for now 1.0 installed + +\! gpconfig -c shared_preload_libraries -v 'diskquota-2.0.so' +\!
gpstop -raf > /dev/null + +-- FIXME check diskquota should prompt user to do upgrade diff --git a/upgrade_test/sql/1.0_set_quota.sql b/upgrade_test/sql/1.0_set_quota.sql new file mode 100644 index 00000000000..cf1516347a2 --- /dev/null +++ b/upgrade_test/sql/1.0_set_quota.sql @@ -0,0 +1,25 @@ +\!gpconfig -s 'shared_preload_libraries' + +create extension diskquota with version '1.0'; +\!sleep 5 + +-- schema quota +create schema s1; +select diskquota.set_schema_quota('s1', '1 MB'); +create table s1.a(i int) distributed by (i); +insert into s1.a select generate_series(1, 10000000); -- ok, but should fail after upgrade + +-- role quota +create schema srole; +create role u1 nologin; +create table srole.b (t text) distributed by (t); +alter table srole.b owner to u1; +select diskquota.set_role_quota('u1', '1 MB'); +insert into srole.b select generate_series(1,100000); -- ok, but should fail after upgrade + +\!sleep 5 + +-- leaked resource: +-- role u1 +-- table s1.a, srole.b +-- schema s1, srole diff --git a/upgrade_test/sql/1.0_test_in_2.0_quota_create_in_1.0.sql b/upgrade_test/sql/1.0_test_in_2.0_quota_create_in_1.0.sql new file mode 100644 index 00000000000..c6aeb2f478f --- /dev/null +++ b/upgrade_test/sql/1.0_test_in_2.0_quota_create_in_1.0.sql @@ -0,0 +1,11 @@ +-- need to run 1.0_set_quota before running this test +-- FIXME add version check here + +\!sleep 5 + +insert into s1.a select generate_series(1, 100); -- fail +insert into srole.b select generate_series(1, 100); -- fail + +drop table s1.a, srole.b; +drop schema s1, srole; +drop role u1; diff --git a/upgrade_test/sql/2.0_catalog.sql b/upgrade_test/sql/2.0_catalog.sql new file mode 100644 index 00000000000..ebf5f00aa56 --- /dev/null +++ b/upgrade_test/sql/2.0_catalog.sql @@ -0,0 +1,81 @@ +CREATE FUNCTION typeid_to_name(oid[]) RETURNS name[] AS ' + WITH io AS ( + SELECT x.i AS index, x.o AS type_id FROM ( + SELECT generate_series(1, array_length($1, 1)) AS i, unnest($1) AS o + ) AS x + ) SELECT array_agg(typname order by io.index) FROM io, pg_type t WHERE io.type_id = t.oid; +' LANGUAGE sql STABLE; + +-- types +SELECT + t1.typname, + array_agg(t2.typname order by a.atttypid) typname +FROM + pg_namespace n, + pg_class c, + pg_type t1, + pg_type t2, + pg_attribute a +WHERE + n.nspname = 'diskquota' + AND c.oid = t1.typrelid + AND n.oid = t1.typnamespace + AND a.attrelid = c.oid + AND t2.oid = a.atttypid +GROUP BY + t1.typname +ORDER BY + t1.typname; +-- types end + +-- tables +SELECT + relname, + typeid_to_name(ARRAY[c.reltype]::oid[]) as reltype, + typeid_to_name(ARRAY[c.reloftype]::oid[]) as reloftype +FROM + pg_class c, + pg_namespace n +WHERE + c.relnamespace = n.oid + AND n.nspname = 'diskquota' + and c.relkind != 'v' +ORDER BY + relname; +-- tables end + +-- UDF +SELECT + proname, + typeid_to_name(ARRAY[prorettype]::oid[]) as prorettype, + typeid_to_name(proargtypes) as proargtypes, + typeid_to_name(proallargtypes) as proallargtypes, + proargmodes, + prosrc, + probin, + proacl +FROM + pg_namespace n, + pg_proc p +WHERE + n.nspname = 'diskquota' + AND n.oid = p.pronamespace + AND p.proname != 'update_diskquota_db_list' -- update_diskquota_db_list in 1.0 cannot be dropped, this is acceptable +ORDER BY + proname; +-- UDF end + +-- views +SELECT + schemaname, + viewname, + definition +FROM + pg_views +WHERE + schemaname = 'diskquota' +ORDER BY + schemaname, viewname; +-- views end + +DROP FUNCTION typeid_to_name (oid[]); diff --git a/upgrade_test/sql/2.0_cleanup_quota.sql b/upgrade_test/sql/2.0_cleanup_quota.sql new file mode 100644 index
00000000000..3935d709fd9 --- /dev/null +++ b/upgrade_test/sql/2.0_cleanup_quota.sql @@ -0,0 +1 @@ +drop extension diskquota; diff --git a/upgrade_test/sql/2.0_install.sql b/upgrade_test/sql/2.0_install.sql new file mode 100644 index 00000000000..b51150f6fde --- /dev/null +++ b/upgrade_test/sql/2.0_install.sql @@ -0,0 +1,17 @@ +-- cleanup previous diskquota installation +\! gpconfig -c shared_preload_libraries -v '' > /dev/null +\! gpstop -raf > /dev/null +\! dropdb --if-exists diskquota + +-- TODO reset all diskquota GUC +\! gpstop -raf > /dev/null + +-- setup basic environment +\! createdb diskquota + +\! gpconfig -c shared_preload_libraries -v 'diskquota-2.0.so' > /dev/null +\! gpstop -raf > /dev/null + +-- TODO setup GUC +\! gpconfig -c diskquota.naptime -v '1' > /dev/null +\! gpstop -raf > /dev/null diff --git a/upgrade_test/sql/2.0_migrate_to_version_2.0.sql b/upgrade_test/sql/2.0_migrate_to_version_2.0.sql new file mode 100644 index 00000000000..f001f664f60 --- /dev/null +++ b/upgrade_test/sql/2.0_migrate_to_version_2.0.sql @@ -0,0 +1,8 @@ +\! gpconfig -c shared_preload_libraries -v 'diskquota-2.0.so' > /dev/null +\! gpstop -raf > /dev/null + +\! gpconfig -s 'shared_preload_libraries' + +\c +alter extension diskquota update to '2.0'; +\! sleep 5 diff --git a/upgrade_test/sql/2.0_not_work_using_1.x_binary.sql b/upgrade_test/sql/2.0_not_work_using_1.x_binary.sql new file mode 100644 index 00000000000..b8f3d099673 --- /dev/null +++ b/upgrade_test/sql/2.0_not_work_using_1.x_binary.sql @@ -0,0 +1,23 @@ +-- cleanup previous diskquota installation +\! gpconfig -c shared_preload_libraries -v '' > /dev/null +\! gpstop -raf > /dev/null +\! dropdb --if-exists diskquota + +-- TODO reset all diskquota GUC +\! gpstop -raf > /dev/null + +-- setup basic environment +\! createdb diskquota + +\! gpconfig -c shared_preload_libraries -v 'diskquota-2.0.so' +\! gpstop -raf > /dev/null +\! gpconfig -c diskquota.naptime -v '1' +\! gpstop -raf > /dev/null + +\c +create extension diskquota version '2.0'; -- for now 2.0 installed + +\! gpconfig -c shared_preload_libraries -v 'diskquota-1.0.so' +\! gpstop -raf > /dev/null + +-- FIXME check diskquota should prompt user to do downgrade diff --git a/upgrade_test/sql/2.0_set_quota.sql b/upgrade_test/sql/2.0_set_quota.sql new file mode 100644 index 00000000000..12a47e7b2bb --- /dev/null +++ b/upgrade_test/sql/2.0_set_quota.sql @@ -0,0 +1,44 @@ +\!gpconfig -s 'shared_preload_libraries' + +create extension diskquota with version '2.0'; +\!sleep 5 + +-- schema quota +create schema s1; +select diskquota.set_schema_quota('s1', '1 MB'); +create table s1.a(i int) distributed by (i); +insert into s1.a select generate_series(1, 10000000); -- ok. + +-- role quota +create schema srole; +create role u1 nologin; +create table srole.b (t text) distributed by (t); +alter table srole.b owner to u1; +select diskquota.set_role_quota('u1', '1 MB'); +insert into srole.b select generate_series(1,100000); -- ok. + +-- schema tablespace quota +\! mkdir -p /tmp/schemaspc +create schema spcs1; +create tablespace schemaspc location '/tmp/schemaspc'; +select diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc','1 MB'); +create table spcs1.a(i int) tablespace schemaspc distributed by (i); +insert into spcs1.a select generate_series(1,100000); -- ok. + +-- role tablespace quota +\!
mkdir -p /tmp/rolespc +create tablespace rolespc location '/tmp/rolespc'; +create role rolespcu1 nologin; +create schema rolespcrole; +create table rolespcrole.b (t text) tablespace rolespc distributed by (t); +alter table rolespcrole.b owner to rolespcu1; +select diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '1 MB'); +insert into rolespcrole.b select generate_series(1,100000); -- ok. + +\!sleep 5 + +-- leaked resource: +-- role u1, rolespcu1 +-- table s1.a, srole.b, spcs1.a, rolespcrole.b +-- schema s1, srole, spcs1, rolespcrole +-- tablespace schemaspc, rolespc diff --git a/upgrade_test/sql/2.0_test_in_1.0_quota_create_in_2.0.sql b/upgrade_test/sql/2.0_test_in_1.0_quota_create_in_2.0.sql new file mode 100644 index 00000000000..575beedafe2 --- /dev/null +++ b/upgrade_test/sql/2.0_test_in_1.0_quota_create_in_2.0.sql @@ -0,0 +1,16 @@ +-- need to run 2.0_set_quota before running this test +-- FIXME add version check here + +\! sleep 5 + +insert into s1.a select generate_series(1, 10000000); -- fail. +insert into srole.b select generate_series(1, 100000); -- fail. + +insert into rolespcrole.b select generate_series(1, 100000); -- ok. +insert into spcs1.a select generate_series(1, 100000); -- ok. + +drop table s1.a, srole.b, spcs1.a, rolespcrole.b; +drop schema s1, srole, spcs1, rolespcrole; +drop tablespace rolespc; +drop tablespace schemaspc; +drop role u1, rolespcu1; diff --git a/upgrade_test/sql/clean.sql b/upgrade_test/sql/clean.sql deleted file mode 100644 index 908aa891f77..00000000000 --- a/upgrade_test/sql/clean.sql +++ /dev/null @@ -1,28 +0,0 @@ -DROP TABLE IF EXISTS badquota.t1; -DROP TABLE IF EXISTS badbody_schema.t2; -DROP ROLE IF EXISTS testbody; -DROP SCHEMA IF EXISTS badquota; -DROP ROLE IF EXISTS badbody; -DROP SCHEMA IF EXISTS badbody_scehma; -DROP SCHEMA IF EXISTS deleteschema; -DROP SCHEMA IF EXISTS srs1; -DROP SCHEMA IF EXISTS srr1; -DROP SCHEMA IF EXISTS srE; -DROP SCHEMA IF EXISTS s1; -DROP SCHEMA IF EXISTS s2; -DROP SCHEMA IF EXISTS s3; -DROP SCHEMA IF EXISTS s4; -DROP SCHEMA IF EXISTS s5; -DROP SCHEMA IF EXISTS s6; -DROP SCHEMA IF EXISTS s7; - -DROP TABLE IF EXISTS b; -DROP TABLE IF EXISTS b2; -DROP ROLE IF EXISTS srerole; -DROP ROLE IF EXISTS srole; -DROP ROLE IF EXISTS strole; -DROP ROLE IF EXISTS u1; -DROP ROLE IF EXISTS u2; -DROP ROLE IF EXISTS u3temp; - -DROP EXTENSION diskquota; diff --git a/upgrade_test/sql/init.sql b/upgrade_test/sql/init.sql deleted file mode 100644 index 5ee8828d5ac..00000000000 --- a/upgrade_test/sql/init.sql +++ /dev/null @@ -1,19 +0,0 @@ --- start_ignore -\! gpconfig -c shared_preload_libraries -v $(./data/current_binary_name) > /dev/null --- end_ignore -\! echo $? --- start_ignore -\! gpconfig -c diskquota.naptime -v 2 > /dev/null --- end_ignore -\! echo $? --- start_ignore -\! gpconfig -c max_worker_processes -v 20 > /dev/null --- end_ignore -\! echo $? - --- start_ignore -\! gpstop -raf > /dev/null --- end_ignore -\! echo $? - -\! sleep 10 diff --git a/upgrade_test/sql/install_new_version.sql b/upgrade_test/sql/install_new_version.sql deleted file mode 100644 index cbc8a21ca0b..00000000000 --- a/upgrade_test/sql/install_new_version.sql +++ /dev/null @@ -1,2 +0,0 @@ -\! install_new_version_diskquota -\! gpstop -raf > /dev/null diff --git a/upgrade_test/sql/install_old_version.sql b/upgrade_test/sql/install_old_version.sql deleted file mode 100644 index 1622491202c..00000000000 --- a/upgrade_test/sql/install_old_version.sql +++ /dev/null @@ -1,2 +0,0 @@ -\! install_old_version_diskquota -\!
gpstop -raf > /dev/null diff --git a/upgrade_test/sql/prepare.sql b/upgrade_test/sql/prepare.sql deleted file mode 100644 index f3de240d784..00000000000 --- a/upgrade_test/sql/prepare.sql +++ /dev/null @@ -1,32 +0,0 @@ -CREATE EXTENSION diskquota; -SELECT diskquota.wait_for_worker_new_epoch(); --- start_ignore -\! gpstop -u -SELECT diskquota.init_table_size_table(); --- end_ignore -SELECT pg_sleep(15); - --- prepare a schema that has reached quota limit -CREATE SCHEMA badquota; -SELECT diskquota.set_schema_quota('badquota', '1 MB'); -DROP ROLE IF EXISTS testbody; -CREATE ROLE testbody; -CREATE TABLE badquota.t1(i INT) DISTRIBUTED BY (i); -ALTER TABLE badquota.t1 OWNER TO testbody; -INSERT INTO badquota.t1 SELECT generate_series(0, 100000); -SELECT pg_sleep(10); --- expect fail -INSERT INTO badquota.t1 SELECT generate_series(0, 10); - --- prepare a role that has reached quota limit -DROP SCHEMA IF EXISTS badbody_schema; -CREATE SCHEMA badbody_schema; -DROP ROLE IF EXISTS badbody; -CREATE ROLE badbody; -SELECT diskquota.set_role_quota('badbody', '2 MB'); -CREATE TABLE badbody_schema.t2(i INT) DISTRIBUTED BY (i); -ALTER TABLE badbody_schema.t2 OWNER TO badbody; -INSERT INTO badbody_schema.t2 SELECT generate_series(0, 100000); -SELECT pg_sleep(10); --- expect fail -INSERT INTO badbody_schema.t2 SELECT generate_series(0, 10); diff --git a/upgrade_test/sql/set_config.sql b/upgrade_test/sql/set_config.sql deleted file mode 100644 index 56d171e8f18..00000000000 --- a/upgrade_test/sql/set_config.sql +++ /dev/null @@ -1,41 +0,0 @@ --- Test schema -CREATE SCHEMA s1; -SELECT diskquota.set_schema_quota('s1', '1 MB'); --- Test delete disk quota -CREATE SCHEMA deleteschema; -SELECT diskquota.set_schema_quota('deleteschema', '1 MB'); --- test rename schema -CREATE SCHEMA srs1; -SELECT diskquota.set_schema_quota('srs1', '1 MB'); --- test rename role -CREATE SCHEMA srr1; -DROP ROLE IF EXISTS srerole; -CREATE ROLE srerole NOLOGIN; -SELECT diskquota.set_role_quota('srerole', '1MB'); --- Test re-set_schema_quota -CREATE SCHEMA srE; -SELECT diskquota.set_schema_quota('srE', '1 MB'); --- Test role quota -CREATE SCHEMA srole; - -DROP ROLE IF EXISTS u1; -DROP ROLE IF EXISTS u2; -CREATE ROLE u1 NOLOGIN; -CREATE ROLE u2 NOLOGIN; -CREATE TABLE b (t TEXT) DISTRIBUTED BY (i); -ALTER TABLE b OWNER TO u1; -CREATE TABLE b2 (t TEXT) DISTRIBUTED BY (i); -ALTER TABLE b2 OWNER TO u1; - -SELECT diskquota.set_role_quota('u1', '1 MB'); --- Test temp table restrained by role id -CREATE SCHEMA strole; -DROP ROLE IF EXISTS u3temp; -CREATE ROLE u3temp NOLOGIN; -SELECT diskquota.set_role_quota('u3temp', '1MB'); --- Test toast -CREATE SCHEMA s5; -SELECT diskquota.set_schema_quota('s5', '1 MB'); --- Test truncate -CREATE SCHEMA s7; -SELECT diskquota.set_schema_quota('s7', '1 MB'); diff --git a/upgrade_test/sql/test.sh b/upgrade_test/sql/test.sh deleted file mode 100644 index a720ced3678..00000000000 --- a/upgrade_test/sql/test.sh +++ /dev/null @@ -1,4 +0,0 @@ -function install_new_version_diskquota() { - echo "install_new_version" -} -export -f install_new_version_diskquota diff --git a/upgrade_test/sql/test_delete_quota.sql b/upgrade_test/sql/test_delete_quota.sql deleted file mode 100644 index 19151824e8c..00000000000 --- a/upgrade_test/sql/test_delete_quota.sql +++ /dev/null @@ -1,19 +0,0 @@ --- Test delete disk quota --- CREATE SCHEMA deleteschema; --- SELECT diskquota.set_schema_quota('deleteschema', '1 MB'); -SET search_path TO deleteschema; - -CREATE TABLE c (i INT) DISTRIBUTED BY (i); --- expect failed -INSERT INTO c SELECT 
generate_series(1,100000); -SELECT pg_sleep(10); --- expect fail -INSERT INTO c SELECT generate_series(1,100); -SELECT diskquota.set_schema_quota('deleteschema', '-1 MB'); -SELECT pg_sleep(20); - -INSERT INTO c SELECT generate_series(1,100); - -DROP TABLE c; -RESET search_path; -DROP SCHEMA deleteschema; diff --git a/upgrade_test/sql/test_manytable.sql b/upgrade_test/sql/test_manytable.sql deleted file mode 100644 index d724926c953..00000000000 --- a/upgrade_test/sql/test_manytable.sql +++ /dev/null @@ -1,30 +0,0 @@ --- start_ignore --- \! gpconfig -c diskquota.max_active_tables -v 2 > /dev/null --- end_ignore --- \! echo $? - -CREATE DATABASE test_manytable01; -CREATE DATABASE test_manytable02; - -\c test_manytable01 - -CREATE TABLE a01(i int) DISTRIBUTED BY (i); -CREATE TABLE a02(i int) DISTRIBUTED BY (i); -CREATE TABLE a03(i int) DISTRIBUTED BY (i); - -INSERT INTO a01 values(generate_series(0, 500)); -INSERT INTO a02 values(generate_series(0, 500)); -INSERT INTO a03 values(generate_series(0, 500)); - -\c test_manytable02 -CREATE TABLE b01(i int) DISTRIBUTED BY (i); -INSERT INTO b01 values(generate_series(0, 500)); - -\c postgres -DROP DATABASE test_manytable01; -DROP DATABASE test_manytable02; - --- start_ignore -\! gpconfig -r diskquota.max_active_tables -\! gpstop -far --- end_ignore diff --git a/upgrade_test/sql/test_rename.sql b/upgrade_test/sql/test_rename.sql deleted file mode 100644 index 5c2ece9df30..00000000000 --- a/upgrade_test/sql/test_rename.sql +++ /dev/null @@ -1,50 +0,0 @@ --- test rename schema --- CREATE SCHEMA srs1; --- SELECT diskquota.set_schema_quota('srs1', '1 MB'); -set search_path to srs1; -CREATE TABLE a(i int) DISTRIBUTED BY (i); --- expect insert fail -INSERT INTO a SELECT generate_series(1,100000); -SELECT pg_sleep(5); --- expect insert fail -INSERT INTO a SELECT generate_series(1,10); -ALTER SCHEMA srs1 RENAME TO srs2; -SET search_path TO srs2; - --- expect insert fail -INSERT INTO a SELECT generate_series(1,10); --- test rename table -ALTER TABLE a RENAME TO a2; --- expect insert fail -INSERT INTO a2 SELECT generate_series(1,10); - -DROP TABLE a2; -RESET search_path; -DROP SCHEMA srs2; - --- test rename role --- CREATE SCHEMA srr1; --- CREATE ROLE srerole NOLOGIN; --- SELECT diskquota.set_role_quota('srerole', '1MB'); -SET search_path TO srr1; -CREATE TABLE a(i int) DISTRIBUTED BY (i); -ALTER TABLE a OWNER TO srerole; - --- expect insert fail -INSERT INTO a SELECT generate_series(1,100000); -SELECT pg_sleep(5); --- expect insert fail -INSERT INTO a SELECT generate_series(1,10); -ALTER ROLE srerole RENAME TO srerole2; --- expect insert fail -INSERT INTO a SELECT generate_series(1,10); --- test rename table -ALTER TABLE a RENAME TO a2; --- expect insert fail -INSERT INTO a2 SELECT generate_series(1,10); - -DROP TABLE a2; -DROP ROLE srerole2; -RESET search_path; -DROP SCHEMA srr1; - diff --git a/upgrade_test/sql/test_reschema.sql b/upgrade_test/sql/test_reschema.sql deleted file mode 100644 index 73909297e2f..00000000000 --- a/upgrade_test/sql/test_reschema.sql +++ /dev/null @@ -1,20 +0,0 @@ --- Test re-set_schema_quota --- CREATE SCHEMA srE; --- SELECT diskquota.set_schema_quota('srE', '1 MB'); -SET search_path TO srE; -CREATE TABLE a(i int) DISTRIBUTED BY (i); --- expect insert fail -INSERT INTO a SELECT generate_series(1,100000); -SELECT pg_sleep(5); --- expect insert fail when exceed quota limit -INSERT INTO a SELECT generate_series(1,1000); --- set schema quota larger -SELECT diskquota.set_schema_quota('srE', '1 GB'); -SELECT pg_sleep(20); --- expect 
insert succeed -INSERT INTO a SELECT generate_series(1,1000); - -DROP TABLE a; -RESET search_path; -DROP SCHEMA srE; - diff --git a/upgrade_test/sql/test_role.sql b/upgrade_test/sql/test_role.sql deleted file mode 100644 index 08d19423486..00000000000 --- a/upgrade_test/sql/test_role.sql +++ /dev/null @@ -1,37 +0,0 @@ --- Test role quota - --- CREATE SCHEMA srole; --- SET search_path TO srole; --- --- CREATE ROLE u1 NOLOGIN; --- CREATE ROLE u2 NOLOGIN; --- CREATE TABLE b (t TEXT) DISTRIBUTED BY (i); --- ALTER TABLE b OWNER TO u1; --- CREATE TABLE b2 (t TEXT) DISTRIBUTED BY (i); --- ALTER TABLE b2 OWNER TO u1; --- --- SELECT diskquota.set_role_quota('u1', '1 MB'); - -INSERT INTO b SELECT generate_series(1,100); --- expect insert fail -INSERT INTO b SELECT generate_series(1,100000); -SELECT pg_sleep(5); --- expect insert fail -INSERT INTO b SELECT generate_series(1,100); --- expect insert fail -INSERT INTO b2 SELECT generate_series(1,100); -ALTER TABLE b OWNER TO u2; -SELECT pg_sleep(20); --- expect insert succeed -INSERT INTO b SELECT generate_series(1,100); --- expect insert succeed -INSERT INTO b2 SELECT generate_series(1,100); - -ALTER TABLE b OWNER TO badbody; --- expect insert fail -INSERT INTO b SELECT generate_series(1,100); - -DROP TABLE b, b2; -DROP ROLE u1, u2; -RESET search_path; -DROP SCHEMA srole; diff --git a/upgrade_test/sql/test_schema.sql b/upgrade_test/sql/test_schema.sql deleted file mode 100644 index b56ad59ef05..00000000000 --- a/upgrade_test/sql/test_schema.sql +++ /dev/null @@ -1,36 +0,0 @@ --- Test schema --- CREATE SCHEMA s1; --- SELECT diskquota.set_schema_quota('s1', '1 MB'); -SET search_path TO s1; - -CREATE TABLE a(i int) DISTRIBUTED BY (i); -INSERT INTO a SELECT generate_series(1,100); --- expect insert fail -INSERT INTO a SELECT generate_series(1,100000); -SELECT pg_sleep(5); --- expect insert fail -INSERT INTO a SELECT generate_series(1,100); -CREATE TABLE a2(i int) DISTRIBUTED BY (i); --- expect insert fail -INSERT INTO a2 SELECT generate_series(1,100); - --- Test alter table set schema -CREATE SCHEMA s2; -ALTER TABLE s1.a SET SCHEMA s2; -SELECT pg_sleep(20); --- expect insert succeed -INSERT INTO a2 SELECT generate_series(1,200); --- expect insert succeed -INSERT INTO s2.a SELECT generate_series(1,200); - -ALTER TABLE s2.a SET SCHEMA badquota; --- expect failed -INSERT INTO badquota.a SELECT generate_series(0, 100); - -SELECT pg_sleep(10); -SELECT schema_name, quota_in_mb FROM diskquota.show_fast_schema_quota_view WHERE schema_name = 's1'; - -RESET search_path; -DROP TABLE s1.a2, badquota.a; -DROP SCHEMA s1, s2; - diff --git a/upgrade_test/sql/test_tablespace_role.sql b/upgrade_test/sql/test_tablespace_role.sql deleted file mode 120000 index 1e694286c9a..00000000000 --- a/upgrade_test/sql/test_tablespace_role.sql +++ /dev/null @@ -1 +0,0 @@ -../../sql/test_tablespace_role.sql \ No newline at end of file diff --git a/upgrade_test/sql/test_tablespace_role_perseg.sql b/upgrade_test/sql/test_tablespace_role_perseg.sql deleted file mode 120000 index 46107ae84ba..00000000000 --- a/upgrade_test/sql/test_tablespace_role_perseg.sql +++ /dev/null @@ -1 +0,0 @@ -../../sql/test_tablespace_role_perseg.sql \ No newline at end of file diff --git a/upgrade_test/sql/test_tablespace_schema.sql b/upgrade_test/sql/test_tablespace_schema.sql deleted file mode 120000 index 74976f0842d..00000000000 --- a/upgrade_test/sql/test_tablespace_schema.sql +++ /dev/null @@ -1 +0,0 @@ -../../sql/test_tablespace_schema.sql \ No newline at end of file diff --git 
a/upgrade_test/sql/test_tablespace_schema_perseg.sql b/upgrade_test/sql/test_tablespace_schema_perseg.sql deleted file mode 120000 index f4fc79267b9..00000000000 --- a/upgrade_test/sql/test_tablespace_schema_perseg.sql +++ /dev/null @@ -1 +0,0 @@ -../../sql/test_tablespace_schema_perseg.sql \ No newline at end of file diff --git a/upgrade_test/sql/test_temp_role.sql b/upgrade_test/sql/test_temp_role.sql deleted file mode 100644 index 6f4b52298e8..00000000000 --- a/upgrade_test/sql/test_temp_role.sql +++ /dev/null @@ -1,24 +0,0 @@ --- Test temp table restrained by role id --- CREATE SCHEMA strole; --- CREATE ROLE u3temp NOLOGIN; -SET search_path TO strole; - --- SELECT diskquota.set_role_quota('u3temp', '1MB'); -CREATE TABLE a(i int) DISTRIBUTED BY (i); -ALTER TABLE a OWNER TO u3temp; -CREATE TEMP TABLE ta(i int); -ALTER TABLE ta OWNER TO u3temp; - --- expected failed: fill temp table -INSERT INTO ta SELECT generate_series(1,100000); -SELECT pg_sleep(5); --- expected failed: -INSERT INTO a SELECT generate_series(1,100); -DROP TABLE ta; -SELECT pg_sleep(20); -INSERT INTO a SELECT generate_series(1,100); - -DROP TABLE a; -DROP ROLE u3temp; -RESET search_path; -DROP SCHEMA strole; From 7c87d2922ec82a920b1d56f3108cf210fb08bdaa Mon Sep 17 00:00:00 2001 From: Chen Mulong Date: Tue, 8 Mar 2022 17:39:48 +0800 Subject: [PATCH 153/330] Fix gcc warning (#175) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reported by gcc 11.2.0: quotamodel.c:2115:25: warning: ‘strncpy’ output truncated before terminating nul copying 10 bytes from a string of the same length [-Wstringop-truncation] 2115 | strncpy(targettype_str, "ROLE_QUOTA", 10); | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ --- quotamodel.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/quotamodel.c b/quotamodel.c index f21630d18d7..5d7dd49b98a 100644 --- a/quotamodel.c +++ b/quotamodel.c @@ -2089,12 +2089,13 @@ show_blackmap(PG_FUNCTION_ARGS) while ((blackmap_entry = hash_seq_search(&(blackmap_ctx->blackmap_seq))) != NULL) { +#define _TARGETTYPE_STR_SIZE 32 Datum result; Datum values[9]; bool nulls[9]; HeapTuple tuple; BlackMapEntry keyitem; - char targettype_str[32]; + char targettype_str[_TARGETTYPE_STR_SIZE]; RelFileNode blocked_relfilenode; memcpy(&blocked_relfilenode, @@ -2112,19 +2113,19 @@ show_blackmap(PG_FUNCTION_ARGS) switch ((QuotaType) keyitem.targettype) { case ROLE_QUOTA: - strncpy(targettype_str, "ROLE_QUOTA", 10); + StrNCpy(targettype_str, "ROLE_QUOTA", _TARGETTYPE_STR_SIZE); break; case NAMESPACE_QUOTA: - strncpy(targettype_str, "NAMESPACE_QUOTA", 15); + StrNCpy(targettype_str, "NAMESPACE_QUOTA", _TARGETTYPE_STR_SIZE); break; case ROLE_TABLESPACE_QUOTA: - strncpy(targettype_str, "ROLE_TABLESPACE_QUOTA", 21); + StrNCpy(targettype_str, "ROLE_TABLESPACE_QUOTA", _TARGETTYPE_STR_SIZE); break; case NAMESPACE_TABLESPACE_QUOTA: - strncpy(targettype_str, "NAMESPACE_TABLESPACE_QUOTA", 26); + StrNCpy(targettype_str, "NAMESPACE_TABLESPACE_QUOTA", _TARGETTYPE_STR_SIZE); break; default: - strncpy(targettype_str, "UNKNOWN", 7); + StrNCpy(targettype_str, "UNKNOWN", _TARGETTYPE_STR_SIZE); break; } From a22cab1808e62d5d0d772dcc9f9b816c557ccb53 Mon Sep 17 00:00:00 2001 From: Chen Mulong Date: Tue, 8 Mar 2022 17:44:27 +0800 Subject: [PATCH 154/330] Remove Makefiles (#174) Everything has been moved to cmake. 
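For reference, the regress runs that these Makefiles used to drive are now declared in tests/CMakeLists.txt through the RegressTarget_Add helper from cmake/Regress.cmake. A simplified sketch of that invocation (option list abbreviated; see the actual CMakeLists.txt for the full set):

    # Declares a 'regress' target that drives pg_regress with the given
    # schedule, replacing the removed REGRESS/REGRESS_OPTS Makefile variables.
    RegressTarget_Add(regress
      INIT_FILE ${CMAKE_CURRENT_SOURCE_DIR}/init_file
      SQL_DIR ${CMAKE_CURRENT_SOURCE_DIR}/regress/sql
      EXPECTED_DIR ${CMAKE_CURRENT_SOURCE_DIR}/regress/expected
      SCHEDULE_FILE ${CMAKE_CURRENT_SOURCE_DIR}/regress/diskquota_schedule)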
--- tests/isolation2/Makefile | 12 ------------ tests/regress/Makefile | 17 ----------------- 2 files changed, 29 deletions(-) delete mode 100644 tests/isolation2/Makefile delete mode 100644 tests/regress/Makefile diff --git a/tests/isolation2/Makefile b/tests/isolation2/Makefile deleted file mode 100644 index b96bb1c22d1..00000000000 --- a/tests/isolation2/Makefile +++ /dev/null @@ -1,12 +0,0 @@ -PGXS := $(shell pg_config --pgxs) -include $(PGXS) - -## Build pg_isolation2_regress and install auxiliary scripts to the correct locations. -.PHONY: pg_isolation2_regress -pg_isolation2_regress: - $(MAKE) -C $(abs_top_srcdir)/src/test/isolation2 install - cp $(abs_top_srcdir)/src/test/isolation2/sql_isolation_testcase.py ./ - -.PHONY: installcheck -installcheck: pg_isolation2_regress - $(abs_top_srcdir)/src/test/isolation2/pg_isolation2_regress --init-file=../init_file --psqldir=$(PSQLDIR) --inputdir=./sql --schedule=./isolation2_schedule --load-extension=gp_inject_fault diff --git a/tests/regress/Makefile b/tests/regress/Makefile deleted file mode 100644 index d017bde6e09..00000000000 --- a/tests/regress/Makefile +++ /dev/null @@ -1,17 +0,0 @@ -REGRESS = dummy -REGRESS_OPTS = --schedule=diskquota_schedule --init-file=../init_file --init-file=./regress_init_file - -# FIXME: This check is hacky, since test_fetch_table_stat relies on the -# gp_inject_fault extension, we detect if the extension is built with -# greenplum by checking the output of the command 'pg_config --configure'. -# In the future, if the diskquota is built with GPDB7, or we backport the -# commit below to 6X_STABLE, we don't need this check. -# https://github.com/greenplum-db/gpdb/commit/8b897b12f6cb13753985faacab8e4053bf797a8b -ifneq (,$(findstring '--enable-debug-extensions',$(shell pg_config --configure))) -REGRESS_OPTS += --load-extension=gp_inject_fault -else -REGRESS_OPTS += --exclude-tests=test_fetch_table_stat -endif - -PGXS := $(shell pg_config --pgxs) -include $(PGXS) From e5b71bd72b528ca07bdad934b9f372b0317ac2dc Mon Sep 17 00:00:00 2001 From: Sasasu Date: Tue, 8 Mar 2022 12:37:48 +0800 Subject: [PATCH 155/330] cmake: also link RESULTS_DIR to working dir --- cmake/Regress.cmake | 10 +++++++++- tests/CMakeLists.txt | 2 ++ upgrade_test/CMakeLists.txt | 1 + 3 files changed, 12 insertions(+), 1 deletion(-) diff --git a/cmake/Regress.cmake b/cmake/Regress.cmake index 9b026995dbb..c2eabb09f33 100644 --- a/cmake/Regress.cmake +++ b/cmake/Regress.cmake @@ -4,6 +4,7 @@ # RegressTarget_Add( # SQL_DIR # EXPECTED_DIR +# RESULTS_DIR # [INIT_FILE ...] # [SCHEDULE_FILE ...] # [REGRESS ...] 
@@ -48,7 +49,7 @@ function(RegressTarget_Add name) cmake_parse_arguments( arg "" - "SQL_DIR;EXPECTED_DIR;DATA_DIR;REGRESS_TYPE" + "SQL_DIR;EXPECTED_DIR;RESULTS_DIR;DATA_DIR;REGRESS_TYPE" "REGRESS;REGRESS_OPTS;INIT_FILE;SCHEDULE_FILE" ${ARGN}) if (NOT arg_EXPECTED_DIR) @@ -59,6 +60,9 @@ function(RegressTarget_Add name) message(FATAL_ERROR "'SQL_DIR' needs to be specified.") endif() + if (NOT arg_RESULTS_DIR) + message(FATAL_ERROR "'RESULTS_DIR' needs to be specified") + endif() set(working_DIR "${CMAKE_CURRENT_BINARY_DIR}/${name}") file(MAKE_DIRECTORY ${working_DIR}) @@ -95,6 +99,7 @@ function(RegressTarget_Add name) get_filename_component(sql_DIR ${arg_SQL_DIR} ABSOLUTE) get_filename_component(expected_DIR ${arg_EXPECTED_DIR} ABSOLUTE) + get_filename_component(results_DIR ${arg_RESULTS_DIR} ABSOLUTE) if (arg_DATA_DIR) get_filename_component(data_DIR ${arg_DATA_DIR} ABSOLUTE) set(ln_data_dir_CMD ln -s ${data_DIR} data) @@ -108,6 +113,9 @@ function(RegressTarget_Add name) COMMAND ln -s ${sql_DIR} sql COMMAND rm -f expected COMMAND ln -s ${expected_DIR} expected + COMMAND rm -f results + COMMAND mkdir -p ${results_DIR} + COMMAND ln -s ${results_DIR} results COMMAND rm -f data COMMAND ${ln_data_dir_CMD} COMMAND diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index e625ee65915..fc3363c1512 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -6,6 +6,7 @@ RegressTarget_Add(regress ${CMAKE_CURRENT_SOURCE_DIR}/regress/regress_init_file SQL_DIR ${CMAKE_CURRENT_SOURCE_DIR}/regress/sql EXPECTED_DIR ${CMAKE_CURRENT_SOURCE_DIR}/regress/expected + RESULTS_DIR ${CMAKE_CURRENT_SOURCE_DIR}/regress/results DATA_DIR ${CMAKE_CURRENT_SOURCE_DIR}/data SCHEDULE_FILE ${CMAKE_CURRENT_SOURCE_DIR}/regress/diskquota_schedule REGRESS_OPTS @@ -19,6 +20,7 @@ RegressTarget_Add(isolation2 ${CMAKE_CURRENT_SOURCE_DIR}/init_file SQL_DIR ${CMAKE_CURRENT_SOURCE_DIR}/isolation2/sql EXPECTED_DIR ${CMAKE_CURRENT_SOURCE_DIR}/isolation2/expected + RESULTS_DIR ${CMAKE_CURRENT_SOURCE_DIR}/isolation2/results DATA_DIR ${CMAKE_CURRENT_SOURCE_DIR}/data SCHEDULE_FILE ${CMAKE_CURRENT_SOURCE_DIR}/isolation2/isolation2_schedule REGRESS_OPTS diff --git a/upgrade_test/CMakeLists.txt b/upgrade_test/CMakeLists.txt index 8bf81ca2f8f..286872966c9 100644 --- a/upgrade_test/CMakeLists.txt +++ b/upgrade_test/CMakeLists.txt @@ -5,6 +5,7 @@ RegressTarget_Add(upgrade ${CMAKE_CURRENT_SOURCE_DIR}/init_file SQL_DIR ${CMAKE_CURRENT_SOURCE_DIR}/sql EXPECTED_DIR ${CMAKE_CURRENT_SOURCE_DIR}/expected + RESULTS_DIR ${CMAKE_CURRENT_SOURCE_DIR}/results SCHEDULE_FILE ${CMAKE_CURRENT_SOURCE_DIR}/schedule_1.0--2.0 ${CMAKE_CURRENT_SOURCE_DIR}/schedule_2.0--1.0 From a044b523d2847b02099034c203fd111b2795654b Mon Sep 17 00:00:00 2001 From: Sasasu Date: Mon, 7 Mar 2022 10:50:31 +0800 Subject: [PATCH 156/330] show current binary and schema version --- diskquota.c | 46 +++++++++++++++++++++ tests/regress/expected/test_show_status.out | 14 +++---- tests/regress/sql/test_show_status.sql | 14 +++---- 3 files changed, 60 insertions(+), 14 deletions(-) diff --git a/diskquota.c b/diskquota.c index d7da502594a..13385455c70 100644 --- a/diskquota.c +++ b/diskquota.c @@ -1259,6 +1259,50 @@ static const char* diskquota_status_check_hard_limit() return hardlimit ? 
"enabled": "disabled"; } +static const char* diskquota_status_binary_version() +{ + return DISKQUOTA_VERSION; +} + +static const char* diskquota_status_schema_version() +{ + static char version[64] = {0}; + memset(version, 0, sizeof(version)); + + int ret = SPI_connect(); + Assert(ret = SPI_OK_CONNECT); + + ret = SPI_execute("select extversion from pg_extension where extname = 'diskquota'", true, 0); + + if(ret != SPI_OK_SELECT || SPI_processed != 1) { + ereport(WARNING, + (errmsg("[diskquota] when reading installed version lines %ld code = %d", + SPI_processed, ret))); + goto out; + } + + if (SPI_processed == 0) { + goto out; + } + + bool is_null = false; + Datum v = SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 1, &is_null); + Assert(is_null == false); + + char *vv = TextDatumGetCString(v); + if (vv == NULL) { + ereport(WARNING, + (errmsg("[diskquota] 'extversion' is empty in pg_class.pg_extension. may catalog corrupted"))); + goto out; + } + + StrNCpy(version, vv, sizeof(version)); + +out: + SPI_finish(); + return version; +} + PG_FUNCTION_INFO_V1(diskquota_status); Datum diskquota_status(PG_FUNCTION_ARGS) { @@ -1274,6 +1318,8 @@ Datum diskquota_status(PG_FUNCTION_ARGS) static const FeatureStatus fs[] = { {.name = "soft limits", .status = diskquota_status_check_soft_limit}, {.name = "hard limits", .status = diskquota_status_check_hard_limit}, + {.name = "current binary version", .status = diskquota_status_binary_version}, + {.name = "current schema version", .status = diskquota_status_schema_version}, }; FuncCallContext *funcctx; diff --git a/tests/regress/expected/test_show_status.out b/tests/regress/expected/test_show_status.out index 68997f9a775..0ec41dc5f3a 100644 --- a/tests/regress/expected/test_show_status.out +++ b/tests/regress/expected/test_show_status.out @@ -1,4 +1,4 @@ -select * from diskquota.status(); +select * from diskquota.status() where name not like '%version'; name | status -------------+---------- soft limits | enabled @@ -7,7 +7,7 @@ select * from diskquota.status(); \! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null \! gpstop -u > /dev/null -select * from diskquota.status(); +select * from diskquota.status() where name not like '%version'; name | status -------------+--------- soft limits | enabled @@ -16,7 +16,7 @@ select * from diskquota.status(); \! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null \! gpstop -u > /dev/null -select * from diskquota.status(); +select * from diskquota.status() where name not like '%version'; name | status -------------+---------- soft limits | enabled @@ -27,7 +27,7 @@ select from diskquota.pause(); -- (1 row) -select * from diskquota.status(); +select * from diskquota.status() where name not like '%version'; name | status -------------+---------- soft limits | paused @@ -36,7 +36,7 @@ select * from diskquota.status(); \! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null \! gpstop -u > /dev/null -select * from diskquota.status(); +select * from diskquota.status() where name not like '%version'; name | status -------------+-------- soft limits | paused @@ -45,7 +45,7 @@ select * from diskquota.status(); \! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null \! gpstop -u > /dev/null -select * from diskquota.status(); +select * from diskquota.status() where name not like '%version'; name | status -------------+---------- soft limits | paused @@ -58,7 +58,7 @@ select from diskquota.resume(); \! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null \! 
gpstop -u > /dev/null -select * from diskquota.status(); +select * from diskquota.status() where name not like '%version'; name | status -------------+---------- soft limits | enabled diff --git a/tests/regress/sql/test_show_status.sql b/tests/regress/sql/test_show_status.sql index b59af2c7b69..64fa4ebd270 100644 --- a/tests/regress/sql/test_show_status.sql +++ b/tests/regress/sql/test_show_status.sql @@ -1,25 +1,25 @@ -select * from diskquota.status(); +select * from diskquota.status() where name not like '%version'; \! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null \! gpstop -u > /dev/null -select * from diskquota.status(); +select * from diskquota.status() where name not like '%version'; \! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null \! gpstop -u > /dev/null -select * from diskquota.status(); +select * from diskquota.status() where name not like '%version'; select from diskquota.pause(); -select * from diskquota.status(); +select * from diskquota.status() where name not like '%version'; \! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null \! gpstop -u > /dev/null -select * from diskquota.status(); +select * from diskquota.status() where name not like '%version'; \! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null \! gpstop -u > /dev/null -select * from diskquota.status(); +select * from diskquota.status() where name not like '%version'; select from diskquota.resume(); \! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null \! gpstop -u > /dev/null -select * from diskquota.status(); +select * from diskquota.status() where name not like '%version'; From 2acd1cb4d7ab0d4c4a602a3a9181b581c0a26360 Mon Sep 17 00:00:00 2001 From: Chen Mulong Date: Thu, 10 Mar 2022 15:28:48 +0800 Subject: [PATCH 157/330] Upload every build to GCS and add build-info (#177) - Upload every build to GCS - Add a BuildInfo_Create cmake function to print some cmake vars into a build-info file which can be packaged. We avoid using `cmake -LAH` here since it misses some important local cmake variables, and most of the cached variables are not relevant to us. - Output some info to diskquota-build-info, which will be packaged by cpack. - Match the whole GP_VERSION from pg_config.h in Gpdb.cmake - Modify editorconfig to use 2 spaces for cmake. - Add Git.cmake to retrieve information from a git repository.
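As an illustration, the resulting diskquota-build-info is a plain "KEY = value" text file; with the variables wired up below, a packaged build would carry something like this (all values here are made up, not from a real build):

    DISKQUOTA_GIT_HASH = 151ed92
    DISKQUOTA_VERSION = 2.0.0
    GP_MAJOR_VERSION = 6
    GP_VERSION = 6.20.0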
--- .editorconfig | 2 +- CMakeLists.txt | 18 +++++++++++++++++ cmake/BuildInfo.cmake | 32 ++++++++++++++++++++++++++++++ cmake/Git.cmake | 9 +++++++++ cmake/Gpdb.cmake | 8 ++++++++ concourse/pipeline/job_def.lib.yml | 17 +++++++++------- concourse/pipeline/res_def.yml | 29 +++++++++++++++++++++++++++ 7 files changed, 107 insertions(+), 8 deletions(-) create mode 100644 cmake/BuildInfo.cmake create mode 100644 cmake/Git.cmake diff --git a/.editorconfig b/.editorconfig index ba9b81bbb1f..635d6f2296e 100644 --- a/.editorconfig +++ b/.editorconfig @@ -16,7 +16,7 @@ indent_size = 4 indent_style = space indent_size = 4 -[*.{dxl,mdp}] +[{*.cmake,CMakeLists.txt}] indent_style = space indent_size = 2 diff --git a/CMakeLists.txt b/CMakeLists.txt index e0a2994d602..1a2d1af1b12 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -7,8 +7,13 @@ project(diskquota) # generate 'compile_commands.json' set(CMAKE_EXPORT_COMPILE_COMMANDS ON) +# Retrieve repository information +include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/Git.cmake) +GitHash_Get(DISKQUOTA_GIT_HASH) + include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/Gpdb.cmake) + # set include directories for all sub-projects include_directories(${PG_INCLUDE_DIR_SERVER}) include_directories(${PG_INCLUDE_DIR}) # for libpq @@ -101,7 +106,20 @@ set(CPACK_PACKAGE_FILE_NAME include(CPack) # packing end +# Create build-info +# The diskquota-build-info shouldn't be copied to GPDB release by install_gpdb_component +include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/BuildInfo.cmake) +set(build_info_PATH ${CMAKE_CURRENT_BINARY_DIR}/diskquota-build-info) +BuildInfo_Create(${build_info_PATH} + VARS + DISKQUOTA_GIT_HASH + DISKQUOTA_VERSION + GP_MAJOR_VERSION + GP_VERSION) +# Create build-info end + # NOTE: keep install part at the end of file, to overwrite previous binary install(PROGRAMS "cmake/install_gpdb_component" DESTINATION ".") install(FILES ${diskquota_DDL} DESTINATION "share/postgresql/extension/") install(TARGETS diskquota DESTINATION "lib/postgresql/") +install(FILES ${build_info_PATH} DESTINATION ".") diff --git a/cmake/BuildInfo.cmake b/cmake/BuildInfo.cmake new file mode 100644 index 00000000000..6e256f34502 --- /dev/null +++ b/cmake/BuildInfo.cmake @@ -0,0 +1,32 @@ +# Create a build info file based on the given cmake variables +# For example: +# BuildInfo_Create( +# ${CMAKE_CURRENT_BINARY_DIR}/build-info +# VARS +# DISKQUOTA_GIT_HASH +# GP_MAJOR_VERSION +# ) +# will create a build info file: +# ❯ cat build-info +# DISKQUOTA_GIT_HASH = 151ed92 +# GP_MAJOR_VERSION = 6 + +function(BuildInfo_Create path) + cmake_parse_arguments( + arg + "" + "" + "VARS" + ${ARGN}) + + # Collect one "KEY = value" line per requested variable + foreach(key IN LISTS arg_VARS) + get_property(val VARIABLE PROPERTY ${key}) + list(APPEND info_list "${key} = ${val}") + endforeach() + file(WRITE ${path} "") + foreach(content IN LISTS info_list) + file(APPEND ${path} "${content}\n") + endforeach() +endfunction() + diff --git a/cmake/Git.cmake b/cmake/Git.cmake new file mode 100644 index 00000000000..81a68b1f1f4 --- /dev/null +++ b/cmake/Git.cmake @@ -0,0 +1,9 @@ +# get git hash +macro(GitHash_Get _git_hash) + find_package(Git) + execute_process( + COMMAND ${GIT_EXECUTABLE} log -1 --pretty=format:%h + OUTPUT_VARIABLE ${_git_hash} + OUTPUT_STRIP_TRAILING_WHITESPACE ERROR_QUIET + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}) +endmacro() diff --git a/cmake/Gpdb.cmake b/cmake/Gpdb.cmake index 25fcfa15b69..b98d6dd47fd 100644 --- a/cmake/Gpdb.cmake +++ b/cmake/Gpdb.cmake @@ -5,6 +5,7 @@ # PG_CONFIG - the path to the pg_config executable to be
used. this determines the # version to be built with. # GP_MAJOR_VERSION - the major version parsed from gpdb source +# GP_VERSION - The GP_VERSION string # PG_BIN_DIR - location of user executables # PG_INCLUDE_DIR - location of C header files of the client # PG_INCLUDE_DIR_SERVER - location of C header files for the server @@ -61,3 +62,10 @@ if (GP_MAJOR_VERSION) else() message(FATAL_ERROR "Cannot read GP_MAJORVERSION from '${PG_INCLUDE_DIR}/pg_config.h'") endif() +string(REGEX MATCH "#define *GP_VERSION *\"[^\"]*\"" macrodef "${config_header}") +string(REGEX REPLACE ".*\"\(.*\)\".*" "\\1" GP_VERSION "${macrodef}") +if (GP_VERSION) + message(STATUS "The exact GPDB version is '${GP_VERSION}'") +else() + message(FATAL_ERROR "Cannot read GP_VERSION from '${PG_INCLUDE_DIR}/pg_config.h'") +endif() diff --git a/concourse/pipeline/job_def.lib.yml b/concourse/pipeline/job_def.lib.yml index f7edc0f2501..3004d0bd99d 100644 --- a/concourse/pipeline/job_def.lib.yml +++ b/concourse/pipeline/job_def.lib.yml @@ -1,12 +1,13 @@ #@ load("base.lib.yml", "add_res_by_conf", "add_res_by_name") #@ load("@ytt:template", "template") -#! Job config for centos7 +#! Job config for centos6 #@ def centos6_gpdb6_conf(): res_build_image: centos6-gpdb6-image-build res_test_image: centos6-gpdb6-image-test res_gpdb_bin: bin_gpdb6_centos6 res_diskquota_bin: bin_diskquota_gpdb6_rhel6 +res_intermediates_bin: bin_diskquota_gpdb6_rhel6_intermediates os: rhel6 #@ end @@ -16,6 +17,7 @@ res_build_image: centos7-gpdb6-image-build res_test_image: centos7-gpdb6-image-test res_gpdb_bin: bin_gpdb6_centos7 res_diskquota_bin: bin_diskquota_gpdb6_rhel7 +res_intermediates_bin: bin_diskquota_gpdb6_rhel7_intermediates os: rhel7 #@ end @@ -25,6 +27,7 @@ res_build_image: rhel8-gpdb6-image-build res_test_image: rhel8-gpdb6-image-test res_gpdb_bin: bin_gpdb6_rhel8 res_diskquota_bin: bin_diskquota_gpdb6_rhel8 +res_intermediates_bin: bin_diskquota_gpdb6_rhel8_intermediates os: rhel8 #@ end @@ -34,6 +37,7 @@ res_build_image: ubuntu18-gpdb6-image-build res_test_image: ubuntu18-gpdb6-image-test res_gpdb_bin: bin_gpdb6_ubuntu18 res_diskquota_bin: bin_diskquota_gpdb6_ubuntu18 +res_intermediates_bin: bin_diskquota_gpdb6_ubuntu18_intermediates os: ubuntu18.04 #@ end @@ -54,11 +58,6 @@ file: diskquota_src/concourse/tasks/build_diskquota.yml image: #@ conf["res_build_image"] input_mapping: bin_gpdb: #@ conf["res_gpdb_bin"] - diskquota_artifacts: diskquota_artifacts -#! output_mapping is necessary. Otherwise we may use a wrong -#! diskquota_bin in the test task. 
-output_mapping: - "diskquota_artifacts": #@ "diskquota_artifacts_" + conf["os"] params: DISKQUOTA_OS: #@ conf["os"] #@ end @@ -70,7 +69,7 @@ file: diskquota_src/concourse/tasks/test_diskquota.yml image: #@ conf["res_test_image"] input_mapping: bin_gpdb: #@ conf["res_gpdb_bin"] - bin_diskquota: #@ "diskquota_artifacts_" + conf["os"] + bin_diskquota: diskquota_artifacts params: DISKQUOTA_OS: #@ conf["os"] #@ end @@ -81,6 +80,7 @@ params: #@ conf = param["conf"] #@ add_res_by_name(res_map, param["gpdb_src"]) #@ add_res_by_name(res_map, "bin_cmake") +#@ add_res_by_name(res_map, "bin_diskquota_intermediates") #@ add_res_by_conf(res_map, conf) name: #@ "build_test_" + conf["os"] max_in_flight: 10 @@ -107,4 +107,7 @@ plan: resource: #@ conf["res_diskquota_bin"] - #@ _build_task(conf) - #@ _test_task(conf) +- put: #@ conf["res_intermediates_bin"] + params: + file: diskquota_artifacts/diskquota-*_x86_64.tar.gz #@ end diff --git a/concourse/pipeline/res_def.yml b/concourse/pipeline/res_def.yml index 1d4f3e9ae3c..6ca128adc6d 100644 --- a/concourse/pipeline/res_def.yml +++ b/concourse/pipeline/res_def.yml @@ -157,6 +157,35 @@ resources: json_key: ((concourse-gcs-resources-service-account-key)) regexp: diskquota/released/gpdb6/diskquota-(.*)-ubuntu18.04_x86_64.tar.gz +# For uploading every build to gcs +- name: bin_diskquota_gpdb6_rhel6_intermediates + type: gcs + source: + bucket: gpdb-extensions-concourse-resources + json_key: ((extensions-gcs-service-account-key)) + versioned_file: intermediates/diskquota/diskquota_rhel6_gpdb6.tar.gz + +- name: bin_diskquota_gpdb6_rhel7_intermediates + type: gcs + source: + bucket: gpdb-extensions-concourse-resources + json_key: ((extensions-gcs-service-account-key)) + versioned_file: intermediates/diskquota/diskquota_rhel7_gpdb6.tar.gz + +- name: bin_diskquota_gpdb6_rhel8_intermediates + type: gcs + source: + bucket: gpdb-extensions-concourse-resources + json_key: ((extensions-gcs-service-account-key)) + versioned_file: intermediates/diskquota/diskquota_rhel8_gpdb6.tar.gz + +- name: bin_diskquota_gpdb6_ubuntu18_intermediates + type: gcs + source: + bucket: gpdb-extensions-concourse-resources + json_key: ((extensions-gcs-service-account-key)) + versioned_file: intermediates/diskquota/diskquota_ubuntu18_gpdb6.tar.gz + # Other dependencies - name: bin_cmake type: gcs From fd0b739bb52d8fe031a40ab8b06c51d37b274bfc Mon Sep 17 00:00:00 2001 From: Chen Mulong Date: Fri, 11 Mar 2022 11:08:16 +0800 Subject: [PATCH 158/330] Use cmake way to add C macro defines (#178) Otherwise it creates noise for LSP tooling, since each '-Dxxxx' starts with a '\n\t', which ccls cannot parse.
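A minimal sketch of the pattern (the defines below are generic placeholders, not the project's actual ones):

    # Each definition becomes its own -D argument in the compile command,
    # so compile_commands.json stays parseable by clangd/ccls.
    add_compile_definitions(
      FOO_VERSION="1.0"
      FOO_ENABLED=1)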
--- CMakeLists.txt | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 1a2d1af1b12..720e7bced89 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -43,13 +43,12 @@ else() "diskquota-${DISKQUOTA_MAJOR_VERSION}.${DISKQUOTA_MINOR_VERSION}") endif() -set(CMAKE_C_FLAGS - "${CMAKE_C_FLAGS} \ - -DDISKQUOTA_VERSION='\"${DISKQUOTA_VERSION}\"' \ - -DDISKQUOTA_MAJOR_VERSION=${DISKQUOTA_MAJOR_VERSION} \ - -DDISKQUOTA_MINOR_VERSION=${DISKQUOTA_MINOR_VERSION} \ - -DDISKQUOTA_PATCH_VERSION=${DISKQUOTA_PATCH_VERSION} \ - -DDISKQUOTA_BINARY_NAME='\"${DISKQUOTA_BINARY_NAME}\"'") +add_compile_definitions( + DISKQUOTA_VERSION="${DISKQUOTA_VERSION}" + DISKQUOTA_MAJOR_VERSION=${DISKQUOTA_MAJOR_VERSION} + DISKQUOTA_MINOR_VERSION=${DISKQUOTA_MINOR_VERSION} + DISKQUOTA_PATCH_VERSION=${DISKQUOTA_PATCH_VERSION} + DISKQUOTA_BINARY_NAME="${DISKQUOTA_BINARY_NAME}") list( APPEND From 6f9a1b03f83ab77ceb039e531233a9f19280a5ac Mon Sep 17 00:00:00 2001 From: Sasasu Date: Fri, 11 Mar 2022 11:20:34 +0800 Subject: [PATCH 159/330] ci: speed up the test --- tests/isolation2/expected/config.out | 8 +++----- tests/isolation2/sql/config.sql | 5 ++--- tests/regress/sql/config.sql | 7 +++---- upgrade_test/sql/downgrade_extension.sql | 2 -- upgrade_test/sql/upgrade_extension.sql | 2 -- 5 files changed, 8 insertions(+), 16 deletions(-) delete mode 100644 upgrade_test/sql/downgrade_extension.sql delete mode 100644 upgrade_test/sql/upgrade_extension.sql diff --git a/tests/isolation2/expected/config.out b/tests/isolation2/expected/config.out index d57d72f809a..8916425ef0b 100644 --- a/tests/isolation2/expected/config.out +++ b/tests/isolation2/expected/config.out @@ -1,13 +1,11 @@ !\retcode gpconfig -c shared_preload_libraries -v $(./data/current_binary_name); (exited with code 0) -!\retcode gpstop -raf; -(exited with code 0) - -!\retcode gpconfig -c diskquota.naptime -v 0; +!\retcode gpconfig -c diskquota.naptime -v 0 --skipvalidation; (exited with code 0) -!\retcode gpconfig -c max_worker_processes -v 20; +!\retcode gpconfig -c max_worker_processes -v 20 --skipvalidation; (exited with code 0) + !\retcode gpstop -raf; (exited with code 0) diff --git a/tests/isolation2/sql/config.sql b/tests/isolation2/sql/config.sql index 09111fd8a48..21c35e1796f 100644 --- a/tests/isolation2/sql/config.sql +++ b/tests/isolation2/sql/config.sql @@ -3,10 +3,9 @@ CREATE DATABASE diskquota; --end_ignore !\retcode gpconfig -c shared_preload_libraries -v $(./data/current_binary_name); -!\retcode gpstop -raf; +!\retcode gpconfig -c diskquota.naptime -v 0 --skipvalidation; +!\retcode gpconfig -c max_worker_processes -v 20 --skipvalidation; -!\retcode gpconfig -c diskquota.naptime -v 0; -!\retcode gpconfig -c max_worker_processes -v 20; !\retcode gpstop -raf; -- Show the values of all GUC variables diff --git a/tests/regress/sql/config.sql b/tests/regress/sql/config.sql index fc0deb11e31..b5ac3df7221 100644 --- a/tests/regress/sql/config.sql +++ b/tests/regress/sql/config.sql @@ -2,11 +2,10 @@ CREATE DATABASE diskquota; \! gpconfig -c shared_preload_libraries -v $(./data/current_binary_name); -\! gpstop -raf +\! gpconfig -c diskquota.naptime -v 0 --skipvalidation +\! gpconfig -c max_worker_processes -v 20 --skipvalidation +\! gpconfig -c diskquota.hard_limit -v "off" --skipvalidation -\! gpconfig -c diskquota.naptime -v 0 -\! gpconfig -c max_worker_processes -v 20 -\! gpconfig -c diskquota.hard_limit -v "off" \! 
gpstop -raf --end_ignore diff --git a/upgrade_test/sql/downgrade_extension.sql b/upgrade_test/sql/downgrade_extension.sql deleted file mode 100644 index d10f1216c50..00000000000 --- a/upgrade_test/sql/downgrade_extension.sql +++ /dev/null @@ -1,2 +0,0 @@ -\set old_version `echo $OLD_VERSION` -alter extension diskquota update to :'old_version'; diff --git a/upgrade_test/sql/upgrade_extension.sql b/upgrade_test/sql/upgrade_extension.sql deleted file mode 100644 index 4f3982d3c80..00000000000 --- a/upgrade_test/sql/upgrade_extension.sql +++ /dev/null @@ -1,2 +0,0 @@ -\set new_version `echo $NEW_VERSION` -alter extension diskquota update to :'new_version'; From e6256e7f70876d0b3062b65f7afdd443aeff634d Mon Sep 17 00:00:00 2001 From: Chen Mulong Date: Fri, 11 Mar 2022 16:43:51 +0800 Subject: [PATCH 160/330] Small tuning on logs (#154) - Add logs when launcher/bgworker terminates - Add missing information to the logs. --- diskquota.c | 69 +++++++++++++++++++++++++++++++++++------------- gp_activetable.c | 20 +++++++++++--- quotamodel.c | 14 ++++++---- 3 files changed, 76 insertions(+), 27 deletions(-) diff --git a/diskquota.c b/diskquota.c index 13385455c70..69a11a1d48a 100644 --- a/diskquota.c +++ b/diskquota.c @@ -129,7 +129,7 @@ _PG_init(void) /* diskquota.so must be in shared_preload_libraries to init SHM. */ if (!process_shared_preload_libraries_in_progress) { ereport(ERROR, ( - errmsg("booting diskquota-" DISKQUOTA_VERSION ", but " + errmsg("[diskquota] booting " DISKQUOTA_VERSION ", but " DISKQUOTA_BINARY_NAME " not in shared_preload_libraries. abort.") )); } else { @@ -294,7 +294,7 @@ disk_quota_worker_main(Datum main_arg) char *dbname = MyBgworkerEntry->bgw_name; ereport(LOG, - (errmsg("start disk quota worker process to monitor database:%s", + (errmsg("[diskquota] start disk quota worker process to monitor database:%s", dbname))); /* Establish signal handlers before unblocking signals. */ @@ -346,16 +346,16 @@ disk_quota_worker_main(Datum main_arg) ereportif( !has_error && times == 0, WARNING, - (errmsg("[diskquota] worker for '%s' detected the installed version is %d.%d, " + (errmsg("[diskquota] worker for \"%s\" detected the installed version is \"%d.%d\", " "but current version is %s. abort due to version not match", dbname, major, minor, DISKQUOTA_VERSION), - errhint("run alter extension diskquota update to '%d.%d'", + errhint("run alter extension diskquota update to \"%d.%d\"", DISKQUOTA_MAJOR_VERSION, DISKQUOTA_MINOR_VERSION))); int rc = WaitLatch(&MyProc->procLatch, WL_LATCH_SET|WL_TIMEOUT|WL_POSTMASTER_DEATH, diskquota_naptime * 1000L); ResetLatch(&MyProc->procLatch); if (rc & WL_POSTMASTER_DEATH) { - ereport(LOG, - (errmsg("[diskquota] bgworker for '%s' is being terminated by postmaster death.", dbname))); + ereport(LOG, (errmsg("[diskquota] bgworker for \"%s\" is being terminated by postmaster death.", + dbname))); proc_exit(-1); } @@ -398,7 +398,11 @@ disk_quota_worker_main(Datum main_arg) /* Emergency bailout if postmaster has died */ if (rc & WL_POSTMASTER_DEATH) + { + ereport(LOG, (errmsg("[diskquota] bgworker for \"%s\" is being terminated by postmaster death.", + dbname))); proc_exit(1); + } /* In case of a SIGHUP, just reload the configuration. 
*/ if (got_sighup) @@ -411,6 +415,8 @@ disk_quota_worker_main(Datum main_arg) /* if received sigterm, just exit the worker process */ if (got_sigterm) { + ereport(LOG, (errmsg("[diskquota] bgworker for \"%s\" is being terminated by SIGTERM.", + dbname))); /* clear the out-of-quota blacklist in shared memory */ invalidate_database_blackmap(MyDatabaseId); proc_exit(0); @@ -419,6 +425,9 @@ disk_quota_worker_main(Datum main_arg) /* Refresh quota model with init mode */ refresh_disk_quota_model(true); + ereport(LOG, + (errmsg("[diskquota] start bgworker loop for database: \"%s\"", + dbname))); /* * Main loop: do this until the SIGTERM handler tells us to terminate */ @@ -445,7 +454,10 @@ disk_quota_worker_main(Datum main_arg) /* Emergency bailout if postmaster has died */ if (rc & WL_POSTMASTER_DEATH) + { + ereport(LOG, (errmsg("[diskquota] bgworker for \"%s\" is being terminated by postmaster death.", dbname))); proc_exit(1); + } /* In case of a SIGHUP, just reload the configuration. */ if (got_sighup) @@ -463,6 +475,8 @@ disk_quota_worker_main(Datum main_arg) worker_increase_epoch(MyDatabaseId); } + ereport(LOG, (errmsg("[diskquota] bgworker for \"%s\" is being terminated by SIGTERM.", + dbname))); /* clear the out-of-quota blacklist in shared memory */ invalidate_database_blackmap(MyDatabaseId); proc_exit(0); @@ -525,6 +539,7 @@ disk_quota_launcher_main(Datum main_arg) */ start_workers_from_dblist(); + ereport(LOG, (errmsg("[diskquota launcher] start main loop"))); /* main loop: do this until the SIGTERM handler tells us to terminate. */ EnableClientWaitTimeoutInterrupt(); StartIdleResourceCleanupTimers(); @@ -552,7 +567,11 @@ disk_quota_launcher_main(Datum main_arg) /* Emergency bailout if postmaster has died */ if (rc & WL_POSTMASTER_DEATH) + { + ereport(LOG, + (errmsg("[diskquota launcher] launcher is being terminated by postmaster death."))); proc_exit(1); + } /* process extension ddl message */ if (got_sigusr1) @@ -575,12 +594,14 @@ disk_quota_launcher_main(Datum main_arg) loop_end = time(NULL); if (isAbnormalLoopTime(loop_end - loop_begin)) { - ereport(WARNING, (errmsg("[diskquota-loop] loop takes too much time %d/%d", + ereport(WARNING, (errmsg("[diskquota launcher] loop takes too much time %d/%d", (int)(loop_end - loop_begin), diskquota_naptime))); } } /* terminate all the diskquota worker processes before launcher exit */ + ereport(LOG, + (errmsg("[diskquota launcher] launcher is being terminated by SIGTERM."))); terminate_all_workers(); proc_exit(0); } @@ -614,11 +635,12 @@ create_monitor_db_table(void) */ PG_TRY(); { - if (SPI_OK_CONNECT != SPI_connect()) + int ret_code = SPI_connect(); + if (ret_code != SPI_OK_CONNECT ) { ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("unable to connect to execute internal query"))); + errmsg("[diskquota launcher] unable to connect to execute internal query. return code: %d.", ret_code))); } connected = true; PushActiveSnapshot(GetTransactionSnapshot()); @@ -627,9 +649,10 @@ create_monitor_db_table(void) /* debug_query_string need to be set for SPI_execute utility functions. 
*/ debug_query_string = sql; - if (SPI_execute(sql, false, 0) != SPI_OK_UTILITY) + ret_code = SPI_execute(sql, false, 0); + if (ret_code != SPI_OK_UTILITY) { - ereport(ERROR, (errmsg("[diskquota launcher] SPI_execute error, sql:'%s', errno:%d", sql, errno))); + ereport(ERROR, (errmsg("[diskquota launcher] SPI_execute error, sql: \"%s\", errno: %d, ret_code: %d.", sql, errno, ret_code))); } } PG_CATCH(); @@ -677,13 +700,20 @@ start_workers_from_dblist(void) PushActiveSnapshot(GetTransactionSnapshot()); ret = SPI_connect(); if (ret != SPI_OK_CONNECT) - ereport(ERROR, (errmsg("[diskquota launcher] SPI connect error, errno:%d", errno))); + ereport(ERROR, (errmsg("[diskquota launcher] SPI connect error, errno: %d, return code: %d.", errno, ret))); ret = SPI_execute("select dbid from diskquota_namespace.database_list;", true, 0); if (ret != SPI_OK_SELECT) - ereport(ERROR, (errmsg("select diskquota_namespace.database_list"))); + ereport(ERROR, (errmsg( + "[diskquota launcher] 'select diskquota_namespace.database_list', errno: %d, return code: %d", + errno, ret))); tupdesc = SPI_tuptable->tupdesc; if (tupdesc->natts != 1 || tupdesc->attrs[0]->atttypid != OIDOID) - ereport(ERROR, (errmsg("[diskquota launcher] table database_list corrupt, laucher will exit"))); + { + ereport(LOG, (errmsg("[diskquota launcher], natts/atttypid: %d.", + tupdesc->natts != 1 ? tupdesc->natts : tupdesc->attrs[0]->atttypid))); + ereport(ERROR, (errmsg("[diskquota launcher] table database_list corrupt, laucher will exit. natts: "))); + } + for (i = 0; i < SPI_processed; i++) { @@ -702,7 +732,7 @@ start_workers_from_dblist(void) ereport(LOG, (errmsg("[diskquota launcher] database(oid:%u) in table database_list is not a valid database", dbid))); continue; } - elog(WARNING, "start workers"); + ereport(WARNING, (errmsg("[diskquota launcher] start workers"))); if (!start_worker_by_dboid(dbid)) ereport(ERROR, (errmsg("[diskquota launcher] start worker process of database(oid:%u) failed", dbid))); num++; @@ -781,11 +811,12 @@ do_process_extension_ddl_message(MessageResult * code, ExtensionDDLMessage local */ PG_TRY(); { - if (SPI_OK_CONNECT != SPI_connect()) + int ret_code = SPI_connect(); + if (ret_code != SPI_OK_CONNECT) { ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("unable to connect to execute internal query"))); + errmsg("unable to connect to execute internal query. return code: %d.", ret_code))); } connected = true; PushActiveSnapshot(GetTransactionSnapshot()); @@ -966,7 +997,9 @@ del_dbid_from_database_list(Oid dbid) ret = SPI_execute(str.data, false, 0); if (ret != SPI_OK_DELETE) { - ereport(ERROR, (errmsg("[diskquota launcher] SPI_execute sql:'%s', errno:%d", str.data, errno))); + ereport(ERROR, + (errmsg("[diskquota launcher] SPI_execute sql: \"%s\", errno: %d, ret_code: %d.", + str.data, errno, ret))); } pfree(str.data); } diff --git a/gp_activetable.c b/gp_activetable.c index cbeb772f55d..a25659c80d7 100644 --- a/gp_activetable.c +++ b/gp_activetable.c @@ -383,11 +383,14 @@ diskquota_fetch_table_stat(PG_FUNCTION_ARGS) MemoryContext oldcontext; TupleDesc tupdesc; int extMajorVersion; - if (SPI_OK_CONNECT != SPI_connect()) + int ret_code = SPI_connect(); + if (ret_code != SPI_OK_CONNECT) { ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("unable to connect to execute internal query"))); + errmsg( + "unable to connect to execute internal query. 
return code: %d.", ret_code))); } extMajorVersion = get_ext_major_version(); SPI_finish(); @@ -418,7 +421,7 @@ diskquota_fetch_table_stat(PG_FUNCTION_ARGS) update_diskquota_db_list(MyDatabaseId, HASH_REMOVE); break; default: - ereport(ERROR, (errmsg("Unused mode number, transaction will be aborted"))); + ereport(ERROR, (errmsg("Unused mode number %d, transaction will be aborted", mode))); break; } @@ -859,7 +862,7 @@ load_table_size(HTAB *local_table_stats_map) } if (ret != SPI_OK_SELECT) - ereport(ERROR, (errmsg("[diskquota] load_table_size SPI_execute failed: error code %d", errno))); + ereport(ERROR, (errmsg("[diskquota] load_table_size SPI_execute failed: return code %d, error: %m", ret))); tupdesc = SPI_tuptable->tupdesc; if (tupdesc->natts != 3 || ((tupdesc)->attrs[0])->atttypid != OIDOID || ((tupdesc)->attrs[1])->atttypid != INT8OID || ((tupdesc)->attrs[2])->atttypid != INT2OID) { + if (tupdesc->natts != 3) + { + ereport(WARNING, (errmsg("[diskquota] tupdesc->natts: %d", tupdesc->natts))); + } + else + { + ereport(WARNING, (errmsg("[diskquota] attrs: %d, %d, %d", + tupdesc->attrs[0]->atttypid, tupdesc->attrs[1]->atttypid, tupdesc->attrs[2]->atttypid))); + } ereport(ERROR, (errmsg("[diskquota] table \"table_size\" is corrupted in database \"%s\"," " please recreate diskquota extension", get_database_name(MyDatabaseId)))); diff --git a/quotamodel.c b/quotamodel.c index 5d7dd49b98a..239e66a17a6 100644 --- a/quotamodel.c +++ b/quotamodel.c @@ -1296,11 +1296,12 @@ load_quotas(void) */ PG_TRY(); { - if (SPI_OK_CONNECT != SPI_connect()) + int ret_code = SPI_connect(); + if (ret_code != SPI_OK_CONNECT) { ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("[diskquota] unable to connect to execute SPI query"))); + errmsg("[diskquota] unable to connect to execute SPI query, return code: %d", ret_code))); } connected = true; PushActiveSnapshot(GetTransactionSnapshot()); @@ -1425,7 +1426,8 @@ do_load_quotas(void) { if (quota_info[quotaType].num_keys != 1) { ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("[diskquota] tablespace Oid MUST NOT be NULL for quota type: %d", quotaType))); + errmsg("[diskquota] tablespace Oid MUST NOT be NULL for quota type: %d. num_keys: %d", + quotaType, quota_info[quotaType].num_keys))); } update_limit_for_quota(quota_limit_mb * (1 << 20), segratio, quotaType, (Oid[]){targetOid}); } @@ -1730,6 +1732,7 @@ refresh_blackmap(PG_FUNCTION_ARGS) HASH_SEQ_STATUS hash_seq; HTAB *local_blackmap; HASHCTL hashctl; + int ret_code; if (!superuser()) errmsg("must be superuser to update blackmap"); @@ -1744,10 +1747,11 @@ refresh_blackmap(PG_FUNCTION_ARGS) hash_search(disk_quota_black_map, &blackmapentry->keyitem, HASH_REMOVE, NULL); LWLockRelease(diskquota_locks.black_map_lock); - if (SPI_connect() != SPI_OK_CONNECT) + ret_code = SPI_connect(); + if (ret_code != SPI_OK_CONNECT) ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("unable to connect to execute internal query"))); + errmsg("unable to connect to execute internal query, return code: %d", ret_code))); /* * Secondly, iterate over blackmap entries and add these entries to the local black map From 7c15f82936752f536cab68d25954ad083b3be271 Mon Sep 17 00:00:00 2001 From: Chen Mulong Date: Fri, 11 Mar 2022 17:12:38 +0800 Subject: [PATCH 161/330] Update gitignore (#180) - The build objects should always exist in build* directories now. - Add ignore patterns for IDEA, LSP and dinosaur developers.
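
One more note on the log tuning in the previous commit (#154): the conversion it applies at every SPI call site follows a single idiom — capture the return code in a local, compare it against the expected SPI_OK_* constant, and surface the code in the ereport instead of discarding it. A condensed, standalone sketch of that idiom; check_spi_connect() is an illustrative name, not a symbol in this tree.

#include "postgres.h"
#include "executor/spi.h"

/* Sketch of the SPI call-site idiom the log-tuning commit converges on;
 * the function name is illustrative, not a symbol in this tree. */
static void
check_spi_connect(void)
{
	int ret_code = SPI_connect();

	if (ret_code != SPI_OK_CONNECT)
		ereport(ERROR,
		        (errcode(ERRCODE_INTERNAL_ERROR),
		         errmsg("[diskquota] unable to connect to execute internal query, "
		                "return code: %d",
		                ret_code)));
}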
--- .gitignore | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/.gitignore b/.gitignore index 332118e5a41..375d7244005 100644 --- a/.gitignore +++ b/.gitignore @@ -1,12 +1,13 @@ -*.o -*.so -build +# Build directory +build*/ -regression.out -regression.diffs +# The tests results /results/ +# For IDE/Editors .vscode -upgrade_test/regression.out -upgrade_test/regression.diffs -upgrade_test/results +.idea +tags +cscope* +.ccls-cache/ +compile_commands.json From ac93656e323c8b949ada6765b20be8063b5e12e4 Mon Sep 17 00:00:00 2001 From: Chen Mulong Date: Tue, 15 Mar 2022 17:15:34 +0800 Subject: [PATCH 162/330] Add test cases for postmaster restart (#135) - When postmaster dies, the launcher and workers should be terminated. - When postmaster restarts, they should be restarted as well. - Move common ignored patterns to the top init file. --- tests/CMakeLists.txt | 1 - tests/init_file | 13 ++ .../expected/test_postmaster_restart.out | 134 ++++++++++++++++++ tests/isolation2/isolation2_schedule | 1 + .../sql/test_postmaster_restart.sql | 51 +++++++ tests/regress/regress_init_file | 14 -- 6 files changed, 199 insertions(+), 15 deletions(-) create mode 100644 tests/isolation2/expected/test_postmaster_restart.out create mode 100644 tests/isolation2/sql/test_postmaster_restart.sql delete mode 100644 tests/regress/regress_init_file diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index fc3363c1512..af2b8202d14 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -3,7 +3,6 @@ include(${CMAKE_SOURCE_DIR}/cmake/Regress.cmake) RegressTarget_Add(regress INIT_FILE ${CMAKE_CURRENT_SOURCE_DIR}/init_file - ${CMAKE_CURRENT_SOURCE_DIR}/regress/regress_init_file SQL_DIR ${CMAKE_CURRENT_SOURCE_DIR}/regress/sql EXPECTED_DIR ${CMAKE_CURRENT_SOURCE_DIR}/regress/expected RESULTS_DIR ${CMAKE_CURRENT_SOURCE_DIR}/regress/results diff --git a/tests/init_file b/tests/init_file index 46f44f4c788..114c58e4a24 100644 --- a/tests/init_file +++ b/tests/init_file @@ -19,4 +19,17 @@ s/^CONTEXT:/DETAIL:/ # E.g., (slice1 XXX.XXX.XXX.XXX:XXXX pid=XXXX) m/(slice\d+ [0-9.]+:\d+ pid=\d+)/ s/(slice\d+ [0-9.]+:\d+ pid=\d+)// + +# Remove oid of schema/role/tablespace from error message. 
+m/ERROR: role's disk space quota exceeded with name:\d+.*/ +s/ERROR: role's disk space quota exceeded with name:\d+.*/[hardlimit] role's disk space quota exceeded/ + +m/ERROR: schema's disk space quota exceeded with name:\d+.*/ +s/ERROR: schema's disk space quota exceeded with name:\d+.*/[hardlimit] schema's disk space quota exceeded/ + +m/ERROR: tablespace:\d+ role:\d+ diskquota exceeded.*/ +s/ERROR: tablespace:\d+ role:\d+ diskquota exceeded.*/[hardlimit] tablespace-role's disk space quota exceeded/ + +m/ERROR: tablespace:\d+ schema:\d+ diskquota exceeded.*/ +s/ERROR: tablespace:\d+ schema:\d+ diskquota exceeded.*/[hardlimit] tablespace-schema's disk space quota exceeded/ -- end_matchsubs diff --git a/tests/isolation2/expected/test_postmaster_restart.out b/tests/isolation2/expected/test_postmaster_restart.out new file mode 100644 index 00000000000..f08f1c31937 --- /dev/null +++ b/tests/isolation2/expected/test_postmaster_restart.out @@ -0,0 +1,134 @@ +!\retcode gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null; +-- start_ignore + +-- end_ignore +(exited with code 0) +!\retcode gpstop -u > /dev/null; +-- start_ignore + +-- end_ignore +(exited with code 0) + +1: CREATE SCHEMA postmaster_restart_s; +CREATE +1: SET search_path TO postmaster_restart_s; +SET + +1: SELECT diskquota.set_schema_quota('postmaster_restart_s', '1 MB'); + set_schema_quota +------------------ + +(1 row) +1: SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect fail +1: CREATE TABLE t1 AS SELECT generate_series(1,1000000); +ERROR: schema's disk space quota exceeded with name:157893 (seg0 127.0.0.1:6002 pid=1025673) +1q: ... + +-- launcher should exist +-- [p]ostgres is to filter out the pgrep itself +!\retcode pgrep -f "[p]ostgres.*launcher"; +-- start_ignore +2774491 + +-- end_ignore +(exited with code 0) +-- bgworker should exist +!\retcode pgrep -f "[p]ostgres.*diskquota.*isolation2test"; +-- start_ignore +2774659 + +-- end_ignore +(exited with code 0) + +-- stop postmaster +!\retcode pg_ctl -D $MASTER_DATA_DIRECTORY -w stop; +-- start_ignore +waiting for server to shut down.... 
done +server stopped +-- end_ignore +(exited with code 0) + +-- launcher should be terminated +!\retcode pgrep -f "[p]ostgres.*launcher"; +-- start_ignore + +-- end_ignore +(exited with code 1) +-- bgworker should be terminated +!\retcode pgrep -f "[p]ostgres.*diskquota.*isolation2test"; +-- start_ignore + +-- end_ignore +(exited with code 1) + +-- start postmaster +-- -E needs to be changed to "-c gp_role=dispatch" for GPDB7 +-- See https://github.com/greenplum-db/gpdb/pull/9396 +!\retcode pg_ctl -D $MASTER_DATA_DIRECTORY -w -o "-E" start; +-- start_ignore +waiting for server to start....2022-02-14 21:41:39.147869 CST,,,p1017570,th1516906368,,,,0,,,seg-1,,,,,"LOG","00000","registering background worker ""ftsprobe process""",,,,,,,,"RegisterBackgroundWorker","bgworker.c",773, +2022-02-14 21:41:39.147899 CST,,,p1017570,th1516906368,,,,0,,,seg-1,,,,,"LOG","00000","registering background worker ""dtx recovery process""",,,,,,,,"RegisterBackgroundWorker","bgworker.c",773, +2022-02-14 21:41:39.147934 CST,,,p1017570,th1516906368,,,,0,,,seg-1,,,,,"LOG","00000","registering background worker ""sweeper process""",,,,,,,,"RegisterBackgroundWorker","bgworker.c",773, +2022-02-14 21:41:39.148550 CST,,,p1017570,th1516906368,,,,0,,,seg-1,,,,,"LOG","00000","registering background worker ""[diskquota] - launcher""",,,,,,,,"RegisterBackgroundWorker","bgworker.c",773, +2022-02-14 21:41:39.272714 CST,,,p1017570,th1516906368,,,,0,,,seg-1,,,,,"LOG","00000","redirecting log output to logging collector process",,"Future log output will appear in directory ""pg_log"".",,,,,,"SysLogger_Start","syslogger.c",986, + done +server started + +-- end_ignore +(exited with code 0) +-- Hopefully the bgworker can be started in 5 seconds +!\retcode sleep 5; +-- start_ignore + +-- end_ignore +(exited with code 0) + +-- launcher should be restarted +!\retcode pgrep -f "[p]ostgres.*launcher"; +-- start_ignore +2771049 + +-- end_ignore +(exited with code 0) +-- bgworker should be restarted +!\retcode pgrep -f "[p]ostgres.*diskquota.*isolation2test"; +-- start_ignore +2771074 + +-- end_ignore +(exited with code 0) + +1: SET search_path TO postmaster_restart_s; +SET +1: SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +-- expect fail +1: CREATE TABLE t2 AS SELECT generate_series(1,1000000); +ERROR: schema's disk space quota exceeded with name:158089 (seg0 127.0.0.1:6002 pid=1027799) +-- enlarge the quota limits +1: SELECT diskquota.set_schema_quota('postmaster_restart_s', '100 MB'); + set_schema_quota +------------------ + +(1 row) +1: SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +-- expect succeed +1: CREATE TABLE t3 AS SELECT generate_series(1,1000000); +CREATE 1000000 + +1: DROP SCHEMA postmaster_restart_s CASCADE; +DROP +1q: ... 
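
The pass/fail behavior this expected output pins down is not test-harness magic; it falls out of how the worker loops are written. Every WaitLatch call in diskquota.c includes WL_POSTMASTER_DEATH, and the loop exits the process as soon as that bit comes back set, which is why pgrep finds no launcher or bgworker once the postmaster is stopped. A condensed wait-loop sketch follows; it is simplified, not verbatim from diskquota.c.

#include "postgres.h"
#include "storage/ipc.h"
#include "storage/latch.h"
#include "storage/proc.h"

/* Condensed wait-loop sketch; simplified, not verbatim from diskquota.c. */
static void
wait_loop_sketch(long naptime_secs)
{
	for (;;)
	{
		int rc = WaitLatch(&MyProc->procLatch,
		                   WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH,
		                   naptime_secs * 1000L);

		ResetLatch(&MyProc->procLatch);

		/* Emergency bailout: the postmaster is gone, exit at once. */
		if (rc & WL_POSTMASTER_DEATH)
			proc_exit(1);
	}
}

The restart half of the test falls out of the launcher registration in _PG_init(), which sets bgw_restart_time = BGW_DEFAULT_RESTART_INTERVAL, so the new postmaster relaunches the launcher, and the launcher in turn restarts the per-database workers from database_list.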
diff --git a/tests/isolation2/isolation2_schedule b/tests/isolation2/isolation2_schedule index f9950222bfc..92c6490f007 100644 --- a/tests/isolation2/isolation2_schedule +++ b/tests/isolation2/isolation2_schedule @@ -4,6 +4,7 @@ test: test_relation_size test: test_blackmap test: test_vacuum test: test_truncate +test: test_postmaster_restart test: test_worker_timeout test: test_drop_extension test: reset_config diff --git a/tests/isolation2/sql/test_postmaster_restart.sql b/tests/isolation2/sql/test_postmaster_restart.sql new file mode 100644 index 00000000000..dba52dc3e63 --- /dev/null +++ b/tests/isolation2/sql/test_postmaster_restart.sql @@ -0,0 +1,51 @@ +!\retcode gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null; +!\retcode gpstop -u > /dev/null; + +1: CREATE SCHEMA postmaster_restart_s; +1: SET search_path TO postmaster_restart_s; + +1: SELECT diskquota.set_schema_quota('postmaster_restart_s', '1 MB'); +1: SELECT diskquota.wait_for_worker_new_epoch(); + +-- expect fail +1: CREATE TABLE t1 AS SELECT generate_series(1,1000000); +1q: + +-- launcher should exist +-- [p]ostgres is to filter out the pgrep itself +!\retcode pgrep -f "[p]ostgres.*launcher"; +-- bgworker should exist +!\retcode pgrep -f "[p]ostgres.*diskquota.*isolation2test"; + +-- stop postmaster +!\retcode pg_ctl -D $MASTER_DATA_DIRECTORY -w stop; + +-- launcher should be terminated +!\retcode pgrep -f "[p]ostgres.*launcher"; +-- bgworker should be terminated +!\retcode pgrep -f "[p]ostgres.*diskquota.*isolation2test"; + +-- start postmaster +-- -E needs to be changed to "-c gp_role=dispatch" for GPDB7 +-- See https://github.com/greenplum-db/gpdb/pull/9396 +!\retcode pg_ctl -D $MASTER_DATA_DIRECTORY -w -o "-E" start; +-- Hopefully the bgworker can be started in 5 seconds +!\retcode sleep 5; + +-- launcher should be restarted +!\retcode pgrep -f "[p]ostgres.*launcher"; +-- bgworker should be restarted +!\retcode pgrep -f "[p]ostgres.*diskquota.*isolation2test"; + +1: SET search_path TO postmaster_restart_s; +1: SELECT diskquota.wait_for_worker_new_epoch(); +-- expect fail +1: CREATE TABLE t2 AS SELECT generate_series(1,1000000); +-- enlarge the quota limits +1: SELECT diskquota.set_schema_quota('postmaster_restart_s', '100 MB'); +1: SELECT diskquota.wait_for_worker_new_epoch(); +-- expect succeed +1: CREATE TABLE t3 AS SELECT generate_series(1,1000000); + +1: DROP SCHEMA postmaster_restart_s CASCADE; +1q: diff --git a/tests/regress/regress_init_file b/tests/regress/regress_init_file deleted file mode 100644 index 63dd2602ff0..00000000000 --- a/tests/regress/regress_init_file +++ /dev/null @@ -1,14 +0,0 @@ --- start_matchsubs -# Remove oid of schema/role/tablespace from error message. 
-m/ERROR: role's disk space quota exceeded with name:\d+.*/ -s/ERROR: role's disk space quota exceeded with name:\d+.*/[hardlimit] role's disk space quota exceeded/ - -m/ERROR: schema's disk space quota exceeded with name:\d+.*/ -s/ERROR: schema's disk space quota exceeded with name:\d+.*/[hardlimit] schema's disk space quota exceeded/ - -m/ERROR: tablespace:\d+ role:\d+ diskquota exceeded.*/ -s/ERROR: tablespace:\d+ role:\d+ diskquota exceeded.*/[hardlimit] tablespace-role's disk space quota exceeded/ - -m/ERROR: tablespace:\d+ schema:\d+ diskquota exceeded.*/ -s/ERROR: tablespace:\d+ schema:\d+ diskquota exceeded.*/[hardlimit] tablespace-schema's disk space quota exceeded/ --- end_matchsubs From df2f7d79f37de91014ec88fd899c7bc24bbd6269 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Xuebin=20Su=20=28=E8=8B=8F=E5=AD=A6=E6=96=8C=29?= <12034000+xuebinsu@users.noreply.github.com> Date: Wed, 16 Mar 2022 17:29:15 +0800 Subject: [PATCH 163/330] Fix memory leak when refresh_blackmap() (#182) The ever growing part of memory contains the following pattern: refresh_blackmap.*: pid= The pattern contains info of segments. This indicates that the results of dispatching refresh_blackmap() might not be freed properly. --- quotamodel.c | 1 + 1 file changed, 1 insertion(+) diff --git a/quotamodel.c b/quotamodel.c index 239e66a17a6..9c105a65f00 100644 --- a/quotamodel.c +++ b/quotamodel.c @@ -1259,6 +1259,7 @@ dispatch_blackmap(HTAB *local_active_table_stat_map) pfree(rows.data); pfree(active_oids.data); pfree(sql.data); + cdbdisp_clearCdbPgResults(&cdb_pgresults); } /* From 037675876fd6e4fbff8e63188741c833b59407ee Mon Sep 17 00:00:00 2001 From: Chen Mulong Date: Thu, 17 Mar 2022 13:58:10 +0800 Subject: [PATCH 164/330] Format the code and add clang-format to PR pipeline (#183) - Use clang 13 to format the code - Rename the job 'gate' to 'entrance', prepare to add 'exit' job - Add clang-format as an entrance check into the PR pipeline --- .clang-format | 7 +- README.md | 35 +- concourse/pipeline/commit.yml | 8 +- concourse/pipeline/dev.yml | 8 +- concourse/pipeline/job_def.lib.yml | 31 +- concourse/pipeline/pr.yml | 8 +- concourse/pipeline/res_def.yml | 8 + concourse/scripts/check-clang-format.sh | 12 + diskquota.c | 613 +++++------- diskquota.h | 86 +- diskquota_utility.c | 931 ++++++++--------- enforcement.c | 22 +- gp_activetable.c | 610 +++++------- gp_activetable.h | 32 +- quotamodel.c | 1209 ++++++++++------------- relation_cache.c | 277 +++--- relation_cache.h | 30 +- 17 files changed, 1807 insertions(+), 2120 deletions(-) create mode 100755 concourse/scripts/check-clang-format.sh diff --git a/.clang-format b/.clang-format index 72ce535cdc0..f4315b447bd 100644 --- a/.clang-format +++ b/.clang-format @@ -1,6 +1,8 @@ --- BasedOnStyle: Google +ColumnLimit: 120 + # How much whitespace? UseTab: ForIndentation TabWidth: 4 @@ -34,7 +36,4 @@ BraceWrapping: SplitEmptyFunction: false SplitEmptyRecord: false -# Put "postgres.h" and "postgres_undefs.h" first in a group of includes. -IncludeCategories: -- Regex: '"postgres(_undefs)?.h"' - Priority: 1 +SortIncludes: false diff --git a/README.md b/README.md index f5292c187f2..135c637116a 100644 --- a/README.md +++ b/README.md @@ -82,9 +82,9 @@ cluster level, we limit the diskquota of a role to be database specific. That is to say, a role may have different quota limit on different databases and their disk usage is isolated between databases. -# Install +# Development -(cmake)[https://cmake.org] (>= 3.18) needs to be installed. 
+[cmake](https://cmake.org) (>= 3.18) needs to be installed. 1. Build & install disk quota ``` @@ -141,6 +141,37 @@ create extension diskquota; select diskquota.init_table_size_table(); ``` +## clang-format + +In order to pass the CI check for PR, the changed code needs to be formated by +[clang-format](https://clang.llvm.org/docs/ClangFormat.html) **13**. A static-linked +version can be found at https://github.com/beeender/clang-tools-static-binaries/releases/tag/master-7d0aff9a . + +To format all the source files in the git tree: + +``` +git ls-files '*.c' '*.h' | xargs clang-format --style=file -i +``` + +If you have `git-clang-format` installed, it can be as easy as: + +``` +git clang-format +``` + +To skip formatting a certain piece of code: + +```c +/* clang-format off */ +#if SOME_MACRO +#define DO_NOT_FORMAT_ME \ + (1 \ + + \ + )\ +#endif +/* clang-format on */ +``` + # Usage 1. Set/update/delete schema quota limit using diskquota.set_schema_quota ``` diff --git a/concourse/pipeline/commit.yml b/concourse/pipeline/commit.yml index df5efa78a5e..eb9bd7cc590 100644 --- a/concourse/pipeline/commit.yml +++ b/concourse/pipeline/commit.yml @@ -1,5 +1,5 @@ #@ load("job_def.lib.yml", -#@ "gate_job", +#@ "entrance_job", #@ "build_test_job", #@ "centos6_gpdb6_conf", #@ "centos7_gpdb6_conf", @@ -20,7 +20,11 @@ #@ ubuntu18_gpdb6_conf() #@ ] jobs: -- #@ gate_job(trigger) +#@ param = { +#@ "res_map": res_map, +#@ "trigger": trigger, +#@ } +- #@ entrance_job(param) #@ for conf in confs: #@ param = { #@ "res_map": res_map, diff --git a/concourse/pipeline/dev.yml b/concourse/pipeline/dev.yml index 83fd6b17aa5..e657e3986bc 100644 --- a/concourse/pipeline/dev.yml +++ b/concourse/pipeline/dev.yml @@ -1,5 +1,5 @@ #@ load("job_def.lib.yml", -#@ "gate_job", +#@ "entrance_check_job", #@ "build_test_job", #@ "centos6_gpdb6_conf", #@ "centos7_gpdb6_conf", @@ -16,7 +16,11 @@ #@ confs= [ #@ ubuntu18_gpdb6_conf()] jobs: -- #@ gate_job(trigger) +#@ param = { +#@ "res_map": res_map, +#@ "trigger": trigger, +#@ } +- #@ entrance_check_job(param) #@ for conf in confs: #@ param = { #@ "res_map": res_map, diff --git a/concourse/pipeline/job_def.lib.yml b/concourse/pipeline/job_def.lib.yml index 3004d0bd99d..1468041288d 100644 --- a/concourse/pipeline/job_def.lib.yml +++ b/concourse/pipeline/job_def.lib.yml @@ -41,8 +41,24 @@ res_intermediates_bin: bin_diskquota_gpdb6_ubuntu18_intermediates os: ubuntu18.04 #@ end -#@ def gate_job(trigger): -name: gate +#! The entry point of a pipeline. The job name must be 'entrance'. +#@ def entrance_job(param): +#@ trigger = param["trigger"] +name: entrance +on_failure: #@ trigger["on_failure"] +on_error: #@ trigger["on_error"] +plan: +#@ for to_get in trigger["to_get"]: +- trigger: true + _: #@ template.replace(to_get) +#@ end +#@ end + +#! Like the entrance_job, with more static checks. 
+#@ def entrance_check_job(param): +#@ add_res_by_name(param["res_map"], "clang-format-image") +#@ trigger = param["trigger"] +name: entrance on_failure: #@ trigger["on_failure"] on_error: #@ trigger["on_error"] plan: @@ -50,6 +66,15 @@ plan: - trigger: true _: #@ template.replace(to_get) #@ end +- get: clang-format-image +- task: check_clang_format + image: clang-format-image + config: + inputs: + - name: diskquota_src + platform: linux + run: + path: diskquota_src/concourse/scripts/check-clang-format.sh #@ end #@ def _build_task(conf): @@ -89,7 +114,7 @@ on_failure: #@ trigger["on_failure"] on_error: #@ trigger["on_error"] plan: #@ for to_get in trigger["to_get"]: -- passed: [gate] +- passed: [entrance] trigger: true _: #@ template.replace(to_get) #@ end diff --git a/concourse/pipeline/pr.yml b/concourse/pipeline/pr.yml index a145109ff1f..716fde9e1bb 100644 --- a/concourse/pipeline/pr.yml +++ b/concourse/pipeline/pr.yml @@ -1,5 +1,5 @@ #@ load("job_def.lib.yml", -#@ "gate_job", +#@ "entrance_check_job", #@ "build_test_job", #@ "centos6_gpdb6_conf", #@ "centos7_gpdb6_conf", @@ -22,7 +22,11 @@ #@ ubuntu18_gpdb6_conf() #@ ] jobs: -- #@ gate_job(trigger) +#@ param = { +#@ "res_map": res_map, +#@ "trigger": trigger, +#@ } +- #@ entrance_check_job(param) #@ for conf in confs: #@ param = { #@ "res_map": res_map, diff --git a/concourse/pipeline/res_def.yml b/concourse/pipeline/res_def.yml index 6ca128adc6d..93ce5ba0650 100644 --- a/concourse/pipeline/res_def.yml +++ b/concourse/pipeline/res_def.yml @@ -101,6 +101,14 @@ resources: source: repository: gcr.io/data-gpdb-public-images/gpdb6-ubuntu18.04-test tag: latest +# clang_format +- name: clang-format-image + type: registry-image + source: + repository: gcr.io/data-gpdb-extensions/common/clang-format + tag: 13 + username: _json_key + password: ((extensions-gcs-service-account-key)) # gpdb binary on gcs is located as different folder for different version - name: bin_gpdb6_centos6 diff --git a/concourse/scripts/check-clang-format.sh b/concourse/scripts/check-clang-format.sh new file mode 100755 index 00000000000..963fd1e67be --- /dev/null +++ b/concourse/scripts/check-clang-format.sh @@ -0,0 +1,12 @@ +#!/bin/bash +# Due to the limitation of concourse git/github-pr resource, it is difficult to +# only check the format of the git diff. So all the source code are being +# checked. + +set -eox pipefail + +src_dir=$(dirname "${BASH_SOURCE[0]}")/../.. +pushd "${src_dir}" +git ls-files '*.c' '*.h' | \ + xargs clang-format --style=file --verbose --Werror -dry-run +popd diff --git a/diskquota.c b/diskquota.c index 69a11a1d48a..5ed631bc5e4 100644 --- a/diskquota.c +++ b/diskquota.c @@ -38,9 +38,10 @@ PG_MODULE_MAGIC; -#define DISKQUOTA_DB "diskquota" -#define DISKQUOTA_APPLICATION_NAME "gp_reserved_gpdiskquota" +#define DISKQUOTA_DB "diskquota" +#define DISKQUOTA_APPLICATION_NAME "gp_reserved_gpdiskquota" +/* clang-format off */ #if !defined(DISKQUOTA_VERSION) || \ !defined(DISKQUOTA_MAJOR_VERSION) || \ !defined(DISKQUOTA_PATCH_VERSION) || \ @@ -48,27 +49,28 @@ PG_MODULE_MAGIC; !defined(DISKQUOTA_BINARY_NAME) #error Version not found. Please check if the VERSION file exists. 
#endif +/* clang-format on */ -#include // for useconds_t +#include // for useconds_t extern int usleep(useconds_t usec); // in /* flags set by signal handlers */ -static volatile sig_atomic_t got_sighup = false; +static volatile sig_atomic_t got_sighup = false; static volatile sig_atomic_t got_sigterm = false; static volatile sig_atomic_t got_sigusr1 = false; /* GUC variables */ -int diskquota_naptime = 0; -int diskquota_max_active_tables = 0; -int diskquota_worker_timeout = 60; /* default timeout is 60 seconds */ -bool diskquota_hardlimit = false; +int diskquota_naptime = 0; +int diskquota_max_active_tables = 0; +int diskquota_worker_timeout = 60; /* default timeout is 60 seconds */ +bool diskquota_hardlimit = false; -DiskQuotaLocks diskquota_locks; +DiskQuotaLocks diskquota_locks; ExtensionDDLMessage *extension_ddl_message = NULL; /* using hash table to support incremental update the table size entry.*/ -HTAB *disk_quota_worker_map = NULL; -static int num_db = 0; +HTAB *disk_quota_worker_map = NULL; +static int num_db = 0; bool diskquota_is_paused() @@ -78,13 +80,11 @@ diskquota_is_paused() LWLockAcquire(diskquota_locks.worker_map_lock, LW_SHARED); { - DiskQuotaWorkerEntry *hash_entry; - bool found; + DiskQuotaWorkerEntry *hash_entry; + bool found; - hash_entry = (DiskQuotaWorkerEntry*) hash_search(disk_quota_worker_map, - (void*)&MyDatabaseId, - HASH_FIND, - &found); + hash_entry = + (DiskQuotaWorkerEntry *)hash_search(disk_quota_worker_map, (void *)&MyDatabaseId, HASH_FIND, &found); paused = found ? hash_entry->is_paused : false; } LWLockRelease(diskquota_locks.worker_map_lock); @@ -93,10 +93,10 @@ diskquota_is_paused() } /* functions of disk quota*/ -void _PG_init(void); -void _PG_fini(void); -void disk_quota_worker_main(Datum); -void disk_quota_launcher_main(Datum); +void _PG_init(void); +void _PG_fini(void); +void disk_quota_worker_main(Datum); +void disk_quota_launcher_main(Datum); static void disk_quota_sigterm(SIGNAL_ARGS); static void disk_quota_sighup(SIGNAL_ARGS); @@ -107,12 +107,11 @@ static void create_monitor_db_table(void); static void add_dbid_to_database_list(Oid dbid); static void del_dbid_from_database_list(Oid dbid); static void process_extension_ddl_message(void); -static void do_process_extension_ddl_message(MessageResult * code, - ExtensionDDLMessage local_extension_ddl_message); +static void do_process_extension_ddl_message(MessageResult *code, ExtensionDDLMessage local_extension_ddl_message); static void try_kill_db_worker(Oid dbid); static void terminate_all_workers(void); -static void on_add_db(Oid dbid, MessageResult * code); -static void on_del_db(Oid dbid, MessageResult * code); +static void on_add_db(Oid dbid, MessageResult *code); +static void on_del_db(Oid dbid, MessageResult *code); static bool is_valid_dbid(Oid dbid); extern void invalidate_database_blackmap(Oid dbid); @@ -127,13 +126,13 @@ void _PG_init(void) { /* diskquota.so must be in shared_preload_libraries to init SHM. */ - if (!process_shared_preload_libraries_in_progress) { - ereport(ERROR, ( - errmsg("[diskquota] booting " DISKQUOTA_VERSION ", but " - DISKQUOTA_BINARY_NAME " not in shared_preload_libraries. abort.") - )); - } else { - ereport(INFO, (errmsg("booting diskquota-"DISKQUOTA_VERSION))); + if (!process_shared_preload_libraries_in_progress) + { + ereport(ERROR, (errmsg("[diskquota] booting " DISKQUOTA_VERSION ", but " DISKQUOTA_BINARY_NAME + " not in shared_preload_libraries. 
abort."))); + } else + { + ereport(INFO, (errmsg("booting diskquota-" DISKQUOTA_VERSION))); } BackgroundWorker worker; @@ -156,8 +155,7 @@ _PG_init(void) } /* set up common data for diskquota launcher worker */ - worker.bgw_flags = BGWORKER_SHMEM_ACCESS | - BGWORKER_BACKEND_DATABASE_CONNECTION; + worker.bgw_flags = BGWORKER_SHMEM_ACCESS | BGWORKER_BACKEND_DATABASE_CONNECTION; worker.bgw_start_time = BgWorkerStart_RecoveryFinished; /* launcher process should be restarted after pm reset. */ worker.bgw_restart_time = BGW_DEFAULT_RESTART_INTERVAL; @@ -172,8 +170,7 @@ _PG_init(void) void _PG_fini(void) -{ -} +{} /* * Signal handler for SIGTERM @@ -183,11 +180,10 @@ _PG_fini(void) static void disk_quota_sigterm(SIGNAL_ARGS) { - int save_errno = errno; + int save_errno = errno; got_sigterm = true; - if (MyProc) - SetLatch(&MyProc->procLatch); + if (MyProc) SetLatch(&MyProc->procLatch); errno = save_errno; } @@ -200,11 +196,10 @@ disk_quota_sigterm(SIGNAL_ARGS) static void disk_quota_sighup(SIGNAL_ARGS) { - int save_errno = errno; + int save_errno = errno; got_sighup = true; - if (MyProc) - SetLatch(&MyProc->procLatch); + if (MyProc) SetLatch(&MyProc->procLatch); errno = save_errno; } @@ -216,12 +211,11 @@ disk_quota_sighup(SIGNAL_ARGS) static void disk_quota_sigusr1(SIGNAL_ARGS) { - int save_errno = errno; + int save_errno = errno; got_sigusr1 = true; - if (MyProc) - SetLatch(&MyProc->procLatch); + if (MyProc) SetLatch(&MyProc->procLatch); errno = save_errno; } @@ -232,54 +226,16 @@ disk_quota_sigusr1(SIGNAL_ARGS) static void define_guc_variables(void) { - DefineCustomIntVariable("diskquota.naptime", - "Duration between each check (in seconds).", - NULL, - &diskquota_naptime, - 2, - 0, - INT_MAX, - PGC_SIGHUP, - 0, - NULL, - NULL, - NULL); - - DefineCustomIntVariable("diskquota.max_active_tables", - "Max number of active tables monitored by disk-quota.", - NULL, - &diskquota_max_active_tables, - 1 * 1024 * 1024, - 1, - INT_MAX, - PGC_SIGHUP, - 0, - NULL, - NULL, - NULL); - - DefineCustomIntVariable("diskquota.worker_timeout", - "Duration between each check (in seconds).", - NULL, - &diskquota_worker_timeout, - 60, - 1, - INT_MAX, - PGC_SIGHUP, - 0, - NULL, - NULL, - NULL); - DefineCustomBoolVariable("diskquota.hard_limit", - "Set this to 'on' to enable disk-quota hardlimit.", - NULL, - &diskquota_hardlimit, - false, - PGC_SIGHUP, - 0, - NULL, - NULL, - NULL); + DefineCustomIntVariable("diskquota.naptime", "Duration between each check (in seconds).", NULL, &diskquota_naptime, + 2, 0, INT_MAX, PGC_SIGHUP, 0, NULL, NULL, NULL); + + DefineCustomIntVariable("diskquota.max_active_tables", "Max number of active tables monitored by disk-quota.", NULL, + &diskquota_max_active_tables, 1 * 1024 * 1024, 1, INT_MAX, PGC_SIGHUP, 0, NULL, NULL, NULL); + + DefineCustomIntVariable("diskquota.worker_timeout", "Duration between each check (in seconds).", NULL, + &diskquota_worker_timeout, 60, 1, INT_MAX, PGC_SIGHUP, 0, NULL, NULL, NULL); + DefineCustomBoolVariable("diskquota.hard_limit", "Set this to 'on' to enable disk-quota hardlimit.", NULL, + &diskquota_hardlimit, false, PGC_SIGHUP, 0, NULL, NULL, NULL); } /* ---- Functions for disk quota worker process ---- */ @@ -291,11 +247,9 @@ define_guc_variables(void) void disk_quota_worker_main(Datum main_arg) { - char *dbname = MyBgworkerEntry->bgw_name; + char *dbname = MyBgworkerEntry->bgw_name; - ereport(LOG, - (errmsg("[diskquota] start disk quota worker process to monitor database:%s", - dbname))); + ereport(LOG, (errmsg("[diskquota] start disk quota worker 
process to monitor database:%s", dbname))); /* Establish signal handlers before unblocking signals. */ pqsignal(SIGHUP, disk_quota_sighup); @@ -308,9 +262,8 @@ disk_quota_worker_main(Datum main_arg) /* Connect to our database */ BackgroundWorkerInitializeConnection(dbname, NULL); - set_config_option("application_name", DISKQUOTA_APPLICATION_NAME, - PGC_USERSET,PGC_S_SESSION, - GUC_ACTION_SAVE, true, 0); + set_config_option("application_name", DISKQUOTA_APPLICATION_NAME, PGC_USERSET, PGC_S_SESSION, GUC_ACTION_SAVE, true, + 0); /* diskquota worker should has Gp_role as dispatcher */ Gp_role = GP_ROLE_DISPATCH; @@ -323,39 +276,41 @@ disk_quota_worker_main(Datum main_arg) // check current binary version and SQL DLL version are matched int times = 0; - while (!got_sigterm) { + while (!got_sigterm) + { CHECK_FOR_INTERRUPTS(); int major = -1, minor = -1; int has_error = worker_spi_get_extension_version(&major, &minor) != 0; - if (major == DISKQUOTA_MAJOR_VERSION && minor == DISKQUOTA_MINOR_VERSION) - break; + if (major == DISKQUOTA_MAJOR_VERSION && minor == DISKQUOTA_MINOR_VERSION) break; - if (has_error) { + if (has_error) + { static char _errfmt[] = "find issues in pg_class.pg_extension check server log. waited %d seconds", - _errmsg[sizeof(_errfmt) + sizeof("2147483647" /* INT_MAX */) + 1] = {}; + _errmsg[sizeof(_errfmt) + sizeof("2147483647" /* INT_MAX */) + 1] = {}; snprintf(_errmsg, sizeof(_errmsg), _errfmt, times * diskquota_naptime); init_ps_display("bgworker:", "[diskquota]", dbname, _errmsg); - } else { + } else + { init_ps_display("bgworker:", "[diskquota]", dbname, - "v" DISKQUOTA_VERSION " is not matching with current SQL. stop working"); + "v" DISKQUOTA_VERSION " is not matching with current SQL. stop working"); } - ereportif( - !has_error && times == 0, - WARNING, - (errmsg("[diskquota] worker for \"%s\" detected the installed version is \"%d.%d\", " - "but current version is %s. abort due to version not match", dbname, major, minor, DISKQUOTA_VERSION), - errhint("run alter extension diskquota update to \"%d.%d\"", - DISKQUOTA_MAJOR_VERSION, DISKQUOTA_MINOR_VERSION))); + ereportif(!has_error && times == 0, WARNING, + (errmsg("[diskquota] worker for \"%s\" detected the installed version is \"%d.%d\", " + "but current version is %s. 
abort due to version not match", + dbname, major, minor, DISKQUOTA_VERSION), + errhint("run alter extension diskquota update to \"%d.%d\"", DISKQUOTA_MAJOR_VERSION, + DISKQUOTA_MINOR_VERSION))); - int rc = WaitLatch(&MyProc->procLatch, WL_LATCH_SET|WL_TIMEOUT|WL_POSTMASTER_DEATH, diskquota_naptime * 1000L); + int rc = WaitLatch(&MyProc->procLatch, WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, + diskquota_naptime * 1000L); ResetLatch(&MyProc->procLatch); - if (rc & WL_POSTMASTER_DEATH) { - ereport(LOG, (errmsg("[diskquota] bgworker for \"%s\" is being terminated by postmaster death.", - dbname))); + if (rc & WL_POSTMASTER_DEATH) + { + ereport(LOG, (errmsg("[diskquota] bgworker for \"%s\" is being terminated by postmaster death.", dbname))); proc_exit(-1); } @@ -373,7 +328,7 @@ disk_quota_worker_main(Datum main_arg) /* Waiting for diskquota state become ready */ while (!got_sigterm) { - int rc; + int rc; CHECK_FOR_INTERRUPTS(); @@ -387,20 +342,16 @@ disk_quota_worker_main(Datum main_arg) { break; } - rc = WaitLatch(&MyProc->procLatch, - WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, - diskquota_naptime * 1000L); + rc = WaitLatch(&MyProc->procLatch, WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, diskquota_naptime * 1000L); ResetLatch(&MyProc->procLatch); // be nice to scheduler when naptime == 0 and diskquota_is_paused() == true - if (!diskquota_naptime) - usleep(1); + if (!diskquota_naptime) usleep(1); /* Emergency bailout if postmaster has died */ if (rc & WL_POSTMASTER_DEATH) { - ereport(LOG, (errmsg("[diskquota] bgworker for \"%s\" is being terminated by postmaster death.", - dbname))); + ereport(LOG, (errmsg("[diskquota] bgworker for \"%s\" is being terminated by postmaster death.", dbname))); proc_exit(1); } @@ -415,8 +366,7 @@ disk_quota_worker_main(Datum main_arg) /* if received sigterm, just exit the worker process */ if (got_sigterm) { - ereport(LOG, (errmsg("[diskquota] bgworker for \"%s\" is being terminated by SIGTERM.", - dbname))); + ereport(LOG, (errmsg("[diskquota] bgworker for \"%s\" is being terminated by SIGTERM.", dbname))); /* clear the out-of-quota blacklist in shared memory */ invalidate_database_blackmap(MyDatabaseId); proc_exit(0); @@ -425,15 +375,13 @@ disk_quota_worker_main(Datum main_arg) /* Refresh quota model with init mode */ refresh_disk_quota_model(true); - ereport(LOG, - (errmsg("[diskquota] start bgworker loop for database: \"%s\"", - dbname))); + ereport(LOG, (errmsg("[diskquota] start bgworker loop for database: \"%s\"", dbname))); /* * Main loop: do this until the SIGTERM handler tells us to terminate */ while (!got_sigterm) { - int rc; + int rc; CHECK_FOR_INTERRUPTS(); @@ -443,14 +391,11 @@ disk_quota_worker_main(Datum main_arg) * necessary, but is awakened if postmaster dies. That way the * background process goes away immediately in an emergency. 
*/ - rc = WaitLatch(&MyProc->procLatch, - WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, - diskquota_naptime * 1000L); + rc = WaitLatch(&MyProc->procLatch, WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, diskquota_naptime * 1000L); ResetLatch(&MyProc->procLatch); // be nice to scheduler when naptime == 0 and diskquota_is_paused() == true - if (!diskquota_naptime) - usleep(1); + if (!diskquota_naptime) usleep(1); /* Emergency bailout if postmaster has died */ if (rc & WL_POSTMASTER_DEATH) @@ -469,23 +414,22 @@ disk_quota_worker_main(Datum main_arg) SIMPLE_FAULT_INJECTOR("diskquota_worker_main"); /* Do the work */ - if (!diskquota_is_paused()) - refresh_disk_quota_model(false); + if (!diskquota_is_paused()) refresh_disk_quota_model(false); worker_increase_epoch(MyDatabaseId); } - ereport(LOG, (errmsg("[diskquota] bgworker for \"%s\" is being terminated by SIGTERM.", - dbname))); + ereport(LOG, (errmsg("[diskquota] bgworker for \"%s\" is being terminated by SIGTERM.", dbname))); /* clear the out-of-quota blacklist in shared memory */ invalidate_database_blackmap(MyDatabaseId); proc_exit(0); } -static inline bool isAbnormalLoopTime(int diff_sec) +static inline bool +isAbnormalLoopTime(int diff_sec) { int max_time; - if (diskquota_naptime>6) + if (diskquota_naptime > 6) max_time = diskquota_naptime * 2; else max_time = diskquota_naptime + 6; @@ -520,9 +464,8 @@ disk_quota_launcher_main(Datum main_arg) */ BackgroundWorkerInitializeConnection(DISKQUOTA_DB, NULL); - set_config_option("application_name", DISKQUOTA_APPLICATION_NAME, - PGC_USERSET,PGC_S_SESSION, - GUC_ACTION_SAVE, true, 0); + set_config_option("application_name", DISKQUOTA_APPLICATION_NAME, PGC_USERSET, PGC_S_SESSION, GUC_ACTION_SAVE, true, + 0); /* diskquota launcher should has Gp_role as dispatcher */ Gp_role = GP_ROLE_DISPATCH; @@ -546,7 +489,7 @@ disk_quota_launcher_main(Datum main_arg) loop_end = time(NULL); while (!got_sigterm) { - int rc; + int rc; CHECK_FOR_INTERRUPTS(); @@ -556,20 +499,16 @@ disk_quota_launcher_main(Datum main_arg) * necessary, but is awakened if postmaster dies. That way the * background process goes away immediately in an emergency. 
*/ - rc = WaitLatch(&MyProc->procLatch, - WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, - diskquota_naptime * 1000L); + rc = WaitLatch(&MyProc->procLatch, WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, diskquota_naptime * 1000L); ResetLatch(&MyProc->procLatch); // wait at least one time slice, avoid 100% CPU usage - if (!diskquota_naptime) - usleep(1); + if (!diskquota_naptime) usleep(1); /* Emergency bailout if postmaster has died */ if (rc & WL_POSTMASTER_DEATH) { - ereport(LOG, - (errmsg("[diskquota launcher] launcher is being terminated by postmaster death."))); + ereport(LOG, (errmsg("[diskquota launcher] launcher is being terminated by postmaster death."))); proc_exit(1); } @@ -591,22 +530,20 @@ disk_quota_launcher_main(Datum main_arg) StartIdleResourceCleanupTimers(); } loop_begin = loop_end; - loop_end = time(NULL); + loop_end = time(NULL); if (isAbnormalLoopTime(loop_end - loop_begin)) { ereport(WARNING, (errmsg("[diskquota launcher] loop takes too much time %d/%d", - (int)(loop_end - loop_begin), diskquota_naptime))); + (int)(loop_end - loop_begin), diskquota_naptime))); } } /* terminate all the diskquota worker processes before launcher exit */ - ereport(LOG, - (errmsg("[diskquota launcher] launcher is being terminated by SIGTERM."))); + ereport(LOG, (errmsg("[diskquota launcher] launcher is being terminated by SIGTERM."))); terminate_all_workers(); proc_exit(0); } - /* * Create table to record the list of monitored databases * we need a place to store the database with diskquota enabled @@ -619,12 +556,12 @@ static void create_monitor_db_table(void) { const char *sql; - bool connected = false; - bool pushed_active_snap = false; - bool ret = true; + bool connected = false; + bool pushed_active_snap = false; + bool ret = true; sql = "create schema if not exists diskquota_namespace;" - "create table if not exists diskquota_namespace.database_list(dbid oid not null unique);"; + "create table if not exists diskquota_namespace.database_list(dbid oid not null unique);"; StartTransactionCommand(); @@ -636,11 +573,11 @@ create_monitor_db_table(void) PG_TRY(); { int ret_code = SPI_connect(); - if (ret_code != SPI_OK_CONNECT ) + if (ret_code != SPI_OK_CONNECT) { - ereport(ERROR, - (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("[diskquota launcher] unable to connect to execute internal query. return code: %d.", ret_code))); + ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("[diskquota launcher] unable to connect to execute internal query. 
return code: %d.", + ret_code))); } connected = true; PushActiveSnapshot(GetTransactionSnapshot()); @@ -652,7 +589,8 @@ create_monitor_db_table(void) ret_code = SPI_execute(sql, false, 0); if (ret_code != SPI_OK_UTILITY) { - ereport(ERROR, (errmsg("[diskquota launcher] SPI_execute error, sql: \"%s\", errno: %d, ret_code: %d.", sql, errno, ret_code))); + ereport(ERROR, (errmsg("[diskquota launcher] SPI_execute error, sql: \"%s\", errno: %d, ret_code: %d.", sql, + errno, ret_code))); } } PG_CATCH(); @@ -661,16 +599,14 @@ create_monitor_db_table(void) HOLD_INTERRUPTS(); EmitErrorReport(); FlushErrorState(); - ret = false; + ret = false; debug_query_string = NULL; /* Now we can allow interrupts again */ RESUME_INTERRUPTS(); } PG_END_TRY(); - if (connected) - SPI_finish(); - if (pushed_active_snap) - PopActiveSnapshot(); + if (connected) SPI_finish(); + if (pushed_active_snap) PopActiveSnapshot(); if (ret) CommitTransactionCommand(); else @@ -686,10 +622,10 @@ create_monitor_db_table(void) static void start_workers_from_dblist(void) { - TupleDesc tupdesc; - int num = 0; - int ret; - int i; + TupleDesc tupdesc; + int num = 0; + int ret; + int i; /* * Don't catch errors in start_workers_from_dblist. Since this is the @@ -703,33 +639,32 @@ start_workers_from_dblist(void) ereport(ERROR, (errmsg("[diskquota launcher] SPI connect error, errno: %d, return code: %d.", errno, ret))); ret = SPI_execute("select dbid from diskquota_namespace.database_list;", true, 0); if (ret != SPI_OK_SELECT) - ereport(ERROR, (errmsg( - "[diskquota launcher] 'select diskquota_namespace.database_list', errno: %d, return code: %d", - errno, ret))); + ereport(ERROR, + (errmsg("[diskquota launcher] 'select diskquota_namespace.database_list', errno: %d, return code: %d", + errno, ret))); tupdesc = SPI_tuptable->tupdesc; if (tupdesc->natts != 1 || tupdesc->attrs[0]->atttypid != OIDOID) { ereport(LOG, (errmsg("[diskquota launcher], natts/atttypid: %d.", - tupdesc->natts != 1 ? tupdesc->natts : tupdesc->attrs[0]->atttypid))); + tupdesc->natts != 1 ? tupdesc->natts : tupdesc->attrs[0]->atttypid))); ereport(ERROR, (errmsg("[diskquota launcher] table database_list corrupt, laucher will exit. 
natts: "))); } - for (i = 0; i < SPI_processed; i++) { - HeapTuple tup; - Oid dbid; - Datum dat; - bool isnull; + HeapTuple tup; + Oid dbid; + Datum dat; + bool isnull; tup = SPI_tuptable->vals[i]; dat = SPI_getbinval(tup, tupdesc, 1, &isnull); - if (isnull) - ereport(ERROR, (errmsg("[diskquota launcher] dbid cann't be null in table database_list"))); + if (isnull) ereport(ERROR, (errmsg("[diskquota launcher] dbid cann't be null in table database_list"))); dbid = DatumGetObjectId(dat); if (!is_valid_dbid(dbid)) { - ereport(LOG, (errmsg("[diskquota launcher] database(oid:%u) in table database_list is not a valid database", dbid))); + ereport(LOG, (errmsg("[diskquota launcher] database(oid:%u) in table database_list is not a valid database", + dbid))); continue; } ereport(WARNING, (errmsg("[diskquota launcher] start workers"))); @@ -743,7 +678,9 @@ start_workers_from_dblist(void) */ if (num >= MAX_NUM_MONITORED_DB) { - ereport(LOG, (errmsg("[diskquota launcher] diskquota monitored database limit is reached, database(oid:%u) will not enable diskquota", dbid))); + ereport(LOG, (errmsg("[diskquota launcher] diskquota monitored database limit is reached, database(oid:%u) " + "will not enable diskquota", + dbid))); break; } } @@ -763,7 +700,7 @@ start_workers_from_dblist(void) static void process_extension_ddl_message() { - MessageResult code = ERR_UNKNOWN; + MessageResult code = ERR_UNKNOWN; ExtensionDDLMessage local_extension_ddl_message; LWLockAcquire(diskquota_locks.extension_ddl_message_lock, LW_SHARED); @@ -771,8 +708,7 @@ process_extension_ddl_message() LWLockRelease(diskquota_locks.extension_ddl_message_lock); /* create/drop extension message must be valid */ - if (local_extension_ddl_message.req_pid == 0 || local_extension_ddl_message.launcher_pid != MyProcPid) - return; + if (local_extension_ddl_message.req_pid == 0 || local_extension_ddl_message.launcher_pid != MyProcPid) return; ereport(LOG, (errmsg("[diskquota launcher]: received create/drop extension diskquota message"))); @@ -782,11 +718,10 @@ process_extension_ddl_message() LWLockAcquire(diskquota_locks.extension_ddl_message_lock, LW_EXCLUSIVE); memset(extension_ddl_message, 0, sizeof(ExtensionDDLMessage)); extension_ddl_message->launcher_pid = MyProcPid; - extension_ddl_message->result = (int) code; + extension_ddl_message->result = (int)code; LWLockRelease(diskquota_locks.extension_ddl_message_lock); } - /* * Process 'create extension' and 'drop extension' message. * For 'create extension' message, store dbid into table @@ -795,12 +730,12 @@ process_extension_ddl_message() * 'database_list' and stop the diskquota worker process. */ static void -do_process_extension_ddl_message(MessageResult * code, ExtensionDDLMessage local_extension_ddl_message) +do_process_extension_ddl_message(MessageResult *code, ExtensionDDLMessage local_extension_ddl_message) { - int old_num_db = num_db; - bool connected = false; - bool pushed_active_snap = false; - bool ret = true; + int old_num_db = num_db; + bool connected = false; + bool pushed_active_snap = false; + bool ret = true; StartTransactionCommand(); @@ -814,9 +749,8 @@ do_process_extension_ddl_message(MessageResult * code, ExtensionDDLMessage local int ret_code = SPI_connect(); if (ret_code != SPI_OK_CONNECT) { - ereport(ERROR, - (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("unable to connect to execute internal query. return code: %d.", ret_code))); + ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("unable to connect to execute internal query. 
return code: %d.", ret_code))); } connected = true; PushActiveSnapshot(GetTransactionSnapshot()); @@ -835,7 +769,8 @@ do_process_extension_ddl_message(MessageResult * code, ExtensionDDLMessage local *code = ERR_OK; break; default: - ereport(LOG, (errmsg("[diskquota launcher]:received unsupported message cmd=%d", local_extension_ddl_message.cmd))); + ereport(LOG, (errmsg("[diskquota launcher]:received unsupported message cmd=%d", + local_extension_ddl_message.cmd))); *code = ERR_UNKNOWN; break; } @@ -846,16 +781,14 @@ do_process_extension_ddl_message(MessageResult * code, ExtensionDDLMessage local HOLD_INTERRUPTS(); EmitErrorReport(); FlushErrorState(); - ret = false; + ret = false; num_db = old_num_db; RESUME_INTERRUPTS(); } PG_END_TRY(); - if (connected) - SPI_finish(); - if (pushed_active_snap) - PopActiveSnapshot(); + if (connected) SPI_finish(); + if (pushed_active_snap) PopActiveSnapshot(); if (ret) CommitTransactionCommand(); else @@ -868,7 +801,7 @@ do_process_extension_ddl_message(MessageResult * code, ExtensionDDLMessage local * we set it, and error out */ static void -on_add_db(Oid dbid, MessageResult * code) +on_add_db(Oid dbid, MessageResult *code) { if (num_db >= MAX_NUM_MONITORED_DB) { @@ -901,7 +834,6 @@ on_add_db(Oid dbid, MessageResult * code) *code = ERR_START_WORKER; ereport(ERROR, (errmsg("[diskquota launcher] failed to start worker - dbid=%u", dbid))); } - } /* @@ -912,7 +844,7 @@ on_add_db(Oid dbid, MessageResult * code) * 3. invalidate black-map entries and monitoring_dbid_cache from shared memory */ static void -on_del_db(Oid dbid, MessageResult * code) +on_del_db(Oid dbid, MessageResult *code) { if (!is_valid_dbid(dbid)) { @@ -937,7 +869,6 @@ on_del_db(Oid dbid, MessageResult * code) PG_RE_THROW(); } PG_END_TRY(); - } /* @@ -948,34 +879,34 @@ on_del_db(Oid dbid, MessageResult * code) static void add_dbid_to_database_list(Oid dbid) { - int ret; + int ret; - Oid argt[1] = {INT4OID}; + Oid argt[1] = {INT4OID}; Datum argv[1] = {Int32GetDatum(dbid)}; - ret = SPI_execute_with_args( - "select * from diskquota_namespace.database_list where dbid = $1", - 1, argt, argv, NULL, true, 0); + ret = SPI_execute_with_args("select * from diskquota_namespace.database_list where dbid = $1", 1, argt, argv, NULL, + true, 0); if (ret != SPI_OK_SELECT) - ereport(ERROR, (errmsg( - "[diskquota launcher] error occured while checking database_list, " - " code = %d errno = %d", ret, errno))); - - if (SPI_processed == 1) { - ereport(WARNING, (errmsg( - "[diskquota launcher] database id %d is already actived, " - "skip database_list update", dbid))); + ereport(ERROR, (errmsg("[diskquota launcher] error occured while checking database_list, " + " code = %d errno = %d", + ret, errno))); + + if (SPI_processed == 1) + { + ereport(WARNING, (errmsg("[diskquota launcher] database id %d is already actived, " + "skip database_list update", + dbid))); return; } - ret = SPI_execute_with_args("insert into diskquota_namespace.database_list values($1)", - 1, argt,argv, NULL, false, 0); + ret = SPI_execute_with_args("insert into diskquota_namespace.database_list values($1)", 1, argt, argv, NULL, false, + 0); if (ret != SPI_OK_INSERT || SPI_processed != 1) - ereport(ERROR, (errmsg( - "[diskquota launcher] error occured while updating database_list, " - " code = %d errno = %d", ret, errno))); + ereport(ERROR, (errmsg("[diskquota launcher] error occured while updating database_list, " + " code = %d errno = %d", + ret, errno))); return; } @@ -988,7 +919,7 @@ static void del_dbid_from_database_list(Oid dbid) { 
StringInfoData str; - int ret; + int ret; initStringInfo(&str); appendStringInfo(&str, "delete from diskquota_namespace.database_list where dbid=%u;", dbid); @@ -997,9 +928,8 @@ del_dbid_from_database_list(Oid dbid) ret = SPI_execute(str.data, false, 0); if (ret != SPI_OK_DELETE) { - ereport(ERROR, - (errmsg("[diskquota launcher] SPI_execute sql: \"%s\", errno: %d, ret_code: %d.", - str.data, errno, ret))); + ereport(ERROR, (errmsg("[diskquota launcher] SPI_execute sql: \"%s\", errno: %d, ret_code: %d.", str.data, + errno, ret))); } pfree(str.data); } @@ -1012,12 +942,10 @@ static void try_kill_db_worker(Oid dbid) { DiskQuotaWorkerEntry *hash_entry; - bool found; + bool found; LWLockAcquire(diskquota_locks.worker_map_lock, LW_EXCLUSIVE); - hash_entry = (DiskQuotaWorkerEntry *) hash_search(disk_quota_worker_map, - (void *) &dbid, - HASH_REMOVE, &found); + hash_entry = (DiskQuotaWorkerEntry *)hash_search(disk_quota_worker_map, (void *)&dbid, HASH_REMOVE, &found); if (found) { BackgroundWorkerHandle *handle; @@ -1039,7 +967,7 @@ static void terminate_all_workers(void) { DiskQuotaWorkerEntry *hash_entry; - HASH_SEQ_STATUS iter; + HASH_SEQ_STATUS iter; LWLockAcquire(diskquota_locks.worker_map_lock, LW_EXCLUSIVE); @@ -1051,8 +979,7 @@ terminate_all_workers(void) */ while ((hash_entry = hash_seq_search(&iter)) != NULL) { - if (hash_entry->handle) - TerminateBackgroundWorker(hash_entry->handle); + if (hash_entry->handle) TerminateBackgroundWorker(hash_entry->handle); } LWLockRelease(diskquota_locks.worker_map_lock); } @@ -1061,13 +988,11 @@ static bool worker_create_entry(Oid dbid) { DiskQuotaWorkerEntry *workerentry = NULL; - bool found = false; + bool found = false; LWLockAcquire(diskquota_locks.worker_map_lock, LW_EXCLUSIVE); - workerentry = (DiskQuotaWorkerEntry *) hash_search(disk_quota_worker_map, - (void *) &dbid, - HASH_ENTER, &found); + workerentry = (DiskQuotaWorkerEntry *)hash_search(disk_quota_worker_map, (void *)&dbid, HASH_ENTER, &found); if (!found) { workerentry->handle = NULL; @@ -1083,13 +1008,11 @@ static bool worker_set_handle(Oid dbid, BackgroundWorkerHandle *handle) { DiskQuotaWorkerEntry *workerentry = NULL; - bool found = false; + bool found = false; LWLockAcquire(diskquota_locks.worker_map_lock, LW_EXCLUSIVE); - workerentry = (DiskQuotaWorkerEntry *) hash_search(disk_quota_worker_map, - (void *) &dbid, - HASH_ENTER, &found); + workerentry = (DiskQuotaWorkerEntry *)hash_search(disk_quota_worker_map, (void *)&dbid, HASH_ENTER, &found); if (found) { workerentry->handle = handle; @@ -1098,8 +1021,7 @@ worker_set_handle(Oid dbid, BackgroundWorkerHandle *handle) if (!found) { ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("[diskquota] worker not found for database \"%s\"", - get_database_name(dbid)))); + errmsg("[diskquota] worker not found for database \"%s\"", get_database_name(dbid)))); } return found; } @@ -1112,20 +1034,19 @@ worker_set_handle(Oid dbid, BackgroundWorkerHandle *handle) static bool start_worker_by_dboid(Oid dbid) { - BackgroundWorker worker; + BackgroundWorker worker; BackgroundWorkerHandle *handle; - BgwHandleStatus status; - MemoryContext old_ctx; - char *dbname; - pid_t pid; - bool ret; + BgwHandleStatus status; + MemoryContext old_ctx; + char *dbname; + pid_t pid; + bool ret; /* Create entry first so that it can be checked by bgworker and QD. 
*/ worker_create_entry(dbid); memset(&worker, 0, sizeof(BackgroundWorker)); - worker.bgw_flags = BGWORKER_SHMEM_ACCESS | - BGWORKER_BACKEND_DATABASE_CONNECTION; + worker.bgw_flags = BGWORKER_SHMEM_ACCESS | BGWORKER_BACKEND_DATABASE_CONNECTION; worker.bgw_start_time = BgWorkerStart_RecoveryFinished; /* @@ -1144,24 +1065,20 @@ start_worker_by_dboid(Oid dbid) pfree(dbname); /* set bgw_notify_pid so that we can use WaitForBackgroundWorkerStartup */ worker.bgw_notify_pid = MyProcPid; - worker.bgw_main_arg = (Datum) 0; + worker.bgw_main_arg = (Datum)0; old_ctx = MemoryContextSwitchTo(TopMemoryContext); - ret = RegisterDynamicBackgroundWorker(&worker, &handle); + ret = RegisterDynamicBackgroundWorker(&worker, &handle); MemoryContextSwitchTo(old_ctx); - if (!ret) - return false; + if (!ret) return false; status = WaitForBackgroundWorkerStartup(handle, &pid); if (status == BGWH_STOPPED) - ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_RESOURCES), - errmsg("could not start background process"), - errhint("More details may be available in the server log."))); + ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_RESOURCES), errmsg("could not start background process"), + errhint("More details may be available in the server log."))); if (status == BGWH_POSTMASTER_DIED) - ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_RESOURCES), - errmsg("cannot start background processes without postmaster"), - errhint("Kill all remaining database processes and restart the database."))); + ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_RESOURCES), + errmsg("cannot start background processes without postmaster"), + errhint("Kill all remaining database processes and restart the database."))); Assert(status == BGWH_STARTED); @@ -1176,13 +1093,11 @@ start_worker_by_dboid(Oid dbid) static bool is_valid_dbid(Oid dbid) { - HeapTuple tuple; + HeapTuple tuple; - if (dbid == InvalidOid) - return false; + if (dbid == InvalidOid) return false; tuple = SearchSysCache1(DATABASEOID, ObjectIdGetDatum(dbid)); - if (!HeapTupleIsValid(tuple)) - return false; + if (!HeapTupleIsValid(tuple)) return false; ReleaseSysCache(tuple); return true; } @@ -1192,9 +1107,9 @@ worker_increase_epoch(Oid database_oid) { LWLockAcquire(diskquota_locks.worker_map_lock, LW_SHARED); - bool found = false; - DiskQuotaWorkerEntry * workerentry = (DiskQuotaWorkerEntry *) hash_search( - disk_quota_worker_map, (void *) &database_oid, HASH_FIND, &found); + bool found = false; + DiskQuotaWorkerEntry *workerentry = + (DiskQuotaWorkerEntry *)hash_search(disk_quota_worker_map, (void *)&database_oid, HASH_FIND, &found); if (found) { @@ -1209,11 +1124,11 @@ worker_get_epoch(Oid database_oid) { LWLockAcquire(diskquota_locks.worker_map_lock, LW_SHARED); - bool found = false; - uint32 epoch = 0; - DiskQuotaWorkerEntry * workerentry = (DiskQuotaWorkerEntry *) hash_search( - disk_quota_worker_map, (void *) &database_oid, HASH_FIND, &found); - + bool found = false; + uint32 epoch = 0; + DiskQuotaWorkerEntry *workerentry = + (DiskQuotaWorkerEntry *)hash_search(disk_quota_worker_map, (void *)&database_oid, HASH_FIND, &found); + if (found) { epoch = pg_atomic_read_u32(&(workerentry->epoch)); @@ -1222,8 +1137,7 @@ worker_get_epoch(Oid database_oid) if (!found) { ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("[diskquota] worker not found for database \"%s\"", - get_database_name(database_oid)))); + errmsg("[diskquota] worker not found for database \"%s\"", get_database_name(database_oid)))); } return epoch; } @@ -1239,32 +1153,32 @@ show_worker_epoch(PG_FUNCTION_ARGS) 
PG_RETURN_UINT32(worker_get_epoch(MyDatabaseId)); } -static const char* diskquota_status_check_soft_limit() { +static const char * +diskquota_status_check_soft_limit() +{ // should run on coordinator only. Assert(IS_QUERY_DISPATCHER()); bool found, paused; LWLockAcquire(diskquota_locks.worker_map_lock, LW_SHARED); { - DiskQuotaWorkerEntry *hash_entry; + DiskQuotaWorkerEntry *hash_entry; - hash_entry = (DiskQuotaWorkerEntry*) hash_search(disk_quota_worker_map, - (void*)&MyDatabaseId, - HASH_FIND, - &found); + hash_entry = + (DiskQuotaWorkerEntry *)hash_search(disk_quota_worker_map, (void *)&MyDatabaseId, HASH_FIND, &found); paused = found ? hash_entry->is_paused : false; } LWLockRelease(diskquota_locks.worker_map_lock); // if worker no booted, aka 'CREATE EXTENSION' not called, diskquota is paused - if (!found) - return "paused"; + if (!found) return "paused"; // if worker booted, check 'worker_map->is_paused' return paused ? "paused" : "enabled"; } -static const char* diskquota_status_check_hard_limit() +static const char * +diskquota_status_check_hard_limit() { // should run on coordinator only. Assert(IS_QUERY_DISPATCHER()); @@ -1274,30 +1188,29 @@ static const char* diskquota_status_check_hard_limit() bool found, paused; LWLockAcquire(diskquota_locks.worker_map_lock, LW_SHARED); { - DiskQuotaWorkerEntry *hash_entry; + DiskQuotaWorkerEntry *hash_entry; - hash_entry = (DiskQuotaWorkerEntry*) hash_search(disk_quota_worker_map, - (void*)&MyDatabaseId, - HASH_FIND, - &found); + hash_entry = + (DiskQuotaWorkerEntry *)hash_search(disk_quota_worker_map, (void *)&MyDatabaseId, HASH_FIND, &found); paused = found ? hash_entry->is_paused : false; } LWLockRelease(diskquota_locks.worker_map_lock); // if worker booted and 'worker_map->is_paused == true' and hardlimit is enabled // hard limits should also paused - if (found && paused && hardlimit) - return "paused"; + if (found && paused && hardlimit) return "paused"; - return hardlimit ? "enabled": "disabled"; + return hardlimit ? "enabled" : "disabled"; } -static const char* diskquota_status_binary_version() +static const char * +diskquota_status_binary_version() { return DISKQUOTA_VERSION; } -static const char* diskquota_status_schema_version() +static const char * +diskquota_status_schema_version() { static char version[64] = {0}; memset(version, 0, sizeof(version)); @@ -1307,25 +1220,26 @@ static const char* diskquota_status_schema_version() ret = SPI_execute("select extversion from pg_extension where extname = 'diskquota'", true, 0); - if(ret != SPI_OK_SELECT || SPI_processed != 1) { + if (ret != SPI_OK_SELECT || SPI_processed != 1) + { ereport(WARNING, - (errmsg("[diskquota] when reading installed version lines %ld code = %d", - SPI_processed, ret))); + (errmsg("[diskquota] when reading installed version lines %ld code = %d", SPI_processed, ret))); goto out; } - if (SPI_processed == 0) { + if (SPI_processed == 0) + { goto out; } - bool is_null = false; - Datum v = SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 1, &is_null); + bool is_null = false; + Datum v = SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 1, &is_null); Assert(is_null == false); char *vv = TextDatumGetCString(v); - if (vv == NULL) { - ereport(WARNING, - (errmsg("[diskquota] 'extversion' is empty in pg_class.pg_extension. may catalog corrupted"))); + if (vv == NULL) + { + ereport(WARNING, (errmsg("[diskquota] 'extversion' is empty in pg_class.pg_extension. 
may catalog corrupted"))); goto out; } @@ -1337,27 +1251,31 @@ static const char* diskquota_status_schema_version() } PG_FUNCTION_INFO_V1(diskquota_status); -Datum diskquota_status(PG_FUNCTION_ARGS) +Datum +diskquota_status(PG_FUNCTION_ARGS) { - typedef struct Context { + typedef struct Context + { int index; } Context; - typedef struct FeatureStatus { - const char* name; - const char* (*status)(void); + typedef struct FeatureStatus + { + const char *name; + const char *(*status)(void); } FeatureStatus; static const FeatureStatus fs[] = { - {.name = "soft limits", .status = diskquota_status_check_soft_limit}, - {.name = "hard limits", .status = diskquota_status_check_hard_limit}, - {.name = "current binary version", .status = diskquota_status_binary_version}, - {.name = "current schema version", .status = diskquota_status_schema_version}, + {.name = "soft limits", .status = diskquota_status_check_soft_limit}, + {.name = "hard limits", .status = diskquota_status_check_hard_limit}, + {.name = "current binary version", .status = diskquota_status_binary_version}, + {.name = "current schema version", .status = diskquota_status_schema_version}, }; FuncCallContext *funcctx; - if (SRF_IS_FIRSTCALL()) { + if (SRF_IS_FIRSTCALL()) + { funcctx = SRF_FIRSTCALL_INIT(); MemoryContext oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx); @@ -1366,27 +1284,28 @@ Datum diskquota_status(PG_FUNCTION_ARGS) TupleDescInitEntry(tupdesc, 1, "name", TEXTOID, -1, 0); TupleDescInitEntry(tupdesc, 2, "status", TEXTOID, -1, 0); funcctx->tuple_desc = BlessTupleDesc(tupdesc); - Context *context = (Context *)palloc(sizeof(Context)); - context->index = 0; - funcctx->user_fctx = context; + Context *context = (Context *)palloc(sizeof(Context)); + context->index = 0; + funcctx->user_fctx = context; } MemoryContextSwitchTo(oldcontext); } - funcctx = SRF_PERCALL_SETUP(); + funcctx = SRF_PERCALL_SETUP(); Context *context = (Context *)funcctx->user_fctx; - if (context->index >= sizeof(fs) / sizeof(FeatureStatus)) { + if (context->index >= sizeof(fs) / sizeof(FeatureStatus)) + { SRF_RETURN_DONE(funcctx); } - bool nulls[2] = {false, false}; - Datum v[2] = { - DirectFunctionCall1(textin, CStringGetDatum(fs[context->index].name)), - DirectFunctionCall1(textin, CStringGetDatum(fs[context->index].status())), - }; - ReturnSetInfo *rsi = (ReturnSetInfo *)fcinfo->resultinfo; - HeapTuple tuple = heap_form_tuple(rsi->expectedDesc, v, nulls); + bool nulls[2] = {false, false}; + Datum v[2] = { + DirectFunctionCall1(textin, CStringGetDatum(fs[context->index].name)), + DirectFunctionCall1(textin, CStringGetDatum(fs[context->index].status())), + }; + ReturnSetInfo *rsi = (ReturnSetInfo *)fcinfo->resultinfo; + HeapTuple tuple = heap_form_tuple(rsi->expectedDesc, v, nulls); context->index++; SRF_RETURN_NEXT(funcctx, HeapTupleGetDatum(tuple)); @@ -1395,14 +1314,13 @@ Datum diskquota_status(PG_FUNCTION_ARGS) static bool check_for_timeout(TimestampTz start_time) { - long diff_secs = 0; - int diff_usecs = 0; + long diff_secs = 0; + int diff_usecs = 0; TimestampDifference(start_time, GetCurrentTimestamp(), &diff_secs, &diff_usecs); if (diff_secs >= diskquota_worker_timeout) { - ereport(NOTICE, ( - errmsg("[diskquota] timeout when waiting for worker"), - errhint("please check if the bgworker is still alive."))); + ereport(NOTICE, (errmsg("[diskquota] timeout when waiting for worker"), + errhint("please check if the bgworker is still alive."))); return true; } return false; @@ -1416,13 +1334,12 @@ 
PG_FUNCTION_INFO_V1(wait_for_worker_new_epoch); Datum wait_for_worker_new_epoch(PG_FUNCTION_ARGS) { - TimestampTz start_time = GetCurrentTimestamp(); - uint32 current_epoch = worker_get_epoch(MyDatabaseId); + TimestampTz start_time = GetCurrentTimestamp(); + uint32 current_epoch = worker_get_epoch(MyDatabaseId); for (;;) { CHECK_FOR_INTERRUPTS(); - if (check_for_timeout(start_time)) - start_time = GetCurrentTimestamp(); + if (check_for_timeout(start_time)) start_time = GetCurrentTimestamp(); uint32 new_epoch = worker_get_epoch(MyDatabaseId); /* Unsigned integer underflow is OK */ if (new_epoch - current_epoch >= 2u) @@ -1430,9 +1347,7 @@ wait_for_worker_new_epoch(PG_FUNCTION_ARGS) PG_RETURN_BOOL(true); } /* Sleep for naptime to reduce CPU usage */ - (void) WaitLatch(&MyProc->procLatch, - WL_LATCH_SET | WL_TIMEOUT, - diskquota_naptime ? diskquota_naptime : 1); + (void)WaitLatch(&MyProc->procLatch, WL_LATCH_SET | WL_TIMEOUT, diskquota_naptime ? diskquota_naptime : 1); ResetLatch(&MyProc->procLatch); } PG_RETURN_BOOL(false); diff --git a/diskquota.h b/diskquota.h index 470bd35a5d3..3301840d16d 100644 --- a/diskquota.h +++ b/diskquota.h @@ -27,35 +27,35 @@ typedef enum ROLE_TABLESPACE_QUOTA, NUM_QUOTA_TYPES -} QuotaType; +} QuotaType; typedef enum { - FETCH_ACTIVE_OID, /* fetch active table list */ - FETCH_ACTIVE_SIZE, /* fetch size for active tables */ + FETCH_ACTIVE_OID, /* fetch active table list */ + FETCH_ACTIVE_SIZE, /* fetch size for active tables */ ADD_DB_TO_MONITOR, REMOVE_DB_FROM_BEING_MONITORED, -} FetchTableStatType; +} FetchTableStatType; typedef enum { DISKQUOTA_UNKNOWN_STATE, DISKQUOTA_READY_STATE -} DiskQuotaState; +} DiskQuotaState; struct DiskQuotaLocks { - LWLock *active_table_lock; - LWLock *black_map_lock; - LWLock *extension_ddl_message_lock; - LWLock *extension_ddl_lock; /* ensure create diskquota extension serially */ - LWLock *monitoring_dbid_cache_lock; - LWLock *relation_cache_lock; - LWLock *worker_map_lock; - LWLock *altered_reloid_cache_lock; + LWLock *active_table_lock; + LWLock *black_map_lock; + LWLock *extension_ddl_message_lock; + LWLock *extension_ddl_lock; /* ensure create diskquota extension serially */ + LWLock *monitoring_dbid_cache_lock; + LWLock *relation_cache_lock; + LWLock *worker_map_lock; + LWLock *altered_reloid_cache_lock; }; typedef struct DiskQuotaLocks DiskQuotaLocks; -#define DiskQuotaLocksItemNumber (sizeof(DiskQuotaLocks) / sizeof(void*)) +#define DiskQuotaLocksItemNumber (sizeof(DiskQuotaLocks) / sizeof(void *)) /* * MessageBox is used to store a message for communication between @@ -69,14 +69,14 @@ typedef struct DiskQuotaLocks DiskQuotaLocks; */ struct ExtensionDDLMessage { - int launcher_pid; /* diskquota launcher pid */ - int req_pid; /* pid of the QD process which create/drop - * diskquota extension */ - int cmd; /* message command type, see MessageCommand */ - int result; /* message result writen by launcher, see - * MessageResult */ - int dbid; /* dbid of create/drop diskquota - * extensionstatement */ + int launcher_pid; /* diskquota launcher pid */ + int req_pid; /* pid of the QD process which create/drop + * diskquota extension */ + int cmd; /* message command type, see MessageCommand */ + int result; /* message result writen by launcher, see + * MessageResult */ + int dbid; /* dbid of create/drop diskquota + * extensionstatement */ }; enum MessageCommand @@ -103,10 +103,10 @@ enum MessageResult }; typedef struct ExtensionDDLMessage ExtensionDDLMessage; -typedef enum MessageCommand MessageCommand; -typedef enum MessageResult 
MessageResult; +typedef enum MessageCommand MessageCommand; +typedef enum MessageResult MessageResult; -extern DiskQuotaLocks diskquota_locks; +extern DiskQuotaLocks diskquota_locks; extern ExtensionDDLMessage *extension_ddl_message; typedef struct DiskQuotaWorkerEntry DiskQuotaWorkerEntry; @@ -114,9 +114,9 @@ typedef struct DiskQuotaWorkerEntry DiskQuotaWorkerEntry; /* disk quota worker info used by launcher to manage the worker processes. */ struct DiskQuotaWorkerEntry { - Oid dbid; - pg_atomic_uint32 epoch; /* this counter will be increased after each worker loop */ - bool is_paused; /* true if this worker is paused */ + Oid dbid; + pg_atomic_uint32 epoch; /* this counter will be increased after each worker loop */ + bool is_paused; /* true if this worker is paused */ // NOTE: this field only can access in diskquota launcher, in other process it is dangling pointer BackgroundWorkerHandle *handle; @@ -142,23 +142,23 @@ extern bool quota_check_common(Oid reloid, RelFileNode *relfilenode); extern void init_disk_quota_hook(void); extern Datum diskquota_fetch_table_stat(PG_FUNCTION_ARGS); -extern int diskquota_naptime; -extern int diskquota_max_active_tables; -extern bool diskquota_hardlimit; - -extern int SEGCOUNT; -extern int worker_spi_get_extension_version(int *major, int *minor); -extern int get_ext_major_version(void); -extern void truncateStringInfo(StringInfo str, int nchars); -extern List *get_rel_oid_list(void); -extern int64 calculate_relation_size_all_forks(RelFileNodeBackend *rnode, char relstorage); +extern int diskquota_naptime; +extern int diskquota_max_active_tables; +extern bool diskquota_hardlimit; + +extern int SEGCOUNT; +extern int worker_spi_get_extension_version(int *major, int *minor); +extern int get_ext_major_version(void); +extern void truncateStringInfo(StringInfo str, int nchars); +extern List *get_rel_oid_list(void); +extern int64 calculate_relation_size_all_forks(RelFileNodeBackend *rnode, char relstorage); extern Relation diskquota_relation_open(Oid relid, LOCKMODE mode); -extern List* diskquota_get_index_list(Oid relid); -extern void diskquota_get_appendonly_aux_oid_list(Oid reloid, Oid *segrelid, Oid *blkdirrelid, Oid *visimaprelid); -extern Oid diskquota_parse_primary_table_oid(Oid namespace, char *relname); +extern List *diskquota_get_index_list(Oid relid); +extern void diskquota_get_appendonly_aux_oid_list(Oid reloid, Oid *segrelid, Oid *blkdirrelid, Oid *visimaprelid); +extern Oid diskquota_parse_primary_table_oid(Oid namespace, char *relname); -extern bool worker_increase_epoch(Oid database_oid); +extern bool worker_increase_epoch(Oid database_oid); extern unsigned int worker_get_epoch(Oid database_oid); -extern bool diskquota_is_paused(void); +extern bool diskquota_is_paused(void); #endif diff --git a/diskquota_utility.c b/diskquota_utility.c index 6ac10d9e6a6..0d354fc98df 100644 --- a/diskquota_utility.c +++ b/diskquota_utility.c @@ -64,18 +64,17 @@ PG_FUNCTION_INFO_V1(relation_size_local); PG_FUNCTION_INFO_V1(pull_all_table_size); /* timeout count to wait response from launcher process, in 1/10 sec */ -#define WAIT_TIME_COUNT 1200 +#define WAIT_TIME_COUNT 1200 static object_access_hook_type next_object_access_hook; -static bool is_database_empty(void); -static void dq_object_access_hook(ObjectAccessType access, Oid classId, - Oid objectId, int subId, void *arg); +static bool is_database_empty(void); +static void dq_object_access_hook(ObjectAccessType access, Oid classId, Oid objectId, int subId, void *arg); static const char 
*ddl_err_code_to_err_message(MessageResult code); -static int64 get_size_in_mb(char *str); -static void set_quota_config_internal(Oid targetoid, int64 quota_limit_mb, QuotaType type); -static void set_target_internal(Oid primaryoid, Oid spcoid, int64 quota_limit_mb, QuotaType type); +static int64 get_size_in_mb(char *str); +static void set_quota_config_internal(Oid targetoid, int64 quota_limit_mb, QuotaType type); +static void set_target_internal(Oid primaryoid, Oid spcoid, int64 quota_limit_mb, QuotaType type); -int get_ext_major_version(void); +int get_ext_major_version(void); List *get_rel_oid_list(void); /* ---- Help Functions to set quota limit. ---- */ @@ -88,26 +87,27 @@ List *get_rel_oid_list(void); Datum init_table_size_table(PG_FUNCTION_ARGS) { - int ret; - StringInfoData buf; + int ret; + StringInfoData buf; - RangeVar *rv; - Relation rel; - int extMajorVersion; + RangeVar *rv; + Relation rel; + int extMajorVersion; /* * If error happens in init_table_size_table, just return error messages * to the client side. So there is no need to catch the error. */ /* ensure table diskquota.state exists */ - rv = makeRangeVar("diskquota", "state", -1); + rv = makeRangeVar("diskquota", "state", -1); rel = heap_openrv_extended(rv, AccessShareLock, true); if (!rel) { /* configuration table is missing. */ - elog(ERROR, "table \"diskquota.state\" is missing in database \"%s\"," - " please recreate diskquota extension", - get_database_name(MyDatabaseId)); + elog(ERROR, + "table \"diskquota.state\" is missing in database \"%s\"," + " please recreate diskquota extension", + get_database_name(MyDatabaseId)); } heap_close(rel, NoLock); @@ -130,107 +130,97 @@ init_table_size_table(PG_FUNCTION_ARGS) initStringInfo(&buf); appendStringInfo(&buf, "truncate table diskquota.table_size;"); ret = SPI_execute(buf.data, false, 0); - if (ret != SPI_OK_UTILITY) - elog(ERROR, "cannot truncate table_size table: error code %d", ret); + if (ret != SPI_OK_UTILITY) elog(ERROR, "cannot truncate table_size table: error code %d", ret); if (extMajorVersion == 1) { resetStringInfo(&buf); - appendStringInfo(&buf, "INSERT INTO diskquota.table_size WITH all_size AS " - "(SELECT diskquota.pull_all_table_size() as a FROM gp_dist_random('gp_id') " - "UNION ALL SELECT diskquota.pull_all_table_size()) " - "SELECT (a).tableid, sum((a).size) FROM all_size GROUP BY (a).tableid;"); + appendStringInfo(&buf, + "INSERT INTO diskquota.table_size WITH all_size AS " + "(SELECT diskquota.pull_all_table_size() as a FROM gp_dist_random('gp_id') " + "UNION ALL SELECT diskquota.pull_all_table_size()) " + "SELECT (a).tableid, sum((a).size) FROM all_size GROUP BY (a).tableid;"); ret = SPI_execute(buf.data, false, 0); - if (ret != SPI_OK_INSERT) - elog(ERROR, "cannot insert into table_size table: error code %d", ret); - } - else + if (ret != SPI_OK_INSERT) elog(ERROR, "cannot insert into table_size table: error code %d", ret); + } else { resetStringInfo(&buf); - appendStringInfo(&buf, "INSERT INTO diskquota.table_size WITH all_size AS " - "(SELECT diskquota.pull_all_table_size() as a FROM gp_dist_random('gp_id')) " - "SELECT (a).* FROM all_size;"); + appendStringInfo(&buf, + "INSERT INTO diskquota.table_size WITH all_size AS " + "(SELECT diskquota.pull_all_table_size() as a FROM gp_dist_random('gp_id')) " + "SELECT (a).* FROM all_size;"); ret = SPI_execute(buf.data, false, 0); - if (ret != SPI_OK_INSERT) - elog(ERROR, "cannot insert into table_size table: error code %d", ret); + if (ret != SPI_OK_INSERT) elog(ERROR, "cannot insert into table_size 
table: error code %d", ret); resetStringInfo(&buf); - /* size is the sum of size on master and on all segments when segid == -1. */ - appendStringInfo(&buf, "INSERT INTO diskquota.table_size WITH total_size AS " - "(SELECT * from diskquota.pull_all_table_size() " - "UNION ALL SELECT tableid, size, segid FROM diskquota.table_size) " - "SELECT tableid, sum(size) as size, -1 as segid FROM total_size GROUP BY tableid;"); + /* size is the sum of size on master and on all segments when segid == -1. */ + appendStringInfo(&buf, + "INSERT INTO diskquota.table_size WITH total_size AS " + "(SELECT * from diskquota.pull_all_table_size() " + "UNION ALL SELECT tableid, size, segid FROM diskquota.table_size) " + "SELECT tableid, sum(size) as size, -1 as segid FROM total_size GROUP BY tableid;"); ret = SPI_execute(buf.data, false, 0); - if (ret != SPI_OK_INSERT) - elog(ERROR, "cannot insert into table_size table: error code %d", ret); + if (ret != SPI_OK_INSERT) elog(ERROR, "cannot insert into table_size table: error code %d", ret); } /* set diskquota state to ready. */ resetStringInfo(&buf); - appendStringInfo(&buf, - "update diskquota.state set state = %u;", - DISKQUOTA_READY_STATE); + appendStringInfo(&buf, "update diskquota.state set state = %u;", DISKQUOTA_READY_STATE); ret = SPI_execute(buf.data, false, 0); - if (ret != SPI_OK_UPDATE) - elog(ERROR, "cannot update state table: error code %d", ret); + if (ret != SPI_OK_UPDATE) elog(ERROR, "cannot update state table: error code %d", ret); SPI_finish(); PG_RETURN_VOID(); } -static HTAB* +static HTAB * calculate_all_table_size() { - Relation classRel; - HeapTuple tuple; - HeapScanDesc relScan; - Oid relid; - Oid prelid; - Size tablesize; - RelFileNodeBackend rnode; - TableEntryKey keyitem; - HTAB *local_table_size_map; - HASHCTL hashctl; + Relation classRel; + HeapTuple tuple; + HeapScanDesc relScan; + Oid relid; + Oid prelid; + Size tablesize; + RelFileNodeBackend rnode; + TableEntryKey keyitem; + HTAB *local_table_size_map; + HASHCTL hashctl; DiskQuotaActiveTableEntry *entry; - bool found; + bool found; memset(&hashctl, 0, sizeof(hashctl)); - hashctl.keysize = sizeof(TableEntryKey); + hashctl.keysize = sizeof(TableEntryKey); hashctl.entrysize = sizeof(DiskQuotaActiveTableEntry); - hashctl.hcxt = CurrentMemoryContext; - hashctl.hash = tag_hash; + hashctl.hcxt = CurrentMemoryContext; + hashctl.hash = tag_hash; - local_table_size_map = hash_create("local_table_size_map", - 1024, &hashctl, - HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); + local_table_size_map = + hash_create("local_table_size_map", 1024, &hashctl, HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); classRel = heap_open(RelationRelationId, AccessShareLock); - relScan = heap_beginscan_catalog(classRel, 0, NULL); + relScan = heap_beginscan_catalog(classRel, 0, NULL); while ((tuple = heap_getnext(relScan, ForwardScanDirection)) != NULL) { - Form_pg_class classForm = (Form_pg_class) GETSTRUCT(tuple); - if (classForm->relkind != RELKIND_RELATION && - classForm->relkind != RELKIND_MATVIEW && - classForm->relkind != RELKIND_INDEX && - classForm->relkind != RELKIND_AOSEGMENTS && - classForm->relkind != RELKIND_AOBLOCKDIR && - classForm->relkind != RELKIND_AOVISIMAP && - classForm->relkind != RELKIND_TOASTVALUE) + Form_pg_class classForm = (Form_pg_class)GETSTRUCT(tuple); + if (classForm->relkind != RELKIND_RELATION && classForm->relkind != RELKIND_MATVIEW && + classForm->relkind != RELKIND_INDEX && classForm->relkind != RELKIND_AOSEGMENTS && + classForm->relkind != RELKIND_AOBLOCKDIR && classForm->relkind != 
RELKIND_AOVISIMAP && + classForm->relkind != RELKIND_TOASTVALUE) continue; relid = HeapTupleGetOid(tuple); /* ignore system table */ - if (relid < FirstNormalObjectId) - continue; + if (relid < FirstNormalObjectId) continue; - rnode.node.dbNode = MyDatabaseId; + rnode.node.dbNode = MyDatabaseId; rnode.node.relNode = classForm->relfilenode; rnode.node.spcNode = OidIsValid(classForm->reltablespace) ? classForm->reltablespace : MyDatabaseTableSpace; - rnode.backend = classForm->relpersistence == RELPERSISTENCE_TEMP ? TempRelBackendId : InvalidBackendId; - tablesize = calculate_relation_size_all_forks(&rnode, classForm->relstorage); + rnode.backend = classForm->relpersistence == RELPERSISTENCE_TEMP ? TempRelBackendId : InvalidBackendId; + tablesize = calculate_relation_size_all_forks(&rnode, classForm->relstorage); keyitem.reloid = relid; - keyitem.segid = GpIdentity.segindex; + keyitem.segid = GpIdentity.segindex; prelid = diskquota_parse_primary_table_oid(classForm->relnamespace, classForm->relname.data); if (OidIsValid(prelid)) @@ -255,16 +245,17 @@ Datum pull_all_table_size(PG_FUNCTION_ARGS) { DiskQuotaActiveTableEntry *entry; - FuncCallContext *funcctx; - struct PullAllTableSizeCtx { - HASH_SEQ_STATUS iter; - HTAB *local_table_size_map; - } *table_size_ctx; + FuncCallContext *funcctx; + struct PullAllTableSizeCtx + { + HASH_SEQ_STATUS iter; + HTAB *local_table_size_map; + } * table_size_ctx; if (SRF_IS_FIRSTCALL()) { - TupleDesc tupdesc; - MemoryContext oldcontext; + TupleDesc tupdesc; + MemoryContext oldcontext; /* Create a function context for cross-call persistence. */ funcctx = SRF_FIRSTCALL_INIT(); @@ -273,37 +264,37 @@ pull_all_table_size(PG_FUNCTION_ARGS) oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx); tupdesc = CreateTemplateTupleDesc(3, false /*hasoid*/); - TupleDescInitEntry(tupdesc, (AttrNumber) 1, "TABLEID", OIDOID, -1 /*typmod*/, 0 /*attdim*/); - TupleDescInitEntry(tupdesc, (AttrNumber) 2, "SIZE", INT8OID, -1 /*typmod*/, 0 /*attdim*/); - TupleDescInitEntry(tupdesc, (AttrNumber) 3, "SEGID", INT2OID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber)1, "TABLEID", OIDOID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber)2, "SIZE", INT8OID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber)3, "SEGID", INT2OID, -1 /*typmod*/, 0 /*attdim*/); funcctx->tuple_desc = BlessTupleDesc(tupdesc); /* Create a local hash table and fill it with entries from shared memory. */ - table_size_ctx = (struct PullAllTableSizeCtx *) palloc(sizeof(struct PullAllTableSizeCtx)); + table_size_ctx = (struct PullAllTableSizeCtx *)palloc(sizeof(struct PullAllTableSizeCtx)); table_size_ctx->local_table_size_map = calculate_all_table_size(); /* Setup first calling context. 
*/ hash_seq_init(&(table_size_ctx->iter), table_size_ctx->local_table_size_map); - funcctx->user_fctx = (void *) table_size_ctx; + funcctx->user_fctx = (void *)table_size_ctx; MemoryContextSwitchTo(oldcontext); } - funcctx = SRF_PERCALL_SETUP(); - table_size_ctx = (struct PullAllTableSizeCtx *) funcctx->user_fctx; - + funcctx = SRF_PERCALL_SETUP(); + table_size_ctx = (struct PullAllTableSizeCtx *)funcctx->user_fctx; + while ((entry = hash_seq_search(&(table_size_ctx->iter))) != NULL) { - Datum result; - Datum values[3]; - bool nulls[3]; - HeapTuple tuple; + Datum result; + Datum values[3]; + bool nulls[3]; + HeapTuple tuple; values[0] = ObjectIdGetDatum(entry->reloid); values[1] = Int64GetDatum(entry->tablesize); values[2] = Int16GetDatum(entry->segid); memset(nulls, false, sizeof(nulls)); - tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls); + tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls); result = HeapTupleGetDatum(tuple); SRF_RETURN_NEXT(funcctx, result); @@ -329,30 +320,26 @@ diskquota_start_worker(PG_FUNCTION_ARGS) LWLockAcquire(diskquota_locks.extension_ddl_lock, LW_EXCLUSIVE); LWLockAcquire(diskquota_locks.extension_ddl_message_lock, LW_EXCLUSIVE); extension_ddl_message->req_pid = MyProcPid; - extension_ddl_message->cmd = CMD_CREATE_EXTENSION; - extension_ddl_message->result = ERR_PENDING; - extension_ddl_message->dbid = MyDatabaseId; - launcher_pid = extension_ddl_message->launcher_pid; + extension_ddl_message->cmd = CMD_CREATE_EXTENSION; + extension_ddl_message->result = ERR_PENDING; + extension_ddl_message->dbid = MyDatabaseId; + launcher_pid = extension_ddl_message->launcher_pid; /* setup sig handler to diskquota launcher process */ rc = kill(launcher_pid, SIGUSR1); LWLockRelease(diskquota_locks.extension_ddl_message_lock); if (rc == 0) { - int count = WAIT_TIME_COUNT; + int count = WAIT_TIME_COUNT; while (count-- > 0) { CHECK_FOR_INTERRUPTS(); - rc = WaitLatch(&MyProc->procLatch, - WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, - 100L); - if (rc & WL_POSTMASTER_DEATH) - break; + rc = WaitLatch(&MyProc->procLatch, WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, 100L); + if (rc & WL_POSTMASTER_DEATH) break; ResetLatch(&MyProc->procLatch); ereportif(kill(launcher_pid, 0) == -1 && errno == ESRCH, // do existence check - ERROR, - (errmsg("[diskquota] diskquota launcher pid = %d no longer exists", launcher_pid))); + ERROR, (errmsg("[diskquota] diskquota launcher pid = %d no longer exists", launcher_pid))); LWLockAcquire(diskquota_locks.extension_ddl_message_lock, LW_SHARED); if (extension_ddl_message->result != ERR_PENDING) @@ -368,7 +355,8 @@ diskquota_start_worker(PG_FUNCTION_ARGS) { LWLockRelease(diskquota_locks.extension_ddl_message_lock); LWLockRelease(diskquota_locks.extension_ddl_lock); - elog(ERROR, "[diskquota] failed to create diskquota extension: %s", ddl_err_code_to_err_message((MessageResult) extension_ddl_message->result)); + elog(ERROR, "[diskquota] failed to create diskquota extension: %s", + ddl_err_code_to_err_message((MessageResult)extension_ddl_message->result)); } LWLockRelease(diskquota_locks.extension_ddl_message_lock); LWLockRelease(diskquota_locks.extension_ddl_lock); @@ -376,7 +364,9 @@ diskquota_start_worker(PG_FUNCTION_ARGS) /* notify DBA to run init_table_size_table() when db is not empty */ if (!is_database_empty()) { - ereport(WARNING, (errmsg("database is not empty, please run `select diskquota.init_table_size_table()` to initialize table_size information for diskquota extension. 
Note that for large database, this function may take a long time."))); + ereport(WARNING, (errmsg("database is not empty, please run `select diskquota.init_table_size_table()` to " + "initialize table_size information for diskquota extension. Note that for large " + "database, this function may take a long time."))); } PG_RETURN_VOID(); } @@ -387,15 +377,17 @@ diskquota_start_worker(PG_FUNCTION_ARGS) static void dispatch_pause_or_resume_command(Oid dbid, bool pause_extension) { - CdbPgResults cdb_pgresults = {NULL, 0}; - int i; + CdbPgResults cdb_pgresults = {NULL, 0}; + int i; StringInfoData sql; initStringInfo(&sql); appendStringInfo(&sql, "SELECT diskquota.%s", pause_extension ? "pause" : "resume"); - if (dbid == InvalidOid) { + if (dbid == InvalidOid) + { appendStringInfo(&sql, "()"); - } else { + } else + { appendStringInfo(&sql, "(%d)", dbid); } CdbDispatchCommand(sql.data, DF_NONE, &cdb_pgresults); @@ -406,10 +398,8 @@ dispatch_pause_or_resume_command(Oid dbid, bool pause_extension) if (PQresultStatus(pgresult) != PGRES_TUPLES_OK) { cdbdisp_clearCdbPgResults(&cdb_pgresults); - ereport(ERROR, - (errmsg("[diskquota] %s extension on segments, encounter unexpected result from segment: %d", - pause_extension ? "pausing" : "resuming", - PQresultStatus(pgresult)))); + ereport(ERROR, (errmsg("[diskquota] %s extension on segments, encounter unexpected result from segment: %d", + pause_extension ? "pausing" : "resuming", PQresultStatus(pgresult)))); } } cdbdisp_clearCdbPgResults(&cdb_pgresults); @@ -425,37 +415,33 @@ diskquota_pause(PG_FUNCTION_ARGS) { if (!superuser()) { - ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("must be superuser to pause diskquota"))); + ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("must be superuser to pause diskquota"))); } Oid dbid = MyDatabaseId; - if (PG_NARGS() == 1) { + if (PG_NARGS() == 1) + { dbid = PG_GETARG_OID(0); } // pause current worker LWLockAcquire(diskquota_locks.worker_map_lock, LW_EXCLUSIVE); { - bool found; + bool found; DiskQuotaWorkerEntry *hentry; - hentry = (DiskQuotaWorkerEntry*) hash_search(disk_quota_worker_map, - (void*)&dbid, - // segment dose not boot the worker - // this will add new element on segment - // delete this element in diskquota_resume() - HASH_ENTER, - &found); + hentry = (DiskQuotaWorkerEntry *)hash_search(disk_quota_worker_map, (void *)&dbid, + // segment dose not boot the worker + // this will add new element on segment + // delete this element in diskquota_resume() + HASH_ENTER, &found); hentry->is_paused = true; } LWLockRelease(diskquota_locks.worker_map_lock); if (IS_QUERY_DISPATCHER()) - dispatch_pause_or_resume_command(PG_NARGS() == 0 ? InvalidOid : dbid, - true /* pause_extension */); + dispatch_pause_or_resume_command(PG_NARGS() == 0 ? 
InvalidOid : dbid, true /* pause_extension */); PG_RETURN_VOID(); } @@ -469,41 +455,38 @@ diskquota_resume(PG_FUNCTION_ARGS) { if (!superuser()) { - ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("must be superuser to resume diskquota"))); + ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("must be superuser to resume diskquota"))); } Oid dbid = MyDatabaseId; - if (PG_NARGS() == 1) { + if (PG_NARGS() == 1) + { dbid = PG_GETARG_OID(0); } // active current worker LWLockAcquire(diskquota_locks.worker_map_lock, LW_EXCLUSIVE); { - bool found; + bool found; DiskQuotaWorkerEntry *hentry; - hentry = (DiskQuotaWorkerEntry*) hash_search(disk_quota_worker_map, - (void*)&dbid, - HASH_FIND, - &found); - if (found) { + hentry = (DiskQuotaWorkerEntry *)hash_search(disk_quota_worker_map, (void *)&dbid, HASH_FIND, &found); + if (found) + { hentry->is_paused = false; } // remove the element since we do not need any more // ref diskquota_pause() - if (found && hentry->handle == NULL) { - hash_search(disk_quota_worker_map, (void*)&dbid, HASH_REMOVE, &found); + if (found && hentry->handle == NULL) + { + hash_search(disk_quota_worker_map, (void *)&dbid, HASH_REMOVE, &found); } } LWLockRelease(diskquota_locks.worker_map_lock); if (IS_QUERY_DISPATCHER()) - dispatch_pause_or_resume_command(PG_NARGS() == 0 ? InvalidOid : dbid, - false /* pause_extension */); + dispatch_pause_or_resume_command(PG_NARGS() == 0 ? InvalidOid : dbid, false /* pause_extension */); PG_RETURN_VOID(); } @@ -514,14 +497,15 @@ diskquota_resume(PG_FUNCTION_ARGS) static bool is_database_empty(void) { - int ret; + int ret; StringInfoData buf; - TupleDesc tupdesc; - bool is_empty = false; + TupleDesc tupdesc; + bool is_empty = false; initStringInfo(&buf); appendStringInfo(&buf, - "SELECT (count(relname) = 0) FROM pg_class AS c, pg_namespace AS n WHERE c.oid > 16384 and relnamespace = n.oid and nspname != 'diskquota'"); + "SELECT (count(relname) = 0) FROM pg_class AS c, pg_namespace AS n WHERE c.oid > 16384 and " + "relnamespace = n.oid and nspname != 'diskquota'"); /* * If error happens in is_database_empty, just return error messages to @@ -530,15 +514,14 @@ is_database_empty(void) SPI_connect(); ret = SPI_execute(buf.data, true, 0); - if (ret != SPI_OK_SELECT) - elog(ERROR, "cannot select pg_class and pg_namespace table: error code %d", errno); + if (ret != SPI_OK_SELECT) elog(ERROR, "cannot select pg_class and pg_namespace table: error code %d", errno); tupdesc = SPI_tuptable->tupdesc; /* check sql return value whether database is empty */ if (SPI_processed > 0) { - HeapTuple tup = SPI_tuptable->vals[0]; - Datum dat; - bool isnull; + HeapTuple tup = SPI_tuptable->vals[0]; + Datum dat; + bool isnull; dat = SPI_getbinval(tup, tupdesc, 1, &isnull); if (!isnull) @@ -562,10 +545,11 @@ void register_diskquota_object_access_hook(void) { next_object_access_hook = object_access_hook; - object_access_hook = dq_object_access_hook; + object_access_hook = dq_object_access_hook; } -static void dq_object_access_hook_on_drop(void) +static void +dq_object_access_hook_on_drop(void) { int rc, launcher_pid; @@ -587,29 +571,25 @@ static void dq_object_access_hook_on_drop(void) LWLockAcquire(diskquota_locks.extension_ddl_lock, LW_EXCLUSIVE); LWLockAcquire(diskquota_locks.extension_ddl_message_lock, LW_EXCLUSIVE); extension_ddl_message->req_pid = MyProcPid; - extension_ddl_message->cmd = CMD_DROP_EXTENSION; - extension_ddl_message->result = ERR_PENDING; - extension_ddl_message->dbid = MyDatabaseId; - launcher_pid = 
extension_ddl_message->launcher_pid; - rc = kill(launcher_pid, SIGUSR1); + extension_ddl_message->cmd = CMD_DROP_EXTENSION; + extension_ddl_message->result = ERR_PENDING; + extension_ddl_message->dbid = MyDatabaseId; + launcher_pid = extension_ddl_message->launcher_pid; + rc = kill(launcher_pid, SIGUSR1); LWLockRelease(diskquota_locks.extension_ddl_message_lock); if (rc == 0) { - int count = WAIT_TIME_COUNT; + int count = WAIT_TIME_COUNT; while (count-- > 0) { CHECK_FOR_INTERRUPTS(); - rc = WaitLatch(&MyProc->procLatch, - WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, - 100L); - if (rc & WL_POSTMASTER_DEATH) - break; + rc = WaitLatch(&MyProc->procLatch, WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, 100L); + if (rc & WL_POSTMASTER_DEATH) break; ResetLatch(&MyProc->procLatch); ereportif(kill(launcher_pid, 0) == -1 && errno == ESRCH, // do existence check - ERROR, - (errmsg("[diskquota] diskquota launcher pid = %d no longer exists", launcher_pid))); + ERROR, (errmsg("[diskquota] diskquota launcher pid = %d no longer exists", launcher_pid))); LWLockAcquire(diskquota_locks.extension_ddl_message_lock, LW_SHARED); if (extension_ddl_message->result != ERR_PENDING) @@ -625,7 +605,8 @@ static void dq_object_access_hook_on_drop(void) { LWLockRelease(diskquota_locks.extension_ddl_message_lock); LWLockRelease(diskquota_locks.extension_ddl_lock); - elog(ERROR, "[diskquota launcher] failed to drop diskquota extension: %s", ddl_err_code_to_err_message((MessageResult) extension_ddl_message->result)); + elog(ERROR, "[diskquota launcher] failed to drop diskquota extension: %s", + ddl_err_code_to_err_message((MessageResult)extension_ddl_message->result)); } LWLockRelease(diskquota_locks.extension_ddl_message_lock); LWLockRelease(diskquota_locks.extension_ddl_lock); @@ -636,16 +617,14 @@ static void dq_object_access_hook_on_drop(void) * DROP: will send CMD_DROP_EXTENSION to diskquota laucher */ static void -dq_object_access_hook(ObjectAccessType access, Oid classId, - Oid objectId, int subId, void *arg) +dq_object_access_hook(ObjectAccessType access, Oid classId, Oid objectId, int subId, void *arg) { - if (classId != ExtensionRelationId) - goto out; + if (classId != ExtensionRelationId) goto out; - if (get_extension_oid("diskquota", true) != objectId) - goto out; + if (get_extension_oid("diskquota", true) != objectId) goto out; - switch(access) { + switch (access) + { case OAT_DROP: dq_object_access_hook_on_drop(); break; @@ -657,9 +636,7 @@ dq_object_access_hook(ObjectAccessType access, Oid classId, } out: - if (next_object_access_hook) - (*next_object_access_hook) (access, classId, objectId, - subId, arg); + if (next_object_access_hook) (*next_object_access_hook)(access, classId, objectId, subId, arg); } /* @@ -691,38 +668,33 @@ ddl_err_code_to_err_message(MessageResult code) } } - /* * Set disk quota limit for role. 
*/ Datum set_role_quota(PG_FUNCTION_ARGS) { - Oid roleoid; - char *rolname; - char *sizestr; - int64 quota_limit_mb; + Oid roleoid; + char *rolname; + char *sizestr; + int64 quota_limit_mb; if (!superuser()) { - ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("must be superuser to set disk quota limit"))); + ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("must be superuser to set disk quota limit"))); } rolname = text_to_cstring(PG_GETARG_TEXT_PP(0)); rolname = str_tolower(rolname, strlen(rolname), DEFAULT_COLLATION_OID); roleoid = get_role_oid(rolname, false); - sizestr = text_to_cstring(PG_GETARG_TEXT_PP(1)); - sizestr = str_tolower(sizestr, strlen(sizestr), DEFAULT_COLLATION_OID); + sizestr = text_to_cstring(PG_GETARG_TEXT_PP(1)); + sizestr = str_tolower(sizestr, strlen(sizestr), DEFAULT_COLLATION_OID); quota_limit_mb = get_size_in_mb(sizestr); if (quota_limit_mb == 0) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("disk quota can not be set to 0 MB"))); + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("disk quota can not be set to 0 MB"))); } set_quota_config_internal(roleoid, quota_limit_mb, ROLE_QUOTA); PG_RETURN_VOID(); @@ -734,31 +706,27 @@ set_role_quota(PG_FUNCTION_ARGS) Datum set_schema_quota(PG_FUNCTION_ARGS) { - Oid namespaceoid; - char *nspname; - char *sizestr; - int64 quota_limit_mb; + Oid namespaceoid; + char *nspname; + char *sizestr; + int64 quota_limit_mb; if (!superuser()) { - ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("must be superuser to set disk quota limit"))); + ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("must be superuser to set disk quota limit"))); } - nspname = text_to_cstring(PG_GETARG_TEXT_PP(0)); - nspname = str_tolower(nspname, strlen(nspname), DEFAULT_COLLATION_OID); + nspname = text_to_cstring(PG_GETARG_TEXT_PP(0)); + nspname = str_tolower(nspname, strlen(nspname), DEFAULT_COLLATION_OID); namespaceoid = get_namespace_oid(nspname, false); - sizestr = text_to_cstring(PG_GETARG_TEXT_PP(1)); - sizestr = str_tolower(sizestr, strlen(sizestr), DEFAULT_COLLATION_OID); + sizestr = text_to_cstring(PG_GETARG_TEXT_PP(1)); + sizestr = str_tolower(sizestr, strlen(sizestr), DEFAULT_COLLATION_OID); quota_limit_mb = get_size_in_mb(sizestr); if (quota_limit_mb == 0) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("disk quota can not be set to 0 MB"))); + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("disk quota can not be set to 0 MB"))); } set_quota_config_internal(namespaceoid, quota_limit_mb, NAMESPACE_QUOTA); PG_RETURN_VOID(); @@ -770,22 +738,20 @@ set_schema_quota(PG_FUNCTION_ARGS) Datum set_role_tablespace_quota(PG_FUNCTION_ARGS) { -/* - * Write the quota limit info into target and quota_config table under - * 'diskquota' schema of the current database. - */ - Oid spcoid; - char *spcname; - Oid roleoid; - char *rolname; - char *sizestr; - int64 quota_limit_mb; - + /* + * Write the quota limit info into target and quota_config table under + * 'diskquota' schema of the current database. 
+ */ + Oid spcoid; + char *spcname; + Oid roleoid; + char *rolname; + char *sizestr; + int64 quota_limit_mb; + if (!superuser()) { - ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("must be superuser to set disk quota limit"))); + ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("must be superuser to set disk quota limit"))); } rolname = text_to_cstring(PG_GETARG_TEXT_PP(0)); @@ -794,17 +760,14 @@ set_role_tablespace_quota(PG_FUNCTION_ARGS) spcname = text_to_cstring(PG_GETARG_TEXT_PP(1)); spcname = str_tolower(spcname, strlen(spcname), DEFAULT_COLLATION_OID); - spcoid = get_tablespace_oid(spcname, false); + spcoid = get_tablespace_oid(spcname, false); - - sizestr = text_to_cstring(PG_GETARG_TEXT_PP(2)); - sizestr = str_tolower(sizestr, strlen(sizestr), DEFAULT_COLLATION_OID); + sizestr = text_to_cstring(PG_GETARG_TEXT_PP(2)); + sizestr = str_tolower(sizestr, strlen(sizestr), DEFAULT_COLLATION_OID); quota_limit_mb = get_size_in_mb(sizestr); if (quota_limit_mb == 0) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("disk quota can not be set to 0 MB"))); + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("disk quota can not be set to 0 MB"))); } set_target_internal(roleoid, spcoid, quota_limit_mb, ROLE_TABLESPACE_QUOTA); @@ -818,41 +781,36 @@ set_role_tablespace_quota(PG_FUNCTION_ARGS) Datum set_schema_tablespace_quota(PG_FUNCTION_ARGS) { -/* - * Write the quota limit info into target and quota_config table under - * 'diskquota' schema of the current database. - */ - Oid spcoid; - char *spcname; - Oid namespaceoid; - char *nspname; - char *sizestr; - int64 quota_limit_mb; - + /* + * Write the quota limit info into target and quota_config table under + * 'diskquota' schema of the current database. 
+ */ + Oid spcoid; + char *spcname; + Oid namespaceoid; + char *nspname; + char *sizestr; + int64 quota_limit_mb; + if (!superuser()) { - ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("must be superuser to set disk quota limit"))); + ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("must be superuser to set disk quota limit"))); } - nspname = text_to_cstring(PG_GETARG_TEXT_PP(0)); - nspname = str_tolower(nspname, strlen(nspname), DEFAULT_COLLATION_OID); + nspname = text_to_cstring(PG_GETARG_TEXT_PP(0)); + nspname = str_tolower(nspname, strlen(nspname), DEFAULT_COLLATION_OID); namespaceoid = get_namespace_oid(nspname, false); spcname = text_to_cstring(PG_GETARG_TEXT_PP(1)); spcname = str_tolower(spcname, strlen(spcname), DEFAULT_COLLATION_OID); - spcoid = get_tablespace_oid(spcname, false); + spcoid = get_tablespace_oid(spcname, false); - - sizestr = text_to_cstring(PG_GETARG_TEXT_PP(2)); - sizestr = str_tolower(sizestr, strlen(sizestr), DEFAULT_COLLATION_OID); + sizestr = text_to_cstring(PG_GETARG_TEXT_PP(2)); + sizestr = str_tolower(sizestr, strlen(sizestr), DEFAULT_COLLATION_OID); quota_limit_mb = get_size_in_mb(sizestr); if (quota_limit_mb == 0) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("disk quota can not be set to 0 MB"))); + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("disk quota can not be set to 0 MB"))); } set_target_internal(namespaceoid, spcoid, quota_limit_mb, NAMESPACE_TABLESPACE_QUOTA); @@ -863,14 +821,14 @@ set_schema_tablespace_quota(PG_FUNCTION_ARGS) static void set_quota_config_internal(Oid targetoid, int64 quota_limit_mb, QuotaType type) { - int ret; + int ret; StringInfoData buf; initStringInfo(&buf); appendStringInfo(&buf, - "select true from diskquota.quota_config where targetoid = %u" - " and quotatype =%d", - targetoid, type); + "select true from diskquota.quota_config where targetoid = %u" + " and quotatype =%d", + targetoid, type); /* * If error happens in set_quota_config_internal, just return error messages to @@ -879,41 +837,34 @@ set_quota_config_internal(Oid targetoid, int64 quota_limit_mb, QuotaType type) SPI_connect(); ret = SPI_execute(buf.data, true, 0); - if (ret != SPI_OK_SELECT) - elog(ERROR, "cannot select quota setting table: error code %d", ret); + if (ret != SPI_OK_SELECT) elog(ERROR, "cannot select quota setting table: error code %d", ret); /* if the schema or role's quota has been set before */ if (SPI_processed == 0 && quota_limit_mb > 0) { resetStringInfo(&buf); - appendStringInfo(&buf, - "insert into diskquota.quota_config values(%u,%d,%ld);", - targetoid, type, quota_limit_mb); + appendStringInfo(&buf, "insert into diskquota.quota_config values(%u,%d,%ld);", targetoid, type, + quota_limit_mb); ret = SPI_execute(buf.data, false, 0); - if (ret != SPI_OK_INSERT) - elog(ERROR, "cannot insert into quota setting table, error code %d", ret); - } - else if (SPI_processed > 0 && quota_limit_mb < 0) + if (ret != SPI_OK_INSERT) elog(ERROR, "cannot insert into quota setting table, error code %d", ret); + } else if (SPI_processed > 0 && quota_limit_mb < 0) { resetStringInfo(&buf); appendStringInfo(&buf, - "delete from diskquota.quota_config where targetoid=%u" - " and quotatype=%d;", - targetoid, type); + "delete from diskquota.quota_config where targetoid=%u" + " and quotatype=%d;", + targetoid, type); ret = SPI_execute(buf.data, false, 0); - if (ret != SPI_OK_DELETE) - elog(ERROR, "cannot delete item from quota setting table, error code %d", ret); - } - else if 
(SPI_processed > 0 && quota_limit_mb > 0) + if (ret != SPI_OK_DELETE) elog(ERROR, "cannot delete item from quota setting table, error code %d", ret); + } else if (SPI_processed > 0 && quota_limit_mb > 0) { resetStringInfo(&buf); appendStringInfo(&buf, - "update diskquota.quota_config set quotalimitMB = %ld where targetoid=%u" - " and quotatype=%d;", - quota_limit_mb, targetoid, type); + "update diskquota.quota_config set quotalimitMB = %ld where targetoid=%u" + " and quotatype=%d;", + quota_limit_mb, targetoid, type); ret = SPI_execute(buf.data, false, 0); - if (ret != SPI_OK_UPDATE) - elog(ERROR, "cannot update quota setting table, error code %d", ret); + if (ret != SPI_OK_UPDATE) elog(ERROR, "cannot update quota setting table, error code %d", ret); } /* @@ -926,18 +877,18 @@ set_quota_config_internal(Oid targetoid, int64 quota_limit_mb, QuotaType type) static void set_target_internal(Oid primaryoid, Oid spcoid, int64 quota_limit_mb, QuotaType type) { - int ret; + int ret; StringInfoData buf; initStringInfo(&buf); appendStringInfo(&buf, - "select true from diskquota.quota_config as q, diskquota.target as t" - " where t.primaryOid = %u" - " and t.tablespaceOid=%u" - " and t.quotaType=%d" - " and t.quotaType=q.quotaType" - " and t.primaryOid=q.targetOid;", - primaryoid, spcoid, type); + "select true from diskquota.quota_config as q, diskquota.target as t" + " where t.primaryOid = %u" + " and t.tablespaceOid=%u" + " and t.quotaType=%d" + " and t.quotaType=q.quotaType" + " and t.primaryOid=q.targetOid;", + primaryoid, spcoid, type); /* * If error happens in set_quota_config_internal, just return error messages to @@ -946,30 +897,24 @@ set_target_internal(Oid primaryoid, Oid spcoid, int64 quota_limit_mb, QuotaType SPI_connect(); ret = SPI_execute(buf.data, true, 0); - if (ret != SPI_OK_SELECT) - elog(ERROR, "cannot select target setting table: error code %d", ret); + if (ret != SPI_OK_SELECT) elog(ERROR, "cannot select target setting table: error code %d", ret); /* if the schema or role's quota has been set before */ if (SPI_processed == 0 && quota_limit_mb > 0) { resetStringInfo(&buf); - appendStringInfo(&buf, - "insert into diskquota.target values(%d,%u,%u)", - type, primaryoid, spcoid); + appendStringInfo(&buf, "insert into diskquota.target values(%d,%u,%u)", type, primaryoid, spcoid); ret = SPI_execute(buf.data, false, 0); - if (ret != SPI_OK_INSERT) - elog(ERROR, "cannot insert into quota setting table, error code %d", ret); - } - else if (SPI_processed > 0 && quota_limit_mb < 0) + if (ret != SPI_OK_INSERT) elog(ERROR, "cannot insert into quota setting table, error code %d", ret); + } else if (SPI_processed > 0 && quota_limit_mb < 0) { resetStringInfo(&buf); appendStringInfo(&buf, - "delete from diskquota.target where primaryOid=%u" - " and tablespaceOid=%u;", - primaryoid, spcoid); + "delete from diskquota.target where primaryOid=%u" + " and tablespaceOid=%u;", + primaryoid, spcoid); ret = SPI_execute(buf.data, false, 0); - if (ret != SPI_OK_DELETE) - elog(ERROR, "cannot delete item from target setting table, error code %d", ret); + if (ret != SPI_OK_DELETE) elog(ERROR, "cannot delete item from target setting table, error code %d", ret); } /* @@ -985,67 +930,58 @@ set_target_internal(Oid primaryoid, Oid spcoid, int64 quota_limit_mb, QuotaType static int64 get_size_in_mb(char *str) { - char *strptr, - *endptr; - char saved_char; - Numeric num; - int64 result; - bool have_digits = false; + char *strptr, *endptr; + char saved_char; + Numeric num; + int64 result; + bool have_digits = false; 
/* Skip leading whitespace */ strptr = str; - while (isspace((unsigned char) *strptr)) - strptr++; + while (isspace((unsigned char)*strptr)) strptr++; /* Check that we have a valid number and determine where it ends */ endptr = strptr; /* Part (1): sign */ - if (*endptr == '-' || *endptr == '+') - endptr++; + if (*endptr == '-' || *endptr == '+') endptr++; /* Part (2): main digit string */ - if (isdigit((unsigned char) *endptr)) + if (isdigit((unsigned char)*endptr)) { have_digits = true; - do - endptr++; - while (isdigit((unsigned char) *endptr)); + do endptr++; + while (isdigit((unsigned char)*endptr)); } /* Part (3): optional decimal point and fractional digits */ if (*endptr == '.') { endptr++; - if (isdigit((unsigned char) *endptr)) + if (isdigit((unsigned char)*endptr)) { have_digits = true; - do - endptr++; - while (isdigit((unsigned char) *endptr)); + do endptr++; + while (isdigit((unsigned char)*endptr)); } } /* Complain if we don't have a valid number at this point */ - if (!have_digits) - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("invalid size: \"%s\"", str))); + if (!have_digits) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("invalid size: \"%s\"", str))); /* Part (4): optional exponent */ if (*endptr == 'e' || *endptr == 'E') { - long exponent; - char *cp; + long exponent; + char *cp; /* * Note we might one day support EB units, so if what follows 'E' * isn't a number, just treat it all as a unit to be parsed. */ exponent = strtol(endptr + 1, &cp, 10); - (void) exponent; /* Silence -Wunused-result warnings */ - if (cp > endptr + 1) - endptr = cp; + (void)exponent; /* Silence -Wunused-result warnings */ + if (cp > endptr + 1) endptr = cp; } /* @@ -1053,67 +989,57 @@ get_size_in_mb(char *str) * character of the unit string. 
*/ saved_char = *endptr; - *endptr = '\0'; + *endptr = '\0'; - num = DatumGetNumeric(DirectFunctionCall3(numeric_in, - CStringGetDatum(strptr), - ObjectIdGetDatum(InvalidOid), - Int32GetDatum(-1))); + num = DatumGetNumeric( + DirectFunctionCall3(numeric_in, CStringGetDatum(strptr), ObjectIdGetDatum(InvalidOid), Int32GetDatum(-1))); *endptr = saved_char; /* Skip whitespace between number and unit */ strptr = endptr; - while (isspace((unsigned char) *strptr)) - strptr++; + while (isspace((unsigned char)*strptr)) strptr++; /* Handle possible unit */ if (*strptr != '\0') { - int64 multiplier = 0; + int64 multiplier = 0; /* Trim any trailing whitespace */ endptr = str + strlen(str) - 1; - while (isspace((unsigned char) *endptr)) - endptr--; + while (isspace((unsigned char)*endptr)) endptr--; endptr++; *endptr = '\0'; /* Parse the unit case-insensitively */ if (pg_strcasecmp(strptr, "mb") == 0) - multiplier = ((int64) 1); + multiplier = ((int64)1); else if (pg_strcasecmp(strptr, "gb") == 0) - multiplier = ((int64) 1024); + multiplier = ((int64)1024); else if (pg_strcasecmp(strptr, "tb") == 0) - multiplier = ((int64) 1024) * 1024; + multiplier = ((int64)1024) * 1024; else if (pg_strcasecmp(strptr, "pb") == 0) - multiplier = ((int64) 1024) * 1024 * 1024; + multiplier = ((int64)1024) * 1024 * 1024; else - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("invalid size: \"%s\"", str), - errdetail("Invalid size unit: \"%s\".", strptr), - errhint("Valid units are \"MB\", \"GB\", \"TB\", and \"PB\"."))); + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("invalid size: \"%s\"", str), + errdetail("Invalid size unit: \"%s\".", strptr), + errhint("Valid units are \"MB\", \"GB\", \"TB\", and \"PB\"."))); if (multiplier > 1) { - Numeric mul_num; + Numeric mul_num; - mul_num = DatumGetNumeric(DirectFunctionCall1(int8_numeric, - Int64GetDatum(multiplier))); + mul_num = DatumGetNumeric(DirectFunctionCall1(int8_numeric, Int64GetDatum(multiplier))); - num = DatumGetNumeric(DirectFunctionCall2(numeric_mul, - NumericGetDatum(mul_num), - NumericGetDatum(num))); + num = DatumGetNumeric(DirectFunctionCall2(numeric_mul, NumericGetDatum(mul_num), NumericGetDatum(num))); } } - result = DatumGetInt64(DirectFunctionCall1(numeric_int8, - NumericGetDatum(num))); + result = DatumGetInt64(DirectFunctionCall1(numeric_int8, NumericGetDatum(num))); return result; } @@ -1125,30 +1051,27 @@ get_size_in_mb(char *str) void update_diskquota_db_list(Oid dbid, HASHACTION action) { - bool found = false; + bool found = false; /* add/remove the dbid to monitoring database cache to filter out table not under - * monitoring in hook functions - */ + * monitoring in hook functions + */ LWLockAcquire(diskquota_locks.monitoring_dbid_cache_lock, LW_EXCLUSIVE); if (action == HASH_ENTER) - { + { Oid *entry = NULL; - entry = hash_search(monitoring_dbid_cache, &dbid, HASH_ENTER_NULL, &found); + entry = hash_search(monitoring_dbid_cache, &dbid, HASH_ENTER_NULL, &found); if (entry == NULL) { - ereport(WARNING, - (errmsg("can't alloc memory on dbid cache, there ary too many databases to monitor"))); + ereport(WARNING, (errmsg("can't alloc memory on dbid cache, there are too many databases to monitor"))); } - } - else if (action == HASH_REMOVE) + } else if (action == HASH_REMOVE) { hash_search(monitoring_dbid_cache, &dbid, HASH_REMOVE, &found); if (!found) { - ereport(WARNING, - (errmsg("cannot remove the database from db list, 
dbid not found"))); } } LWLockRelease(diskquota_locks.monitoring_dbid_cache_lock); @@ -1160,60 +1083,60 @@ update_diskquota_db_list(Oid dbid, HASHACTION action) Datum set_per_segment_quota(PG_FUNCTION_ARGS) { - int ret; - Oid spcoid; - char *spcname; - float4 ratio; + int ret; + Oid spcoid; + char *spcname; + float4 ratio; if (!superuser()) { - ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("must be superuser to set disk quota limit"))); + ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("must be superuser to set disk quota limit"))); } spcname = text_to_cstring(PG_GETARG_TEXT_PP(0)); spcname = str_tolower(spcname, strlen(spcname), DEFAULT_COLLATION_OID); - spcoid = get_tablespace_oid(spcname, false); + spcoid = get_tablespace_oid(spcname, false); ratio = PG_GETARG_FLOAT4(1); if (ratio == 0) { ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("per segment quota ratio can not be set to 0"))); + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("per segment quota ratio can not be set to 0"))); } StringInfoData buf; if (SPI_OK_CONNECT != SPI_connect()) { - ereport(ERROR, - (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("unable to connect to execute internal query"))); + ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), errmsg("unable to connect to execute internal query"))); } /* Get all targetOid which are related to this tablespace, and saved into rowIds */ initStringInfo(&buf); - appendStringInfo(&buf, - "SELECT true FROM diskquota.target as t, diskquota.quota_config as q WHERE tablespaceOid = %u AND (t.quotaType = %d OR t.quotaType = %d) AND t.primaryOid = q.targetOid AND t.quotaType = q.quotaType", spcoid, NAMESPACE_TABLESPACE_QUOTA, ROLE_TABLESPACE_QUOTA); + appendStringInfo( + &buf, + "SELECT true FROM diskquota.target as t, diskquota.quota_config as q WHERE tablespaceOid = %u AND " + "(t.quotaType = %d OR t.quotaType = %d) AND t.primaryOid = q.targetOid AND t.quotaType = q.quotaType", + spcoid, NAMESPACE_TABLESPACE_QUOTA, ROLE_TABLESPACE_QUOTA); ret = SPI_execute(buf.data, true, 0); - if (ret != SPI_OK_SELECT) - elog(ERROR, "cannot select target and quota setting table: error code %d", ret); + if (ret != SPI_OK_SELECT) elog(ERROR, "cannot select target and quota setting table: error code %d", ret); if (SPI_processed <= 0) { - ereport(ERROR, - (errmsg("there are no roles or schema quota configed for this tablespace: %s, can't config per segment ratio for it", spcname))); + ereport(ERROR, (errmsg("there are no roles or schema quota configed for this tablespace: %s, can't config per " + "segment ratio for it", + spcname))); } resetStringInfo(&buf); appendStringInfo(&buf, - "UPDATE diskquota.quota_config AS q set segratio = %f FROM diskquota.target AS t WHERE q.targetOid = t.primaryOid AND (t.quotaType = %d OR t.quotaType = %d) AND t.quotaType = q.quotaType And t.tablespaceOid = %d", ratio, NAMESPACE_TABLESPACE_QUOTA, ROLE_TABLESPACE_QUOTA, spcoid); + "UPDATE diskquota.quota_config AS q set segratio = %f FROM diskquota.target AS t WHERE " + "q.targetOid = t.primaryOid AND (t.quotaType = %d OR t.quotaType = %d) AND t.quotaType = " + "q.quotaType And t.tablespaceOid = %d", + ratio, NAMESPACE_TABLESPACE_QUOTA, ROLE_TABLESPACE_QUOTA, spcoid); /* * UPDATEA NAMESPACE_TABLESPACE_PERSEG_QUOTA AND ROLE_TABLESPACE_PERSEG_QUOTA config for this tablespace */ ret = SPI_execute(buf.data, false, 0); - if (ret != SPI_OK_UPDATE) - elog(ERROR, "cannot update item from quota setting table, error code %d", ret); + if (ret != SPI_OK_UPDATE) elog(ERROR, "cannot 
update item from quota setting table, error code %d", ret); /* * And finish our transaction. */ @@ -1221,7 +1144,8 @@ set_per_segment_quota(PG_FUNCTION_ARGS) PG_RETURN_VOID(); } -int worker_spi_get_extension_version(int *major, int *minor) +int +worker_spi_get_extension_version(int *major, int *minor) { StartTransactionCommand(); int ret = SPI_connect(); @@ -1230,36 +1154,38 @@ int worker_spi_get_extension_version(int *major, int *minor) ret = SPI_execute("select extversion from pg_extension where extname = 'diskquota'", true, 0); - if (SPI_processed == 0) { + if (SPI_processed == 0) + { ret = -1; goto out; } - if(ret != SPI_OK_SELECT || SPI_processed != 1) { + if (ret != SPI_OK_SELECT || SPI_processed != 1) + { ereport(WARNING, - (errmsg("[diskquota] when reading installed version lines %ld code = %d", - SPI_processed, ret))); + (errmsg("[diskquota] when reading installed version lines %ld code = %d", SPI_processed, ret))); return -1; } - bool is_null = false; - Datum v = SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 1, &is_null); + bool is_null = false; + Datum v = SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 1, &is_null); Assert(is_null == false); - char *version = TextDatumGetCString(v); - if (version == NULL) { + char *version = TextDatumGetCString(v); + if (version == NULL) + { ereport(WARNING, - (errmsg("[diskquota] 'extversion' is empty in pg_class.pg_extension. catalog might be corrupted"))); + (errmsg("[diskquota] 'extversion' is empty in pg_catalog.pg_extension. catalog might be corrupted"))); return -1; } ret = sscanf(version, "%d.%d", major, minor); - if (ret != 2) { - ereport(WARNING, - (errmsg("[diskquota] 'extversion' is '%s' in pg_class.pg_extension which is not valid format. " - "catalog might be corrupted", - version))); + if (ret != 2) + { + ereport(WARNING, (errmsg("[diskquota] 'extversion' is '%s' in pg_catalog.pg_extension which is not a valid format. 
" + "catalog might be corrupted", + version))); return -1; } @@ -1280,38 +1206,32 @@ int worker_spi_get_extension_version(int *major, int *minor) int get_ext_major_version(void) { - int ret; - TupleDesc tupdesc; - HeapTuple tup; - Datum dat; - bool isnull; - char *extversion; + int ret; + TupleDesc tupdesc; + HeapTuple tup; + Datum dat; + bool isnull; + char *extversion; ret = SPI_execute("select COALESCE(extversion,'') from pg_extension where extname = 'diskquota'", true, 0); if (ret != SPI_OK_SELECT) - ereport(ERROR, - (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("[diskquota] check diskquota state SPI_execute failed: error code %d", ret))); + ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("[diskquota] check diskquota state SPI_execute failed: error code %d", ret))); tupdesc = SPI_tuptable->tupdesc; - if (tupdesc->natts != 1 || - ((tupdesc)->attrs[0])->atttypid != TEXTOID || SPI_processed != 1) + if (tupdesc->natts != 1 || ((tupdesc)->attrs[0])->atttypid != TEXTOID || SPI_processed != 1) { - ereport(ERROR, - (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("[diskquota] can not get diskquota extesion version"))); + ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), errmsg("[diskquota] can not get diskquota extesion version"))); } tup = SPI_tuptable->vals[0]; dat = SPI_getbinval(tup, tupdesc, 1, &isnull); if (isnull) - ereport(ERROR, - (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("[diskquota] can not get diskquota extesion version"))); - extversion = TextDatumGetCString(dat); + ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), errmsg("[diskquota] can not get diskquota extesion version"))); + extversion = TextDatumGetCString(dat); if (extversion) { - return (int)strtol(extversion, (char **) NULL, 10); + return (int)strtol(extversion, (char **)NULL, 10); } return 0; } @@ -1327,38 +1247,37 @@ get_ext_major_version(void) List * get_rel_oid_list(void) { - List *oidlist = NIL; - StringInfoData buf; - int ret; + List *oidlist = NIL; + StringInfoData buf; + int ret; initStringInfo(&buf); appendStringInfo(&buf, - "select oid " - " from pg_class" - " where oid >= %u and (relkind='r' or relkind='m')", - FirstNormalObjectId); + "select oid " + " from pg_class" + " where oid >= %u and (relkind='r' or relkind='m')", + FirstNormalObjectId); ret = SPI_execute(buf.data, false, 0); - if (ret != SPI_OK_SELECT) - elog(ERROR, "cannot fetch in pg_class. error code %d", ret); + if (ret != SPI_OK_SELECT) elog(ERROR, "cannot fetch in pg_class. 
error code %d", ret); TupleDesc tupdesc = SPI_tuptable->tupdesc; - for(int i = 0; i < SPI_processed; i++) + for (int i = 0; i < SPI_processed; i++) { - HeapTuple tup; - bool isnull; - Oid oid; - ListCell *l; + HeapTuple tup; + bool isnull; + Oid oid; + ListCell *l; tup = SPI_tuptable->vals[i]; - oid = DatumGetObjectId(SPI_getbinval(tup,tupdesc, 1, &isnull)); + oid = DatumGetObjectId(SPI_getbinval(tup, tupdesc, 1, &isnull)); if (!isnull) { List *indexIds; - oidlist = lappend_oid(oidlist, oid); + oidlist = lappend_oid(oidlist, oid); indexIds = diskquota_get_index_list(oid); - if (indexIds != NIL ) + if (indexIds != NIL) { - foreach(l, indexIds) + foreach (l, indexIds) { oidlist = lappend_oid(oidlist, lfirst_oid(l)); } @@ -1378,8 +1297,8 @@ typedef struct static bool relation_file_stat(int segno, void *ctx) { - RelationFileStatCtx *stat_ctx = (RelationFileStatCtx *)ctx; - char file_path[MAXPGPATH] = {0}; + RelationFileStatCtx *stat_ctx = (RelationFileStatCtx *)ctx; + char file_path[MAXPGPATH] = {0}; if (segno == 0) snprintf(file_path, MAXPGPATH, "%s", stat_ctx->relation_path); else @@ -1389,9 +1308,7 @@ relation_file_stat(int segno, void *ctx) if (stat(file_path, &fst) < 0) { if (errno != ENOENT) - ereport(WARNING, - (errcode_for_file_access(), - errmsg("[diskquota] could not stat file %s: %m", file_path))); + ereport(WARNING, (errcode_for_file_access(), errmsg("[diskquota] could not stat file %s: %m", file_path))); return false; } stat_ctx->size += fst.st_size; @@ -1405,8 +1322,8 @@ relation_file_stat(int segno, void *ctx) int64 calculate_relation_size_all_forks(RelFileNodeBackend *rnode, char relstorage) { - int64 totalsize = 0; - ForkNumber forkNum; + int64 totalsize = 0; + ForkNumber forkNum; unsigned int segno = 0; if (relstorage == RELSTORAGE_HEAP) @@ -1414,22 +1331,20 @@ calculate_relation_size_all_forks(RelFileNodeBackend *rnode, char relstorage) for (forkNum = 0; forkNum <= MAX_FORKNUM; forkNum++) { RelationFileStatCtx ctx = {0}; - ctx.relation_path = relpathbackend(rnode->node, rnode->backend, forkNum); - ctx.size = 0; - for (segno = 0; ; segno++) + ctx.relation_path = relpathbackend(rnode->node, rnode->backend, forkNum); + ctx.size = 0; + for (segno = 0;; segno++) { - if (!relation_file_stat(segno, &ctx)) - break; + if (!relation_file_stat(segno, &ctx)) break; } totalsize += ctx.size; } return totalsize; - } - else if (relstorage == RELSTORAGE_AOROWS || relstorage == RELSTORAGE_AOCOLS) + } else if (relstorage == RELSTORAGE_AOROWS || relstorage == RELSTORAGE_AOCOLS) { RelationFileStatCtx ctx = {0}; - ctx.relation_path = relpathbackend(rnode->node, rnode->backend, MAIN_FORKNUM); - ctx.size = 0; + ctx.relation_path = relpathbackend(rnode->node, rnode->backend, MAIN_FORKNUM); + ctx.size = 0; /* * Since the extension file with (segno=0, column=1) is not traversed by * ao_foreach_extent_file(), we need to handle the size of it additionally. 
@@ -1438,8 +1353,7 @@ calculate_relation_size_all_forks(RelFileNodeBackend *rnode, char relstorage) relation_file_stat(0, &ctx); ao_foreach_extent_file(relation_file_stat, &ctx); return ctx.size; - } - else + } else { return 0; } @@ -1448,17 +1362,17 @@ calculate_relation_size_all_forks(RelFileNodeBackend *rnode, char relstorage) Datum relation_size_local(PG_FUNCTION_ARGS) { - Oid reltablespace = PG_GETARG_OID(0); - Oid relfilenode = PG_GETARG_OID(1); - char relpersistence = PG_GETARG_CHAR(2); - char relstorage = PG_GETARG_CHAR(3); - RelFileNodeBackend rnode = {0}; - int64 size = 0; - - rnode.node.dbNode = MyDatabaseId; + Oid reltablespace = PG_GETARG_OID(0); + Oid relfilenode = PG_GETARG_OID(1); + char relpersistence = PG_GETARG_CHAR(2); + char relstorage = PG_GETARG_CHAR(3); + RelFileNodeBackend rnode = {0}; + int64 size = 0; + + rnode.node.dbNode = MyDatabaseId; rnode.node.relNode = relfilenode; rnode.node.spcNode = OidIsValid(reltablespace) ? reltablespace : MyDatabaseTableSpace; - rnode.backend = relpersistence == RELPERSISTENCE_TEMP ? TempRelBackendId : InvalidBackendId; + rnode.backend = relpersistence == RELPERSISTENCE_TEMP ? TempRelBackendId : InvalidBackendId; size = calculate_relation_size_all_forks(&rnode, relstorage); @@ -1469,17 +1383,17 @@ Relation diskquota_relation_open(Oid relid, LOCKMODE mode) { Relation rel; - bool success_open = false; - int32 SavedInterruptHoldoffCount = InterruptHoldoffCount; + bool success_open = false; + int32 SavedInterruptHoldoffCount = InterruptHoldoffCount; PG_TRY(); { - rel = relation_open(relid, mode); + rel = relation_open(relid, mode); success_open = true; } PG_CATCH(); { - InterruptHoldoffCount = SavedInterruptHoldoffCount; + InterruptHoldoffCount = SavedInterruptHoldoffCount; HOLD_INTERRUPTS(); FlushErrorState(); RESUME_INTERRUPTS(); @@ -1489,28 +1403,24 @@ diskquota_relation_open(Oid relid, LOCKMODE mode) return success_open ? rel : NULL; } -List* +List * diskquota_get_index_list(Oid relid) { - Relation indrel; + Relation indrel; SysScanDesc indscan; ScanKeyData skey; - HeapTuple htup; - List *result = NIL; + HeapTuple htup; + List *result = NIL; /* Prepare to scan pg_index for entries having indrelid = this rel. */ - ScanKeyInit(&skey, - Anum_pg_index_indrelid, - BTEqualStrategyNumber, F_OIDEQ, - relid); + ScanKeyInit(&skey, Anum_pg_index_indrelid, BTEqualStrategyNumber, F_OIDEQ, relid); - indrel = heap_open(IndexRelationId, AccessShareLock); - indscan = systable_beginscan(indrel, IndexIndrelidIndexId, true, - NULL, 1, &skey); + indrel = heap_open(IndexRelationId, AccessShareLock); + indscan = systable_beginscan(indrel, IndexIndrelidIndexId, true, NULL, 1, &skey); while (HeapTupleIsValid(htup = systable_getnext(indscan))) { - Form_pg_index index = (Form_pg_index) GETSTRUCT(htup); + Form_pg_index index = (Form_pg_index)GETSTRUCT(htup); /* * Ignore any indexes that are currently being dropped. This will @@ -1518,8 +1428,7 @@ diskquota_get_index_list(Oid relid) * HOT-safety decisions. It's unsafe to touch such an index at all * since its catalog entries could disappear at any instant. 
*/ - if (!IndexIsLive(index)) - continue; + if (!IndexIsLive(index)) continue; /* Add index's OID to result list in the proper order */ result = lappend_oid(result, index->indexrelid); @@ -1538,48 +1447,36 @@ diskquota_get_index_list(Oid relid) void diskquota_get_appendonly_aux_oid_list(Oid reloid, Oid *segrelid, Oid *blkdirrelid, Oid *visimaprelid) { - ScanKeyData skey; - SysScanDesc scan; - TupleDesc tupDesc; - Relation aorel; - HeapTuple htup; - Datum auxoid; - bool isnull; - - ScanKeyInit(&skey, Anum_pg_appendonly_relid, - BTEqualStrategyNumber, F_OIDEQ, reloid); - aorel = heap_open(AppendOnlyRelationId, AccessShareLock); + ScanKeyData skey; + SysScanDesc scan; + TupleDesc tupDesc; + Relation aorel; + HeapTuple htup; + Datum auxoid; + bool isnull; + + ScanKeyInit(&skey, Anum_pg_appendonly_relid, BTEqualStrategyNumber, F_OIDEQ, reloid); + aorel = heap_open(AppendOnlyRelationId, AccessShareLock); tupDesc = RelationGetDescr(aorel); - scan = systable_beginscan(aorel, AppendOnlyRelidIndexId, - true /*indexOk*/, NULL /*snapshot*/, - 1 /*nkeys*/, &skey); + scan = systable_beginscan(aorel, AppendOnlyRelidIndexId, true /*indexOk*/, NULL /*snapshot*/, 1 /*nkeys*/, &skey); while (HeapTupleIsValid(htup = systable_getnext(scan))) { if (segrelid) { - auxoid = heap_getattr(htup, - Anum_pg_appendonly_segrelid, - tupDesc, &isnull); - if (!isnull) - *segrelid = DatumGetObjectId(auxoid); + auxoid = heap_getattr(htup, Anum_pg_appendonly_segrelid, tupDesc, &isnull); + if (!isnull) *segrelid = DatumGetObjectId(auxoid); } if (blkdirrelid) { - auxoid = heap_getattr(htup, - Anum_pg_appendonly_blkdirrelid, - tupDesc, &isnull); - if (!isnull) - *blkdirrelid = DatumGetObjectId(auxoid); + auxoid = heap_getattr(htup, Anum_pg_appendonly_blkdirrelid, tupDesc, &isnull); + if (!isnull) *blkdirrelid = DatumGetObjectId(auxoid); } if (visimaprelid) { - auxoid = heap_getattr(htup, - Anum_pg_appendonly_visimaprelid, - tupDesc, &isnull); - if (!isnull) - *visimaprelid = DatumGetObjectId(auxoid); + auxoid = heap_getattr(htup, Anum_pg_appendonly_visimaprelid, tupDesc, &isnull); + if (!isnull) *visimaprelid = DatumGetObjectId(auxoid); } } @@ -1593,11 +1490,9 @@ diskquota_parse_primary_table_oid(Oid namespace, char *relname) switch (namespace) { case PG_TOAST_NAMESPACE: - if (strncmp(relname, "pg_toast", 8) == 0) - return atoi(&relname[9]); - break; - case PG_AOSEGMENT_NAMESPACE: - { + if (strncmp(relname, "pg_toast", 8) == 0) return atoi(&relname[9]); + break; + case PG_AOSEGMENT_NAMESPACE: { if (strncmp(relname, "pg_aoseg", 8) == 0) return atoi(&relname[9]); else if (strncmp(relname, "pg_aovisimap", 12) == 0) diff --git a/enforcement.c b/enforcement.c index 48df213a671..3ede06b5808 100644 --- a/enforcement.c +++ b/enforcement.c @@ -33,7 +33,7 @@ init_disk_quota_enforcement(void) { /* enforcement hook before query is loading data */ prev_ExecutorCheckPerms_hook = ExecutorCheckPerms_hook; - ExecutorCheckPerms_hook = quota_check_ExecCheckRTPerms; + ExecutorCheckPerms_hook = quota_check_ExecCheckRTPerms; } /* @@ -43,24 +43,22 @@ init_disk_quota_enforcement(void) static bool quota_check_ExecCheckRTPerms(List *rangeTable, bool ereport_on_violation) { - ListCell *l; + ListCell *l; - foreach(l, rangeTable) + foreach (l, rangeTable) { - List *indexIds; - ListCell *oid; - RangeTblEntry *rte = (RangeTblEntry *) lfirst(l); + List *indexIds; + ListCell *oid; + RangeTblEntry *rte = (RangeTblEntry *)lfirst(l); /* see ExecCheckRTEPerms() */ - if (rte->rtekind != RTE_RELATION) - continue; + if (rte->rtekind != RTE_RELATION) continue; /* * Only 
check quota on inserts. UPDATEs may well increase space usage * too, but we ignore that for now. */ - if ((rte->requiredPerms & ACL_INSERT) == 0 && (rte->requiredPerms & ACL_UPDATE) == 0) - continue; + if ((rte->requiredPerms & ACL_INSERT) == 0 && (rte->requiredPerms & ACL_UPDATE) == 0) continue; /* * Given table oid, check whether the quota limit of table's schema or @@ -72,9 +70,9 @@ quota_check_ExecCheckRTPerms(List *rangeTable, bool ereport_on_violation) indexIds = diskquota_get_index_list(rte->relid); PG_TRY(); { - if (indexIds != NIL ) + if (indexIds != NIL) { - foreach(oid, indexIds) + foreach (oid, indexIds) { quota_check_common(lfirst_oid(oid), NULL /*relfilenode*/); } diff --git a/gp_activetable.c b/gp_activetable.c index a25659c80d7..231c0bc3156 100644 --- a/gp_activetable.c +++ b/gp_activetable.c @@ -38,19 +38,19 @@ PG_FUNCTION_INFO_V1(diskquota_fetch_table_stat); /* The results set cache for SRF call*/ typedef struct DiskQuotaSetOFCache { - HTAB *result; + HTAB *result; HASH_SEQ_STATUS pos; -} DiskQuotaSetOFCache; +} DiskQuotaSetOFCache; -HTAB *active_tables_map = NULL; -HTAB *monitoring_dbid_cache = NULL; -HTAB *altered_reloid_cache = NULL; +HTAB *active_tables_map = NULL; +HTAB *monitoring_dbid_cache = NULL; +HTAB *altered_reloid_cache = NULL; /* active table hooks which detect the disk file size change. */ -static file_create_hook_type prev_file_create_hook = NULL; -static file_extend_hook_type prev_file_extend_hook = NULL; +static file_create_hook_type prev_file_create_hook = NULL; +static file_extend_hook_type prev_file_extend_hook = NULL; static file_truncate_hook_type prev_file_truncate_hook = NULL; -static file_unlink_hook_type prev_file_unlink_hook = NULL; +static file_unlink_hook_type prev_file_unlink_hook = NULL; static object_access_hook_type prev_object_access_hook = NULL; static void active_table_hook_smgrcreate(RelFileNodeBackend rnode); @@ -59,20 +59,20 @@ static void active_table_hook_smgrtruncate(RelFileNodeBackend rnode); static void active_table_hook_smgrunlink(RelFileNodeBackend rnode); static void object_access_hook_QuotaStmt(ObjectAccessType access, Oid classId, Oid objectId, int subId, void *arg); -static HTAB *get_active_tables_stats(ArrayType *array); -static HTAB *get_active_tables_oid(void); -static HTAB *pull_active_list_from_seg(void); -static void pull_active_table_size_from_seg(HTAB *local_table_stats_map, char *active_oid_array); +static HTAB *get_active_tables_stats(ArrayType *array); +static HTAB *get_active_tables_oid(void); +static HTAB *pull_active_list_from_seg(void); +static void pull_active_table_size_from_seg(HTAB *local_table_stats_map, char *active_oid_array); static StringInfoData convert_map_to_string(HTAB *active_list); -static void load_table_size(HTAB *local_table_stats_map); -static void report_active_table_helper(const RelFileNodeBackend *relFileNode); -static void report_relation_cache_helper(Oid relid); -static void report_altered_reloid(Oid reloid); +static void load_table_size(HTAB *local_table_stats_map); +static void report_active_table_helper(const RelFileNodeBackend *relFileNode); +static void report_relation_cache_helper(Oid relid); +static void report_altered_reloid(Oid reloid); -void init_active_table_hook(void); -void init_shm_worker_active_tables(void); -void init_lock_active_tables(void); -HTAB *gp_fetch_active_tables(bool is_init); +void init_active_table_hook(void); +void init_shm_worker_active_tables(void); +void init_lock_active_tables(void); +HTAB *gp_fetch_active_tables(bool is_init); /* * Init 
active_tables_map shared memory @@ -80,27 +80,21 @@ HTAB *gp_fetch_active_tables(bool is_init); void init_shm_worker_active_tables(void) { - HASHCTL ctl; + HASHCTL ctl; memset(&ctl, 0, sizeof(ctl)); - ctl.keysize = sizeof(DiskQuotaActiveTableFileEntry); - ctl.entrysize = sizeof(DiskQuotaActiveTableFileEntry); - ctl.hash = tag_hash; - active_tables_map = ShmemInitHash("active_tables", - diskquota_max_active_tables, - diskquota_max_active_tables, - &ctl, - HASH_ELEM | HASH_FUNCTION); + ctl.keysize = sizeof(DiskQuotaActiveTableFileEntry); + ctl.entrysize = sizeof(DiskQuotaActiveTableFileEntry); + ctl.hash = tag_hash; + active_tables_map = ShmemInitHash("active_tables", diskquota_max_active_tables, diskquota_max_active_tables, &ctl, + HASH_ELEM | HASH_FUNCTION); memset(&ctl, 0, sizeof(ctl)); - ctl.keysize = sizeof(Oid); - ctl.entrysize = sizeof(Oid); - ctl.hash = tag_hash; - altered_reloid_cache = ShmemInitHash("altered_reloid_cache", - diskquota_max_active_tables, - diskquota_max_active_tables, - &ctl, - HASH_ELEM | HASH_FUNCTION); + ctl.keysize = sizeof(Oid); + ctl.entrysize = sizeof(Oid); + ctl.hash = tag_hash; + altered_reloid_cache = ShmemInitHash("altered_reloid_cache", diskquota_max_active_tables, + diskquota_max_active_tables, &ctl, HASH_ELEM | HASH_FUNCTION); } /* @@ -110,19 +104,19 @@ void init_active_table_hook(void) { prev_file_create_hook = file_create_hook; - file_create_hook = active_table_hook_smgrcreate; + file_create_hook = active_table_hook_smgrcreate; prev_file_extend_hook = file_extend_hook; - file_extend_hook = active_table_hook_smgrextend; + file_extend_hook = active_table_hook_smgrextend; prev_file_truncate_hook = file_truncate_hook; - file_truncate_hook = active_table_hook_smgrtruncate; + file_truncate_hook = active_table_hook_smgrtruncate; prev_file_unlink_hook = file_unlink_hook; - file_unlink_hook = active_table_hook_smgrunlink; + file_unlink_hook = active_table_hook_smgrunlink; prev_object_access_hook = object_access_hook; - object_access_hook = object_access_hook_QuotaStmt; + object_access_hook = object_access_hook_QuotaStmt; } /* @@ -131,8 +125,7 @@ init_active_table_hook(void) static void active_table_hook_smgrcreate(RelFileNodeBackend rnode) { - if (prev_file_create_hook) - (*prev_file_create_hook) (rnode); + if (prev_file_create_hook) (*prev_file_create_hook)(rnode); SIMPLE_FAULT_INJECTOR("diskquota_after_smgrcreate"); report_active_table_helper(&rnode); @@ -146,8 +139,7 @@ active_table_hook_smgrcreate(RelFileNodeBackend rnode) static void active_table_hook_smgrextend(RelFileNodeBackend rnode) { - if (prev_file_extend_hook) - (*prev_file_extend_hook) (rnode); + if (prev_file_extend_hook) (*prev_file_extend_hook)(rnode); report_active_table_helper(&rnode); quota_check_common(InvalidOid /*reloid*/, &rnode.node); @@ -159,8 +151,7 @@ active_table_hook_smgrextend(RelFileNodeBackend rnode) static void active_table_hook_smgrtruncate(RelFileNodeBackend rnode) { - if (prev_file_truncate_hook) - (*prev_file_truncate_hook) (rnode); + if (prev_file_truncate_hook) (*prev_file_truncate_hook)(rnode); report_active_table_helper(&rnode); } @@ -168,8 +159,7 @@ active_table_hook_smgrtruncate(RelFileNodeBackend rnode) static void active_table_hook_smgrunlink(RelFileNodeBackend rnode) { - if (prev_file_unlink_hook) - (*prev_file_unlink_hook) (rnode); + if (prev_file_unlink_hook) (*prev_file_unlink_hook)(rnode); remove_cache_entry(InvalidOid, rnode.node.relNode); } @@ -177,8 +167,7 @@ active_table_hook_smgrunlink(RelFileNodeBackend rnode) static void 
object_access_hook_QuotaStmt(ObjectAccessType access, Oid classId, Oid objectId, int subId, void *arg) { - if (prev_object_access_hook) - (*prev_object_access_hook)(access, classId, objectId, subId, arg); + if (prev_object_access_hook) (*prev_object_access_hook)(access, classId, objectId, subId, arg); /* TODO: do we need to use "&&" instead of "||"? */ if (classId != RelationRelationId || subId != 0) @@ -193,15 +182,15 @@ object_access_hook_QuotaStmt(ObjectAccessType access, Oid classId, Oid objectId, switch (access) { - case OAT_POST_CREATE: - report_relation_cache_helper(objectId); - break; - case OAT_POST_ALTER: - SIMPLE_FAULT_INJECTOR("object_access_post_alter"); - report_altered_reloid(objectId); - break; - default: - break; + case OAT_POST_CREATE: + report_relation_cache_helper(objectId); + break; + case OAT_POST_ALTER: + SIMPLE_FAULT_INJECTOR("object_access_post_alter"); + report_altered_reloid(objectId); + break; + default: + break; } } @@ -212,8 +201,7 @@ report_altered_reloid(Oid reloid) * We don't collect altered relations' reloid on mirrors * and QD. */ - if (IsRoleMirror() || IS_QUERY_DISPATCHER()) - return; + if (IsRoleMirror() || IS_QUERY_DISPATCHER()) return; LWLockAcquire(diskquota_locks.altered_reloid_cache_lock, LW_EXCLUSIVE); hash_search(altered_reloid_cache, &reloid, HASH_ENTER, NULL); @@ -224,7 +212,7 @@ static void report_relation_cache_helper(Oid relid) { bool found; - + /* We do not collect the active table in mirror segments */ if (IsRoleMirror()) { @@ -255,11 +243,10 @@ static void report_active_table_helper(const RelFileNodeBackend *relFileNode) { DiskQuotaActiveTableFileEntry *entry; - DiskQuotaActiveTableFileEntry item; - bool found = false; - Oid dbid = relFileNode->node.dbNode; + DiskQuotaActiveTableFileEntry item; + bool found = false; + Oid dbid = relFileNode->node.dbNode; - /* We do not collect the active table in mirror segments */ if (IsRoleMirror()) { @@ -278,14 +265,13 @@ report_active_table_helper(const RelFileNodeBackend *relFileNode) found = false; MemSet(&item, 0, sizeof(DiskQuotaActiveTableFileEntry)); - item.dbid = relFileNode->node.dbNode; - item.relfilenode = relFileNode->node.relNode; + item.dbid = relFileNode->node.dbNode; + item.relfilenode = relFileNode->node.relNode; item.tablespaceoid = relFileNode->node.spcNode; LWLockAcquire(diskquota_locks.active_table_lock, LW_EXCLUSIVE); entry = hash_search(active_tables_map, &item, HASH_ENTER_NULL, &found); - if (entry && !found) - *entry = item; + if (entry && !found) *entry = item; if (!found && entry == NULL) { @@ -309,33 +295,30 @@ report_active_table_helper(const RelFileNodeBackend *relFileNode) HTAB * gp_fetch_active_tables(bool is_init) { - HTAB *local_table_stats_map = NULL; - HASHCTL ctl; - HTAB *local_active_table_oid_maps; + HTAB *local_table_stats_map = NULL; + HASHCTL ctl; + HTAB *local_active_table_oid_maps; StringInfoData active_oid_list; Assert(Gp_role == GP_ROLE_DISPATCH); memset(&ctl, 0, sizeof(ctl)); - ctl.keysize = sizeof(TableEntryKey); + ctl.keysize = sizeof(TableEntryKey); ctl.entrysize = sizeof(DiskQuotaActiveTableEntry); - ctl.hcxt = CurrentMemoryContext; - ctl.hash = tag_hash; + ctl.hcxt = CurrentMemoryContext; + ctl.hash = tag_hash; - local_table_stats_map = hash_create("local active table map with relfilenode info", - 1024, - &ctl, - HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); + local_table_stats_map = hash_create("local active table map with relfilenode info", 1024, &ctl, + HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); if (is_init) { load_table_size(local_table_stats_map); 
- } - else + } else { /* step 1: fetch active oids from all the segments */ local_active_table_oid_maps = pull_active_list_from_seg(); - active_oid_list = convert_map_to_string(local_active_table_oid_maps); + active_oid_list = convert_map_to_string(local_active_table_oid_maps); /* step 2: fetch active table sizes based on active oids */ pull_active_table_size_from_seg(local_table_stats_map, active_oid_list.data); @@ -349,48 +332,45 @@ gp_fetch_active_tables(bool is_init) /* * Function to get the table size from each segments * There are 4 modes: - * - * - FETCH_ACTIVE_OID: gather active table oid from all the segments, since + * + * - FETCH_ACTIVE_OID: gather active table oid from all the segments, since * table may only be modified on a subset of the segments, we need to firstly * gather the active table oid list from all the segments. - * + * * - FETCH_ACTIVE_SIZE: calculate the active table size based on the active * table oid list. - * - * - ADD_DB_TO_MONITOR: add MyDatabaseId to the monitored db cache so that + * + * - ADD_DB_TO_MONITOR: add MyDatabaseId to the monitored db cache so that * active tables in the current database will be recorded. This is used each * time a worker starts. - * - * - REMOVE_DB_FROM_BEING_MONITORED: remove MyDatabaseId from the monitored - * db cache so that active tables in the current database will be recorded. - * This is used when DROP EXTENSION. + * + * - REMOVE_DB_FROM_BEING_MONITORED: remove MyDatabaseId from the monitored + * db cache so that active tables in the current database will be recorded. + * This is used when DROP EXTENSION. */ Datum diskquota_fetch_table_stat(PG_FUNCTION_ARGS) { FuncCallContext *funcctx; - int32 mode = PG_GETARG_INT32(0); - AttInMetadata *attinmeta; - bool isFirstCall = true; + int32 mode = PG_GETARG_INT32(0); + AttInMetadata *attinmeta; + bool isFirstCall = true; - HTAB *localCacheTable = NULL; - DiskQuotaSetOFCache *cache = NULL; - DiskQuotaActiveTableEntry *results_entry = NULL; + HTAB *localCacheTable = NULL; + DiskQuotaSetOFCache *cache = NULL; + DiskQuotaActiveTableEntry *results_entry = NULL; /* Init the container list in the first call and get the results back */ if (SRF_IS_FIRSTCALL()) { MemoryContext oldcontext; - TupleDesc tupdesc; - int extMajorVersion; - int ret_code = SPI_connect(); + TupleDesc tupdesc; + int extMajorVersion; + int ret_code = SPI_connect(); if (ret_code != SPI_OK_CONNECT) { - ereport(ERROR, - (errcode(ERRCODE_INTERNAL_ERROR), - errmsg( - "unable to connect to execute internal query. return code: %d.", - ret_code))); + ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("unable to connect to execute internal query. return code: %d.", ret_code))); } extMajorVersion = get_ext_major_version(); SPI_finish(); @@ -423,14 +403,13 @@ diskquota_fetch_table_stat(PG_FUNCTION_ARGS) default: ereport(ERROR, (errmsg("Unused mode number %d, transaction will be aborted", mode))); break; - } /* * total number of active tables to be returned, each tuple contains * one active table stat */ - funcctx->max_calls = localCacheTable ? (uint32) hash_get_num_entries(localCacheTable) : 0; + funcctx->max_calls = localCacheTable ? 
(uint32)hash_get_num_entries(localCacheTable) : 0; /* * prepare attribute metadata for next calls that generate the tuple @@ -442,30 +421,25 @@ diskquota_fetch_table_stat(PG_FUNCTION_ARGS) break; case 2: tupdesc = CreateTemplateTupleDesc(3, false); - TupleDescInitEntry(tupdesc, (AttrNumber) 3, "GP_SEGMENT_ID", - INT2OID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)3, "GP_SEGMENT_ID", INT2OID, -1, 0); break; default: - ereport(ERROR, - (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("[diskquota] unknown diskquota extension version: %d", extMajorVersion))); + ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("[diskquota] unknown diskquota extension version: %d", extMajorVersion))); } - TupleDescInitEntry(tupdesc, (AttrNumber) 1, "TABLE_OID", - OIDOID, -1, 0); - TupleDescInitEntry(tupdesc, (AttrNumber) 2, "TABLE_SIZE", - INT8OID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)1, "TABLE_OID", OIDOID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)2, "TABLE_SIZE", INT8OID, -1, 0); - attinmeta = TupleDescGetAttInMetadata(tupdesc); + attinmeta = TupleDescGetAttInMetadata(tupdesc); funcctx->attinmeta = attinmeta; /* Prepare SetOf results HATB */ - cache = (DiskQuotaSetOFCache *) palloc(sizeof(DiskQuotaSetOFCache)); + cache = (DiskQuotaSetOFCache *)palloc(sizeof(DiskQuotaSetOFCache)); cache->result = localCacheTable; hash_seq_init(&(cache->pos), localCacheTable); MemoryContextSwitchTo(oldcontext); - } - else + } else { isFirstCall = false; } @@ -474,20 +448,19 @@ diskquota_fetch_table_stat(PG_FUNCTION_ARGS) if (isFirstCall) { - funcctx->user_fctx = (void *) cache; - } - else + funcctx->user_fctx = (void *)cache; + } else { - cache = (DiskQuotaSetOFCache *) funcctx->user_fctx; + cache = (DiskQuotaSetOFCache *)funcctx->user_fctx; } /* return the results back to SPI caller */ - while ((results_entry = (DiskQuotaActiveTableEntry *) hash_seq_search(&(cache->pos))) != NULL) + while ((results_entry = (DiskQuotaActiveTableEntry *)hash_seq_search(&(cache->pos))) != NULL) { - Datum result; - Datum values[3]; - bool nulls[3]; - HeapTuple tuple; + Datum result; + Datum values[3]; + bool nulls[3]; + HeapTuple tuple; memset(values, 0, sizeof(values)); memset(nulls, false, sizeof(nulls)); @@ -516,46 +489,41 @@ diskquota_fetch_table_stat(PG_FUNCTION_ARGS) static HTAB * get_active_tables_stats(ArrayType *array) { - int ndim = ARR_NDIM(array); - int *dims = ARR_DIMS(array); - int nitems; - int16 typlen; - bool typbyval; - char typalign; - char *ptr; - bits8 *bitmap; - int bitmask; - int i; - Oid relOid; - int segId; - HTAB *local_table = NULL; - HASHCTL ctl; - TableEntryKey key; + int ndim = ARR_NDIM(array); + int *dims = ARR_DIMS(array); + int nitems; + int16 typlen; + bool typbyval; + char typalign; + char *ptr; + bits8 *bitmap; + int bitmask; + int i; + Oid relOid; + int segId; + HTAB *local_table = NULL; + HASHCTL ctl; + TableEntryKey key; DiskQuotaActiveTableEntry *entry; - bool found; + bool found; Assert(ARR_ELEMTYPE(array) == OIDOID); nitems = ArrayGetNItems(ndim, dims); - get_typlenbyvalalign(ARR_ELEMTYPE(array), - &typlen, &typbyval, &typalign); + get_typlenbyvalalign(ARR_ELEMTYPE(array), &typlen, &typbyval, &typalign); - - ptr = ARR_DATA_PTR(array); - bitmap = ARR_NULLBITMAP(array); + ptr = ARR_DATA_PTR(array); + bitmap = ARR_NULLBITMAP(array); bitmask = 1; memset(&ctl, 0, sizeof(ctl)); - ctl.keysize = sizeof(TableEntryKey); + ctl.keysize = sizeof(TableEntryKey); ctl.entrysize = sizeof(DiskQuotaActiveTableEntry); - ctl.hcxt = CurrentMemoryContext; - ctl.hash = tag_hash; + ctl.hcxt = 
CurrentMemoryContext; + ctl.hash = tag_hash; - local_table = hash_create("local table map", - 1024, - &ctl, - HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); + local_table = hash_create("local table map", 1024, &ctl, HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); for (i = 0; i < nitems; i++) { @@ -566,25 +534,23 @@ get_active_tables_stats(ArrayType *array) if (bitmap && (*bitmap & bitmask) == 0) { continue; - } - else + } else { - relOid = DatumGetObjectId(fetch_att(ptr, typbyval, typlen)); - segId = GpIdentity.segindex; + relOid = DatumGetObjectId(fetch_att(ptr, typbyval, typlen)); + segId = GpIdentity.segindex; key.reloid = relOid; - key.segid = segId; + key.segid = segId; - entry = (DiskQuotaActiveTableEntry *) hash_search(local_table, &key, HASH_ENTER, &found); + entry = (DiskQuotaActiveTableEntry *)hash_search(local_table, &key, HASH_ENTER, &found); if (!found) { - entry->reloid = relOid; - entry->segid = segId; + entry->reloid = relOid; + entry->segid = segId; entry->tablesize = calculate_table_size(relOid); } ptr = att_addlength_pointer(ptr, typlen, ptr); - ptr = (char *) att_align_nominal(ptr, typalign); - + ptr = (char *)att_align_nominal(ptr, typalign); } /* advance bitmap pointer if any */ @@ -609,7 +575,7 @@ get_active_tables_stats(ArrayType *array) static inline void SetLocktagRelationOid(LOCKTAG *tag, Oid relid) { - Oid dbid; + Oid dbid; if (IsSharedRelation(relid)) dbid = InvalidOid; @@ -624,8 +590,8 @@ is_relation_being_altered(Oid relid) { LOCKTAG locktag; SetLocktagRelationOid(&locktag, relid); - VirtualTransactionId *vxid_list = GetLockConflicts(&locktag, AccessShareLock); - bool being_altered = VirtualTransactionIdIsValid(*vxid_list); /* if vxid_list is empty */ + VirtualTransactionId *vxid_list = GetLockConflicts(&locktag, AccessShareLock); + bool being_altered = VirtualTransactionIdIsValid(*vxid_list); /* if vxid_list is empty */ pfree(vxid_list); return being_altered; } @@ -640,36 +606,32 @@ is_relation_being_altered(Oid relid) static HTAB * get_active_tables_oid(void) { - HASHCTL ctl; - HTAB *local_active_table_file_map = NULL; - HTAB *local_active_table_stats_map = NULL; - HTAB *local_altered_reloid_cache = NULL; - HASH_SEQ_STATUS iter; + HASHCTL ctl; + HTAB *local_active_table_file_map = NULL; + HTAB *local_active_table_stats_map = NULL; + HTAB *local_altered_reloid_cache = NULL; + HASH_SEQ_STATUS iter; DiskQuotaActiveTableFileEntry *active_table_file_entry; - DiskQuotaActiveTableEntry *active_table_entry; - Oid *altered_reloid_entry; + DiskQuotaActiveTableEntry *active_table_entry; + Oid *altered_reloid_entry; - Oid relOid; + Oid relOid; memset(&ctl, 0, sizeof(ctl)); - ctl.keysize = sizeof(DiskQuotaActiveTableFileEntry); - ctl.entrysize = sizeof(DiskQuotaActiveTableFileEntry); - ctl.hcxt = CurrentMemoryContext; - ctl.hash = tag_hash; - local_active_table_file_map = hash_create("local active table map with relfilenode info", - 1024, - &ctl, - HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); + ctl.keysize = sizeof(DiskQuotaActiveTableFileEntry); + ctl.entrysize = sizeof(DiskQuotaActiveTableFileEntry); + ctl.hcxt = CurrentMemoryContext; + ctl.hash = tag_hash; + local_active_table_file_map = hash_create("local active table map with relfilenode info", 1024, &ctl, + HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); memset(&ctl, 0, sizeof(ctl)); - ctl.keysize = sizeof(Oid); + ctl.keysize = sizeof(Oid); ctl.entrysize = sizeof(Oid); - ctl.hcxt = CurrentMemoryContext; - ctl.hash = tag_hash; - local_altered_reloid_cache = hash_create("local_altered_reloid_cache", - 1024, - &ctl, - HASH_ELEM | 
HASH_CONTEXT | HASH_FUNCTION); + ctl.hcxt = CurrentMemoryContext; + ctl.hash = tag_hash; + local_altered_reloid_cache = + hash_create("local_altered_reloid_cache", 1024, &ctl, HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); /* Move active table from shared memory to local active table map */ LWLockAcquire(diskquota_locks.active_table_lock, LW_EXCLUSIVE); @@ -677,9 +639,9 @@ get_active_tables_oid(void) hash_seq_init(&iter, active_tables_map); /* copy active table from shared memory into local memory */ - while ((active_table_file_entry = (DiskQuotaActiveTableFileEntry *) hash_seq_search(&iter)) != NULL) + while ((active_table_file_entry = (DiskQuotaActiveTableFileEntry *)hash_seq_search(&iter)) != NULL) { - bool found; + bool found; DiskQuotaActiveTableFileEntry *entry; if (active_table_file_entry->dbid != MyDatabaseId) @@ -689,8 +651,7 @@ get_active_tables_oid(void) /* Add the active table entry into local hash table */ entry = hash_search(local_active_table_file_map, active_table_file_entry, HASH_ENTER, &found); - if (entry) - *entry = *active_table_file_entry; + if (entry) *entry = *active_table_file_entry; hash_search(active_tables_map, active_table_file_entry, HASH_REMOVE, NULL); } // TODO: hash_seq_term(&iter); @@ -698,15 +659,13 @@ get_active_tables_oid(void) memset(&ctl, 0, sizeof(ctl)); /* only use Oid as key here, segid is not needed */ - ctl.keysize = sizeof(Oid); + ctl.keysize = sizeof(Oid); ctl.entrysize = sizeof(DiskQuotaActiveTableEntry); - ctl.hcxt = CurrentMemoryContext; - ctl.hash = oid_hash; + ctl.hcxt = CurrentMemoryContext; + ctl.hash = oid_hash; - local_active_table_stats_map = hash_create("local active table map with relfilenode info", - 1024, - &ctl, - HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); + local_active_table_stats_map = hash_create("local active table map with relfilenode info", 1024, &ctl, + HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); remove_committed_relation_from_cache(); @@ -716,27 +675,27 @@ get_active_tables_oid(void) */ hash_seq_init(&iter, local_active_table_file_map); - while ((active_table_file_entry = (DiskQuotaActiveTableFileEntry *) hash_seq_search(&iter)) != NULL) + while ((active_table_file_entry = (DiskQuotaActiveTableFileEntry *)hash_seq_search(&iter)) != NULL) { - bool found; + bool found; RelFileNode rnode; - Oid prelid; + Oid prelid; - rnode.dbNode = active_table_file_entry->dbid; + rnode.dbNode = active_table_file_entry->dbid; rnode.relNode = active_table_file_entry->relfilenode; rnode.spcNode = active_table_file_entry->tablespaceoid; - relOid = get_relid_by_relfilenode(rnode); + relOid = get_relid_by_relfilenode(rnode); if (relOid != InvalidOid) { - prelid = get_primary_table_oid(relOid); + prelid = get_primary_table_oid(relOid); active_table_entry = hash_search(local_active_table_stats_map, &prelid, HASH_ENTER, &found); if (active_table_entry && !found) { active_table_entry->reloid = prelid; /* we don't care segid and tablesize here */ active_table_entry->tablesize = 0; - active_table_entry->segid = -1; + active_table_entry->segid = -1; } if (!is_relation_being_altered(relOid)) hash_search(local_active_table_file_map, active_table_file_entry, HASH_REMOVE, NULL); @@ -744,66 +703,58 @@ get_active_tables_oid(void) } // TODO: hash_seq_term(&iter); - + /* Adding the remaining relfilenodes back to the map in the shared memory */ LWLockAcquire(diskquota_locks.active_table_lock, LW_EXCLUSIVE); hash_seq_init(&iter, local_active_table_file_map); - while ((active_table_file_entry = (DiskQuotaActiveTableFileEntry *) hash_seq_search(&iter)) != NULL) + 
while ((active_table_file_entry = (DiskQuotaActiveTableFileEntry *)hash_seq_search(&iter)) != NULL) { /* TODO: handle possible ERROR here so that the bgworker will not go down. */ - hash_search(active_tables_map, active_table_file_entry, HASH_ENTER, NULL); + hash_search(active_tables_map, active_table_file_entry, HASH_ENTER, NULL); } /* TODO: hash_seq_term(&iter); */ LWLockRelease(diskquota_locks.active_table_lock); - LWLockAcquire(diskquota_locks.altered_reloid_cache_lock, LW_SHARED); hash_seq_init(&iter, altered_reloid_cache); - while ((altered_reloid_entry = (Oid *) hash_seq_search(&iter)) != NULL) + while ((altered_reloid_entry = (Oid *)hash_seq_search(&iter)) != NULL) { - bool found; - Oid altered_oid = *altered_reloid_entry; + bool found; + Oid altered_oid = *altered_reloid_entry; if (OidIsValid(*altered_reloid_entry)) { - active_table_entry = hash_search(local_active_table_stats_map, - &altered_oid, - HASH_ENTER, &found); + active_table_entry = hash_search(local_active_table_stats_map, &altered_oid, HASH_ENTER, &found); if (!found && active_table_entry) { active_table_entry->reloid = altered_oid; /* We don't care segid and tablesize here. */ active_table_entry->tablesize = 0; - active_table_entry->segid = -1; + active_table_entry->segid = -1; } } - hash_search(local_altered_reloid_cache, - &altered_oid, HASH_ENTER, NULL); + hash_search(local_altered_reloid_cache, &altered_oid, HASH_ENTER, NULL); } LWLockRelease(diskquota_locks.altered_reloid_cache_lock); hash_seq_init(&iter, local_altered_reloid_cache); - while ((altered_reloid_entry = (Oid *) hash_seq_search(&iter)) != NULL) + while ((altered_reloid_entry = (Oid *)hash_seq_search(&iter)) != NULL) { - if (OidIsValid(*altered_reloid_entry) && - !is_relation_being_altered(*altered_reloid_entry)) + if (OidIsValid(*altered_reloid_entry) && !is_relation_being_altered(*altered_reloid_entry)) { - hash_search(local_altered_reloid_cache, - altered_reloid_entry, HASH_REMOVE, NULL); + hash_search(local_altered_reloid_cache, altered_reloid_entry, HASH_REMOVE, NULL); } } LWLockAcquire(diskquota_locks.altered_reloid_cache_lock, LW_EXCLUSIVE); hash_seq_init(&iter, altered_reloid_cache); - while ((altered_reloid_entry = (Oid *) hash_seq_search(&iter)) != NULL) + while ((altered_reloid_entry = (Oid *)hash_seq_search(&iter)) != NULL) { - bool found; - Oid altered_reloid = *altered_reloid_entry; - hash_search(local_altered_reloid_cache, &altered_reloid, - HASH_FIND, &found); + bool found; + Oid altered_reloid = *altered_reloid_entry; + hash_search(local_altered_reloid_cache, &altered_reloid, HASH_FIND, &found); if (!found) { - hash_search(altered_reloid_cache, &altered_reloid, - HASH_REMOVE, NULL); + hash_search(altered_reloid_cache, &altered_reloid, HASH_REMOVE, NULL); } } LWLockRelease(diskquota_locks.altered_reloid_cache_lock); @@ -814,16 +765,15 @@ get_active_tables_oid(void) */ if (hash_get_num_entries(local_active_table_file_map) > 0) { - bool found; + bool found; DiskQuotaActiveTableFileEntry *entry; hash_seq_init(&iter, local_active_table_file_map); LWLockAcquire(diskquota_locks.active_table_lock, LW_EXCLUSIVE); - while ((active_table_file_entry = (DiskQuotaActiveTableFileEntry *) hash_seq_search(&iter)) != NULL) + while ((active_table_file_entry = (DiskQuotaActiveTableFileEntry *)hash_seq_search(&iter)) != NULL) { entry = hash_search(active_tables_map, active_table_file_entry, HASH_ENTER_NULL, &found); - if (entry) - *entry = *active_table_file_entry; + if (entry) *entry = *active_table_file_entry; } 
LWLockRelease(diskquota_locks.active_table_lock); } @@ -836,17 +786,17 @@ get_active_tables_oid(void) * Load table size info from diskquota.table_size table. * This is called when system startup, disk quota black list * and other shared memory will be warmed up by table_size table. -*/ + */ static void load_table_size(HTAB *local_table_stats_map) { - int ret; - TupleDesc tupdesc; - int i; - bool found; - TableEntryKey key; + int ret; + TupleDesc tupdesc; + int i; + bool found; + TableEntryKey key; DiskQuotaActiveTableEntry *quota_entry; - int extMajorVersion = get_ext_major_version(); + int extMajorVersion = get_ext_major_version(); switch (extMajorVersion) { case 1: @@ -856,68 +806,57 @@ load_table_size(HTAB *local_table_stats_map) ret = SPI_execute("select tableid, size, segid from diskquota.table_size", true, 0); break; default: - ereport(ERROR, - (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("[diskquota] unknown diskquota extension version: %d", extMajorVersion))); + ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("[diskquota] unknown diskquota extension version: %d", extMajorVersion))); } if (ret != SPI_OK_SELECT) ereport(ERROR, (errmsg("[diskquota] load_table_size SPI_execute failed: return code %d, error: %m", ret))); tupdesc = SPI_tuptable->tupdesc; - if (tupdesc->natts != 3 || - ((tupdesc)->attrs[0])->atttypid != OIDOID || - ((tupdesc)->attrs[1])->atttypid != INT8OID || - ((tupdesc)->attrs[2])->atttypid != INT2OID) + if (tupdesc->natts != 3 || ((tupdesc)->attrs[0])->atttypid != OIDOID || + ((tupdesc)->attrs[1])->atttypid != INT8OID || ((tupdesc)->attrs[2])->atttypid != INT2OID) { if (tupdesc->natts != 3) { ereport(WARNING, (errmsg("[diskquota] tupdesc->natts: %d", tupdesc->natts))); - } - else + } else { - ereport(WARNING, (errmsg("[diskquota] attrs: %d, %d, %d", - tupdesc->attrs[0]->atttypid, tupdesc->attrs[1]->atttypid, tupdesc->attrs[2]->atttypid))); + ereport(WARNING, (errmsg("[diskquota] attrs: %d, %d, %d", tupdesc->attrs[0]->atttypid, + tupdesc->attrs[1]->atttypid, tupdesc->attrs[2]->atttypid))); } ereport(ERROR, (errmsg("[diskquota] table \"table_size\" is corrupted in database \"%s\"," - " please recreate diskquota extension", - get_database_name(MyDatabaseId)))); + " please recreate diskquota extension", + get_database_name(MyDatabaseId)))); } /* push the table oid and size into local_table_stats_map */ for (i = 0; i < SPI_processed; i++) { - HeapTuple tup = SPI_tuptable->vals[i]; - Datum dat; - Oid reloid; - int64 size; - int16 segid; - bool isnull; + HeapTuple tup = SPI_tuptable->vals[i]; + Datum dat; + Oid reloid; + int64 size; + int16 segid; + bool isnull; dat = SPI_getbinval(tup, tupdesc, 1, &isnull); - if (isnull) - continue; + if (isnull) continue; reloid = DatumGetObjectId(dat); dat = SPI_getbinval(tup, tupdesc, 2, &isnull); - if (isnull) - continue; + if (isnull) continue; size = DatumGetInt64(dat); - dat = SPI_getbinval(tup, tupdesc, 3, &isnull); - if (isnull) - continue; - segid = DatumGetInt16(dat); + dat = SPI_getbinval(tup, tupdesc, 3, &isnull); + if (isnull) continue; + segid = DatumGetInt16(dat); key.reloid = reloid; - key.segid = segid; - + key.segid = segid; - quota_entry = (DiskQuotaActiveTableEntry *) hash_search( - local_table_stats_map, - &key, - HASH_ENTER, &found); + quota_entry = (DiskQuotaActiveTableEntry *)hash_search(local_table_stats_map, &key, HASH_ENTER, &found); quota_entry->reloid = reloid; quota_entry->tablesize = size; - quota_entry->segid = segid; + quota_entry->segid = segid; } return; } @@ -930,25 +869,24 @@ 
load_table_size(HTAB *local_table_stats_map) static StringInfoData convert_map_to_string(HTAB *local_active_table_oid_maps) { - HASH_SEQ_STATUS iter; - StringInfoData buffer; + HASH_SEQ_STATUS iter; + StringInfoData buffer; DiskQuotaActiveTableEntry *entry; - uint32 count = 0; - uint32 nitems = hash_get_num_entries(local_active_table_oid_maps); + uint32 count = 0; + uint32 nitems = hash_get_num_entries(local_active_table_oid_maps); initStringInfo(&buffer); appendStringInfo(&buffer, "{"); hash_seq_init(&iter, local_active_table_oid_maps); - while ((entry = (DiskQuotaActiveTableEntry *) hash_seq_search(&iter)) != NULL) + while ((entry = (DiskQuotaActiveTableEntry *)hash_seq_search(&iter)) != NULL) { count++; if (count != nitems) { appendStringInfo(&buffer, "%d,", entry->reloid); - } - else + } else { appendStringInfo(&buffer, "%d", entry->reloid); } @@ -967,25 +905,21 @@ convert_map_to_string(HTAB *local_active_table_oid_maps) static HTAB * pull_active_list_from_seg(void) { - CdbPgResults cdb_pgresults = {NULL, 0}; - int i, - j; - char *sql = NULL; - HTAB *local_active_table_oid_map = NULL; - HASHCTL ctl; + CdbPgResults cdb_pgresults = {NULL, 0}; + int i, j; + char *sql = NULL; + HTAB *local_active_table_oid_map = NULL; + HASHCTL ctl; DiskQuotaActiveTableEntry *entry; memset(&ctl, 0, sizeof(ctl)); - ctl.keysize = sizeof(Oid); + ctl.keysize = sizeof(Oid); ctl.entrysize = sizeof(DiskQuotaActiveTableEntry); - ctl.hcxt = CurrentMemoryContext; - ctl.hash = oid_hash; - - local_active_table_oid_map = hash_create("local active table map with relfilenode info", - 1024, - &ctl, - HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); + ctl.hcxt = CurrentMemoryContext; + ctl.hash = oid_hash; + local_active_table_oid_map = hash_create("local active table map with relfilenode info", 1024, &ctl, + HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); /* first get all oid of tables which are active table on any segment */ sql = "select * from diskquota.diskquota_fetch_table_stat(0, '{}'::oid[])"; @@ -994,17 +928,16 @@ pull_active_list_from_seg(void) CdbDispatchCommand(sql, DF_NONE, &cdb_pgresults); for (i = 0; i < cdb_pgresults.numResults; i++) { - Oid reloid; - bool found; + Oid reloid; + bool found; PGresult *pgresult = cdb_pgresults.pg_results[i]; if (PQresultStatus(pgresult) != PGRES_TUPLES_OK) { cdbdisp_clearCdbPgResults(&cdb_pgresults); - ereport(ERROR, - (errmsg("[diskquota] fetching active tables, encounter unexpected result from segment: %d", - PQresultStatus(pgresult)))); + ereport(ERROR, (errmsg("[diskquota] fetching active tables, encounter unexpected result from segment: %d", + PQresultStatus(pgresult)))); } /* push the active table oid into local_active_table_oid_map */ @@ -1012,13 +945,13 @@ pull_active_list_from_seg(void) { reloid = atooid(PQgetvalue(pgresult, j, 0)); - entry = (DiskQuotaActiveTableEntry *) hash_search(local_active_table_oid_map, &reloid, HASH_ENTER, &found); + entry = (DiskQuotaActiveTableEntry *)hash_search(local_active_table_oid_map, &reloid, HASH_ENTER, &found); if (!found) { - entry->reloid = reloid; + entry->reloid = reloid; entry->tablesize = 0; - entry->segid = -1; + entry->segid = -1; } } } @@ -1039,33 +972,31 @@ pull_active_list_from_seg(void) static void pull_active_table_size_from_seg(HTAB *local_table_stats_map, char *active_oid_array) { - CdbPgResults cdb_pgresults = {NULL, 0}; + CdbPgResults cdb_pgresults = {NULL, 0}; StringInfoData sql_command; - int i; - int j; + int i; + int j; initStringInfo(&sql_command); appendStringInfo(&sql_command, "select * from 
diskquota.diskquota_fetch_table_stat(1, '%s'::oid[])", - active_oid_array); + active_oid_array); CdbDispatchCommand(sql_command.data, DF_NONE, &cdb_pgresults); pfree(sql_command.data); SEGCOUNT = cdb_pgresults.numResults; - if (SEGCOUNT <= 0 ) + if (SEGCOUNT <= 0) { - ereport(ERROR, - (errmsg("[diskquota] there is no active segment, SEGCOUNT is %d", SEGCOUNT))); + ereport(ERROR, (errmsg("[diskquota] there is no active segment, SEGCOUNT is %d", SEGCOUNT))); } /* sum table size from each segment into local_table_stats_map */ for (i = 0; i < cdb_pgresults.numResults; i++) { - - Size tableSize; - bool found; - Oid reloid; - int segId; - TableEntryKey key; + Size tableSize; + bool found; + Oid reloid; + int segId; + TableEntryKey key; DiskQuotaActiveTableEntry *entry; PGresult *pgresult = cdb_pgresults.pg_results[i]; @@ -1073,53 +1004,48 @@ pull_active_table_size_from_seg(HTAB *local_table_stats_map, char *active_oid_ar if (PQresultStatus(pgresult) != PGRES_TUPLES_OK) { cdbdisp_clearCdbPgResults(&cdb_pgresults); - ereport(ERROR, - (errmsg("[diskquota] fetching active tables, encounter unexpected result from segment: %d", - PQresultStatus(pgresult)))); + ereport(ERROR, (errmsg("[diskquota] fetching active tables, encounter unexpected result from segment: %d", + PQresultStatus(pgresult)))); } for (j = 0; j < PQntuples(pgresult); j++) { - reloid = atooid(PQgetvalue(pgresult, j, 0)); - tableSize = (Size) atoll(PQgetvalue(pgresult, j, 1)); + reloid = atooid(PQgetvalue(pgresult, j, 0)); + tableSize = (Size)atoll(PQgetvalue(pgresult, j, 1)); key.reloid = reloid; /* for diskquota extension version is 1.0, pgresult doesn't contain segid */ if (PQnfields(pgresult) == 3) { /* get the segid, tablesize for each table */ - segId = atoi(PQgetvalue(pgresult, j, 2)); + segId = atoi(PQgetvalue(pgresult, j, 2)); key.segid = segId; - entry = (DiskQuotaActiveTableEntry *) hash_search( - local_table_stats_map, &key, HASH_ENTER, &found); + entry = (DiskQuotaActiveTableEntry *)hash_search(local_table_stats_map, &key, HASH_ENTER, &found); if (!found) { /* receive table size info from the first segment */ entry->reloid = reloid; - entry->segid = segId; + entry->segid = segId; } entry->tablesize = tableSize; } /* when segid is -1, the tablesize is the sum of tablesize of master and all segments */ key.segid = -1; - entry = (DiskQuotaActiveTableEntry *) hash_search( - local_table_stats_map, &key, HASH_ENTER, &found); + entry = (DiskQuotaActiveTableEntry *)hash_search(local_table_stats_map, &key, HASH_ENTER, &found); if (!found) { /* receive table size info from the first segment */ - entry->reloid = reloid; + entry->reloid = reloid; entry->tablesize = tableSize; - entry->segid = -1; - } - else + entry->segid = -1; + } else { /* sum table size from all the segments */ entry->tablesize = entry->tablesize + tableSize; } - } } cdbdisp_clearCdbPgResults(&cdb_pgresults); diff --git a/gp_activetable.h b/gp_activetable.h index 66ccc2916e8..317d703d125 100644 --- a/gp_activetable.h +++ b/gp_activetable.h @@ -7,34 +7,34 @@ /* Cache to detect the active table list */ typedef struct DiskQuotaActiveTableFileEntry { - Oid dbid; - Oid relfilenode; - Oid tablespaceoid; -} DiskQuotaActiveTableFileEntry; + Oid dbid; + Oid relfilenode; + Oid tablespaceoid; +} DiskQuotaActiveTableFileEntry; typedef struct TableEntryKey { - Oid reloid; - int segid; -} TableEntryKey; + Oid reloid; + int segid; +} TableEntryKey; typedef struct DiskQuotaActiveTableEntry { - Oid reloid; - int segid; - Size tablesize; -} DiskQuotaActiveTableEntry; + Oid reloid; + 
int segid; + Size tablesize; +} DiskQuotaActiveTableEntry; extern HTAB *gp_fetch_active_tables(bool force); -extern void init_active_table_hook(void); -extern void init_shm_worker_active_tables(void); -extern void init_lock_active_tables(void); -extern void update_diskquota_db_list(Oid dbid, HASHACTION action); +extern void init_active_table_hook(void); +extern void init_shm_worker_active_tables(void); +extern void init_lock_active_tables(void); +extern void update_diskquota_db_list(Oid dbid, HASHACTION action); extern HTAB *active_tables_map; extern HTAB *monitoring_dbid_cache; extern HTAB *altered_reloid_cache; -#define atooid(x) ((Oid) strtoul((x), NULL, 10)) +#define atooid(x) ((Oid)strtoul((x), NULL, 10)) #endif diff --git a/quotamodel.c b/quotamodel.c index 9c105a65f00..d134a79a17a 100644 --- a/quotamodel.c +++ b/quotamodel.c @@ -50,97 +50,87 @@ /* Number of attributes in quota configuration records. */ #define NUM_QUOTA_CONFIG_ATTRS 5 -typedef struct TableSizeEntry TableSizeEntry; -typedef struct NamespaceSizeEntry NamespaceSizeEntry; -typedef struct RoleSizeEntry RoleSizeEntry; -typedef struct QuotaLimitEntry QuotaLimitEntry; -typedef struct BlackMapEntry BlackMapEntry; +typedef struct TableSizeEntry TableSizeEntry; +typedef struct NamespaceSizeEntry NamespaceSizeEntry; +typedef struct RoleSizeEntry RoleSizeEntry; +typedef struct QuotaLimitEntry QuotaLimitEntry; +typedef struct BlackMapEntry BlackMapEntry; typedef struct GlobalBlackMapEntry GlobalBlackMapEntry; -typedef struct LocalBlackMapEntry LocalBlackMapEntry; +typedef struct LocalBlackMapEntry LocalBlackMapEntry; - -int SEGCOUNT = 0; +int SEGCOUNT = 0; /* * local cache of table disk size and corresponding schema and owner */ struct TableSizeEntry { - Oid reloid; - int16 segid; - Oid tablespaceoid; - Oid namespaceoid; - Oid owneroid; - int64 totalsize; /* table size including fsm, visibility map - * etc. */ - bool is_exist; /* flag used to check whether table is already - * dropped */ - bool need_flush; /* whether need to flush to table table_size */ + Oid reloid; + int16 segid; + Oid tablespaceoid; + Oid namespaceoid; + Oid owneroid; + int64 totalsize; /* table size including fsm, visibility map + * etc. 
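
Aside: the statistics merged in pull_active_table_size_from_seg above are keyed by (reloid, segid), and the pseudo-segment id -1 carries the cluster-wide total, so every per-segment row is also folded into the (reloid, -1) entry as it is parsed. The following standalone sketch models that aggregation with hypothetical sample rows and a plain sum in place of the dynahash lookups; atooid is the same macro defined in gp_activetable.h above.

    #include <stdio.h>
    #include <stdlib.h>

    typedef unsigned int Oid;
    #define atooid(x) ((Oid) strtoul((x), NULL, 10))

    int main(void)
    {
        /* hypothetical rows as PQgetvalue() would return them:
         * (reloid, tablesize, segid) in text form */
        const char *rows[3][3] = {
            {"16384", "32768", "0"},
            {"16384", "65536", "1"},
            {"16384", "16384", "2"},
        };
        long long total = 0; /* plays the role of the (reloid, -1) entry */

        for (int i = 0; i < 3; i++)
        {
            Oid       reloid    = atooid(rows[i][0]);
            long long tablesize = atoll(rows[i][1]);
            int       segid     = atoi(rows[i][2]);

            /* a per-segment entry would be stored under (reloid, segid) ... */
            printf("reloid %u seg %d -> %lld bytes\n", reloid, segid, tablesize);
            /* ... and each row is also summed into the segid == -1 entry */
            total += tablesize;
        }
        printf("reloid 16384 seg -1 -> %lld bytes (cluster-wide)\n", total);
        return 0;
    }
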
*/ + bool is_exist; /* flag used to check whether table is already + * dropped */ + bool need_flush; /* whether need to flush to table table_size */ }; -struct QuotaMapEntryKey { - Oid keys[MAX_NUM_KEYS_QUOTA_MAP]; +struct QuotaMapEntryKey +{ + Oid keys[MAX_NUM_KEYS_QUOTA_MAP]; int16 segid; }; -struct QuotaMapEntry { - Oid keys[MAX_NUM_KEYS_QUOTA_MAP]; +struct QuotaMapEntry +{ + Oid keys[MAX_NUM_KEYS_QUOTA_MAP]; int16 segid; int64 size; int64 limit; }; -struct QuotaInfo { - char *map_name; +struct QuotaInfo +{ + char *map_name; unsigned int num_keys; - Oid *sys_cache; - HTAB *map; + Oid *sys_cache; + HTAB *map; }; struct QuotaInfo quota_info[NUM_QUOTA_TYPES] = { - [NAMESPACE_QUOTA] = { - .map_name = "Namespace map", - .num_keys = 1, - .sys_cache = (Oid[]){ NAMESPACEOID }, - .map = NULL - }, - [ROLE_QUOTA] = { - .map_name = "Role map", - .num_keys = 1, - .sys_cache = (Oid[]){ AUTHOID }, - .map = NULL - }, - [NAMESPACE_TABLESPACE_QUOTA] = { - .map_name = "Tablespace-namespace map", - .num_keys = 2, - .sys_cache = (Oid[]){ NAMESPACEOID, TABLESPACEOID }, - .map = NULL - }, - [ROLE_TABLESPACE_QUOTA] = { - .map_name = "Tablespace-role map", - .num_keys = 2, - .sys_cache = (Oid[]){ AUTHOID, TABLESPACEOID }, - .map = NULL - } -}; + [NAMESPACE_QUOTA] = {.map_name = "Namespace map", + .num_keys = 1, + .sys_cache = (Oid[]){NAMESPACEOID}, + .map = NULL}, + [ROLE_QUOTA] = {.map_name = "Role map", .num_keys = 1, .sys_cache = (Oid[]){AUTHOID}, .map = NULL}, + [NAMESPACE_TABLESPACE_QUOTA] = {.map_name = "Tablespace-namespace map", + .num_keys = 2, + .sys_cache = (Oid[]){NAMESPACEOID, TABLESPACEOID}, + .map = NULL}, + [ROLE_TABLESPACE_QUOTA] = {.map_name = "Tablespace-role map", + .num_keys = 2, + .sys_cache = (Oid[]){AUTHOID, TABLESPACEOID}, + .map = NULL}}; /* global blacklist for which exceed their quota limit */ struct BlackMapEntry { - Oid targetoid; - Oid databaseoid; - Oid tablespaceoid; - uint32 targettype; + Oid targetoid; + Oid databaseoid; + Oid tablespaceoid; + uint32 targettype; /* * QD index the blackmap by (targetoid, databaseoid, tablespaceoid, targettype). * QE index the blackmap by (relfilenode). */ - RelFileNode relfilenode; + RelFileNode relfilenode; }; struct GlobalBlackMapEntry { - BlackMapEntry keyitem; - bool segexceeded; + BlackMapEntry keyitem; + bool segexceeded; /* * When the quota limit is exceeded on segment servers, * we need an extra auxiliary field to preserve the quota @@ -148,35 +138,35 @@ struct GlobalBlackMapEntry * servers, e.g., targettype, targetoid. This field is * useful on segment servers. 
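
Aside: note that BlackMapEntry keys are always zeroed with memset() before being filled in and handed to hash_search(). tag_hash hashes the raw bytes of the key, and the same struct serves two roles (the QD fills the oid fields, the QE fills relfilenode), so any unused field or padding byte left uninitialized would make logically equal keys hash and compare as different. A standalone sketch of the hazard, using stand-in types:

    #include <stdio.h>
    #include <string.h>

    typedef unsigned int Oid;

    /* stand-in for BlackMapEntry: a QD key uses the first four fields
     * and leaves relfilenode zero; a QE key does the opposite */
    typedef struct
    {
        Oid      targetoid;
        Oid      databaseoid;
        Oid      tablespaceoid;
        unsigned targettype;
        Oid      relfilenode[3]; /* simplified RelFileNode */
    } Key;

    int main(void)
    {
        Key a, b;
        memset(&a, 0, sizeof(a));    /* what the extension does */
        memset(&b, 0xAA, sizeof(b)); /* garbage in the unused bytes */
        a.targetoid = b.targetoid = 16384;
        a.databaseoid = b.databaseoid = 1;
        a.tablespaceoid = b.tablespaceoid = 1663;
        a.targettype = b.targettype = 0;
        /* b's relfilenode bytes still differ, so a byte-wise hash and
         * compare (tag_hash + memcmp) would treat these keys as distinct */
        printf("memcmp = %d\n", memcmp(&a, &b, sizeof(Key)));
        return 0;
    }
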
*/ - BlackMapEntry auxblockinfo; + BlackMapEntry auxblockinfo; }; /* local blacklist for which exceed their quota limit */ struct LocalBlackMapEntry { - BlackMapEntry keyitem; - bool isexceeded; - bool segexceeded; + BlackMapEntry keyitem; + bool isexceeded; + bool segexceeded; }; /* using hash table to support incremental update the table size entry.*/ static HTAB *table_size_map = NULL; /* black list for database objects which exceed their quota limit */ -static HTAB *disk_quota_black_map = NULL; +static HTAB *disk_quota_black_map = NULL; static HTAB *local_disk_quota_black_map = NULL; static shmem_startup_hook_type prev_shmem_startup_hook = NULL; /* functions to maintain the quota maps */ static void init_all_quota_maps(void); -static void update_size_for_quota(int64 size, QuotaType type, Oid* keys, int16 segid); -static void update_limit_for_quota(int64 limit, float segratio, QuotaType type, Oid* keys); -static void remove_quota(QuotaType type, Oid* keys, int16 segid); +static void update_size_for_quota(int64 size, QuotaType type, Oid *keys, int16 segid); +static void update_limit_for_quota(int64 limit, float segratio, QuotaType type, Oid *keys); +static void remove_quota(QuotaType type, Oid *keys, int16 segid); static void add_quota_to_blacklist(QuotaType type, Oid targetOid, Oid tablespaceoid, bool segexceeded); static void check_quota_map(QuotaType type); static void clear_all_quota_maps(void); -static void transfer_table_for_quota(int64 totalsize, QuotaType type, Oid* old_keys, Oid* new_keys, int16 segid); +static void transfer_table_for_quota(int64 totalsize, QuotaType type, Oid *old_keys, Oid *new_keys, int16 segid); /* functions to refresh disk quota model*/ static void refresh_disk_quota_usage(bool is_init); @@ -193,40 +183,39 @@ static void disk_quota_shmem_startup(void); static void init_lwlocks(void); static void export_exceeded_error(GlobalBlackMapEntry *entry, bool skip_name); -void truncateStringInfo(StringInfo str, int nchars); +void truncateStringInfo(StringInfo str, int nchars); static void init_all_quota_maps(void) { - HASHCTL hash_ctl = {0}; + HASHCTL hash_ctl = {0}; hash_ctl.entrysize = sizeof(struct QuotaMapEntry); - hash_ctl.hcxt = TopMemoryContext; + hash_ctl.hcxt = TopMemoryContext; for (QuotaType type = 0; type < NUM_QUOTA_TYPES; ++type) { hash_ctl.keysize = sizeof(struct QuotaMapEntryKey); - hash_ctl.hash = tag_hash; + hash_ctl.hash = tag_hash; if (quota_info[type].map != NULL) { hash_destroy(quota_info[type].map); } - quota_info[type].map = hash_create( - quota_info[type].map_name, 1024L, &hash_ctl, HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); + quota_info[type].map = + hash_create(quota_info[type].map_name, 1024L, &hash_ctl, HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); } } /* add a new entry quota or update the old entry quota */ static void -update_size_for_quota(int64 size, QuotaType type, Oid* keys, int16 segid) +update_size_for_quota(int64 size, QuotaType type, Oid *keys, int16 segid) { - bool found; + bool found; struct QuotaMapEntryKey key = {0}; memcpy(key.keys, keys, quota_info[type].num_keys * sizeof(Oid)); - key.segid = segid; - struct QuotaMapEntry *entry = hash_search( - quota_info[type].map, &key, HASH_ENTER, &found); + key.segid = segid; + struct QuotaMapEntry *entry = hash_search(quota_info[type].map, &key, HASH_ENTER, &found); if (!found) { - entry->size = 0; + entry->size = 0; entry->limit = -1; memcpy(entry->keys, keys, quota_info[type].num_keys * sizeof(Oid)); entry->segid = key.segid; @@ -236,16 +225,15 @@ update_size_for_quota(int64 size, 
QuotaType type, Oid* keys, int16 segid) /* add a new entry quota or update the old entry limit */ static void -update_limit_for_quota(int64 limit, float segratio, QuotaType type, Oid* keys) +update_limit_for_quota(int64 limit, float segratio, QuotaType type, Oid *keys) { bool found; - for ( int i = -1; i < SEGCOUNT ; i++) + for (int i = -1; i < SEGCOUNT; i++) { struct QuotaMapEntryKey key = {0}; memcpy(key.keys, keys, quota_info[type].num_keys * sizeof(Oid)); - key.segid = i; - struct QuotaMapEntry *entry = hash_search( - quota_info[type].map, &key, HASH_ENTER, &found); + key.segid = i; + struct QuotaMapEntry *entry = hash_search(quota_info[type].map, &key, HASH_ENTER, &found); if (!found) { entry->size = 0; @@ -255,8 +243,7 @@ update_limit_for_quota(int64 limit, float segratio, QuotaType type, Oid* keys) if (key.segid == -1) { entry->limit = limit; - } - else + } else { entry->limit = round((limit / SEGCOUNT) * segratio); } @@ -265,7 +252,7 @@ update_limit_for_quota(int64 limit, float segratio, QuotaType type, Oid* keys) /* remove a entry quota from the map */ static void -remove_quota(QuotaType type, Oid* keys, int16 segid) +remove_quota(QuotaType type, Oid *keys, int16 segid) { struct QuotaMapEntryKey key = {0}; memcpy(key.keys, keys, quota_info[type].num_keys * sizeof(Oid)); @@ -280,21 +267,17 @@ remove_quota(QuotaType type, Oid* keys, int16 segid) static void add_quota_to_blacklist(QuotaType type, Oid targetOid, Oid tablespaceoid, bool segexceeded) { - LocalBlackMapEntry *localblackentry; - BlackMapEntry keyitem = {0}; + BlackMapEntry keyitem = {0}; - keyitem.targetoid = targetOid; - keyitem.databaseoid = MyDatabaseId; + keyitem.targetoid = targetOid; + keyitem.databaseoid = MyDatabaseId; keyitem.tablespaceoid = tablespaceoid; - keyitem.targettype = (uint32) type; + keyitem.targettype = (uint32)type; ereport(DEBUG1, (errmsg("[diskquota] Put object %u to blacklist", targetOid))); - localblackentry = (LocalBlackMapEntry *) hash_search(local_disk_quota_black_map, - &keyitem, - HASH_ENTER, NULL); - localblackentry->isexceeded = true; + localblackentry = (LocalBlackMapEntry *)hash_search(local_disk_quota_black_map, &keyitem, HASH_ENTER, NULL); + localblackentry->isexceeded = true; localblackentry->segexceeded = segexceeded; - } /* @@ -305,8 +288,8 @@ add_quota_to_blacklist(QuotaType type, Oid targetOid, Oid tablespaceoid, bool se static void check_quota_map(QuotaType type) { - HeapTuple tuple; - HASH_SEQ_STATUS iter; + HeapTuple tuple; + HASH_SEQ_STATUS iter; struct QuotaMapEntry *entry; hash_seq_init(&iter, quota_info[type].map); @@ -333,8 +316,9 @@ check_quota_map(QuotaType type) /* when quota type is not NAMESPACE_TABLESPACE_QUOTA or ROLE_TABLESPACE_QUOTA, the tablespaceoid * is set to be InvalidOid, so when we get it from map, also set it to be InvalidOid */ - Oid tablespaceoid = - (type == NAMESPACE_TABLESPACE_QUOTA) || (type == ROLE_TABLESPACE_QUOTA) ? entry->keys[1] : InvalidOid; + Oid tablespaceoid = (type == NAMESPACE_TABLESPACE_QUOTA) || (type == ROLE_TABLESPACE_QUOTA) + ? entry->keys[1] + : InvalidOid; bool segmentExceeded = entry->segid == -1 ? 
false : true; add_quota_to_blacklist(type, targetOid, tablespaceoid, segmentExceeded); @@ -345,7 +329,7 @@ check_quota_map(QuotaType type) /* transfer one table's size from one quota to another quota */ static void -transfer_table_for_quota(int64 totalsize, QuotaType type, Oid* old_keys, Oid* new_keys, int16 segid) +transfer_table_for_quota(int64 totalsize, QuotaType type, Oid *old_keys, Oid *new_keys, int16 segid) { update_size_for_quota(-totalsize, type, old_keys, segid); update_size_for_quota(totalsize, type, new_keys, segid); @@ -361,7 +345,7 @@ clear_all_quota_maps(void) struct QuotaMapEntry *entry = NULL; while ((entry = hash_seq_search(&iter)) != NULL) { - entry->limit = -1; + entry->limit = -1; } } } @@ -386,7 +370,7 @@ init_disk_quota_shmem(void) /* Install startup hook to initialize our shared memory. */ prev_shmem_startup_hook = shmem_startup_hook; - shmem_startup_hook = disk_quota_shmem_startup; + shmem_startup_hook = disk_quota_shmem_startup; } /* @@ -396,11 +380,10 @@ init_disk_quota_shmem(void) static void disk_quota_shmem_startup(void) { - bool found; - HASHCTL hash_ctl; + bool found; + HASHCTL hash_ctl; - if (prev_shmem_startup_hook) - (*prev_shmem_startup_hook) (); + if (prev_shmem_startup_hook) (*prev_shmem_startup_hook)(); LWLockAcquire(AddinShmemInitLock, LW_EXCLUSIVE); @@ -412,49 +395,37 @@ disk_quota_shmem_startup(void) * to store out-of-quota blacklist. active_tables_map is used to store * active tables whose disk usage is changed. */ - extension_ddl_message = ShmemInitStruct("disk_quota_extension_ddl_message", - sizeof(ExtensionDDLMessage), - &found); - if (!found) - memset((void *) extension_ddl_message, 0, sizeof(ExtensionDDLMessage)); + extension_ddl_message = ShmemInitStruct("disk_quota_extension_ddl_message", sizeof(ExtensionDDLMessage), &found); + if (!found) memset((void *)extension_ddl_message, 0, sizeof(ExtensionDDLMessage)); memset(&hash_ctl, 0, sizeof(hash_ctl)); - hash_ctl.keysize = sizeof(BlackMapEntry); + hash_ctl.keysize = sizeof(BlackMapEntry); hash_ctl.entrysize = sizeof(GlobalBlackMapEntry); - hash_ctl.hash = tag_hash; + hash_ctl.hash = tag_hash; - disk_quota_black_map = ShmemInitHash("blackmap whose quota limitation is reached", - INIT_DISK_QUOTA_BLACK_ENTRIES, - MAX_DISK_QUOTA_BLACK_ENTRIES, - &hash_ctl, - HASH_ELEM | HASH_FUNCTION); + disk_quota_black_map = ShmemInitHash("blackmap whose quota limitation is reached", INIT_DISK_QUOTA_BLACK_ENTRIES, + MAX_DISK_QUOTA_BLACK_ENTRIES, &hash_ctl, HASH_ELEM | HASH_FUNCTION); init_shm_worker_active_tables(); init_shm_worker_relation_cache(); memset(&hash_ctl, 0, sizeof(hash_ctl)); - hash_ctl.keysize = sizeof(Oid); + hash_ctl.keysize = sizeof(Oid); hash_ctl.entrysize = sizeof(Oid); - hash_ctl.hash = oid_hash; + hash_ctl.hash = oid_hash; - monitoring_dbid_cache = ShmemInitHash("table oid cache which shoud tracking", - MAX_NUM_MONITORED_DB, - MAX_NUM_MONITORED_DB, - &hash_ctl, - HASH_ELEM | HASH_FUNCTION); + monitoring_dbid_cache = ShmemInitHash("table oid cache which shoud tracking", MAX_NUM_MONITORED_DB, + MAX_NUM_MONITORED_DB, &hash_ctl, HASH_ELEM | HASH_FUNCTION); /* use disk_quota_worker_map to manage diskquota worker processes. 
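
Aside on the arithmetic in update_limit_for_quota above: the segid == -1 entry keeps the whole limit, and each real segment gets round((limit / SEGCOUNT) * segratio). Since limit and SEGCOUNT are integers, limit / SEGCOUNT truncates before the ratio is applied. A standalone sketch with hypothetical numbers (link with -lm):

    #include <stdio.h>
    #include <stdint.h>
    #include <math.h>

    int main(void)
    {
        int64_t limit    = 10LL << 20; /* a 10 MB quota, in bytes */
        int     SEGCOUNT = 3;
        float   segratio = 1.5f;       /* per-segment headroom factor */

        for (int segid = -1; segid < SEGCOUNT; segid++)
        {
            int64_t seg_limit;
            if (segid == -1)
                seg_limit = limit; /* cluster-wide entry */
            else                   /* integer division happens first */
                seg_limit = (int64_t) round((limit / SEGCOUNT) * segratio);
            printf("segid %2d -> limit %lld\n", segid, (long long) seg_limit);
        }
        return 0;
    }
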
*/ memset(&hash_ctl, 0, sizeof(hash_ctl)); - hash_ctl.keysize = sizeof(Oid); + hash_ctl.keysize = sizeof(Oid); hash_ctl.entrysize = sizeof(DiskQuotaWorkerEntry); - hash_ctl.hash = oid_hash; + hash_ctl.hash = oid_hash; - disk_quota_worker_map = ShmemInitHash("disk quota worker map", - MAX_NUM_MONITORED_DB, - MAX_NUM_MONITORED_DB, - &hash_ctl, - HASH_ELEM | HASH_FUNCTION); + disk_quota_worker_map = ShmemInitHash("disk quota worker map", MAX_NUM_MONITORED_DB, MAX_NUM_MONITORED_DB, + &hash_ctl, HASH_ELEM | HASH_FUNCTION); LWLockRelease(AddinShmemInitLock); } @@ -472,14 +443,14 @@ disk_quota_shmem_startup(void) static void init_lwlocks(void) { - diskquota_locks.active_table_lock = LWLockAssign(); - diskquota_locks.black_map_lock = LWLockAssign(); + diskquota_locks.active_table_lock = LWLockAssign(); + diskquota_locks.black_map_lock = LWLockAssign(); diskquota_locks.extension_ddl_message_lock = LWLockAssign(); - diskquota_locks.extension_ddl_lock = LWLockAssign(); + diskquota_locks.extension_ddl_lock = LWLockAssign(); diskquota_locks.monitoring_dbid_cache_lock = LWLockAssign(); - diskquota_locks.relation_cache_lock = LWLockAssign(); - diskquota_locks.worker_map_lock = LWLockAssign(); - diskquota_locks.altered_reloid_cache_lock = LWLockAssign(); + diskquota_locks.relation_cache_lock = LWLockAssign(); + diskquota_locks.worker_map_lock = LWLockAssign(); + diskquota_locks.altered_reloid_cache_lock = LWLockAssign(); } /* @@ -489,7 +460,7 @@ init_lwlocks(void) static Size DiskQuotaShmemSize(void) { - Size size; + Size size; size = sizeof(ExtensionDDLMessage); size = add_size(size, hash_estimate_size(MAX_DISK_QUOTA_BLACK_ENTRIES, sizeof(GlobalBlackMapEntry))); @@ -502,7 +473,6 @@ DiskQuotaShmemSize(void) return size; } - /* ---- Functions for disk quota model ---- */ /* * Init disk quota model when the worker process firstly started. @@ -510,19 +480,16 @@ DiskQuotaShmemSize(void) void init_disk_quota_model(void) { - HASHCTL hash_ctl; + HASHCTL hash_ctl; /* initialize hash table for table/schema/role etc. 
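
Aside: DiskQuotaShmemSize() above accumulates with add_size() rather than plain +. Add-in shared memory is reserved once at postmaster start, so a silent size_t wrap-around would under-allocate; add_size() reports an error on overflow instead. A standalone model of that idea, with purely illustrative sizes:

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    /* overflow-checked addition in the spirit of the server's add_size() */
    static size_t add_size_checked(size_t a, size_t b)
    {
        if (a > SIZE_MAX - b)
        {
            fprintf(stderr, "requested shared memory size overflows size_t\n");
            exit(1);
        }
        return a + b;
    }

    int main(void)
    {
        size_t size = 0;
        size = add_size_checked(size, 1024);      /* message struct (illustrative) */
        size = add_size_checked(size, 64 * 1024); /* blackmap estimate (illustrative) */
        size = add_size_checked(size, 8 * 1024);  /* per-db caches (illustrative) */
        printf("total add-in shared memory request: %zu bytes\n", size);
        return 0;
    }
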
*/ memset(&hash_ctl, 0, sizeof(hash_ctl)); - hash_ctl.keysize = sizeof(TableEntryKey); + hash_ctl.keysize = sizeof(TableEntryKey); hash_ctl.entrysize = sizeof(TableSizeEntry); - hash_ctl.hcxt = CurrentMemoryContext; - hash_ctl.hash = tag_hash; + hash_ctl.hcxt = CurrentMemoryContext; + hash_ctl.hash = tag_hash; - table_size_map = hash_create("TableSizeEntry map", - 1024 * 8, - &hash_ctl, - HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); + table_size_map = hash_create("TableSizeEntry map", 1024 * 8, &hash_ctl, HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); init_all_quota_maps(); @@ -531,27 +498,26 @@ init_disk_quota_model(void) * blackmap in shared memory */ memset(&hash_ctl, 0, sizeof(hash_ctl)); - hash_ctl.keysize = sizeof(BlackMapEntry); + hash_ctl.keysize = sizeof(BlackMapEntry); hash_ctl.entrysize = sizeof(LocalBlackMapEntry); - hash_ctl.hcxt = CurrentMemoryContext; - hash_ctl.hash = tag_hash; + hash_ctl.hcxt = CurrentMemoryContext; + hash_ctl.hash = tag_hash; - local_disk_quota_black_map = hash_create("local blackmap whose quota limitation is reached", - MAX_LOCAL_DISK_QUOTA_BLACK_ENTRIES, - &hash_ctl, - HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); + local_disk_quota_black_map = + hash_create("local blackmap whose quota limitation is reached", MAX_LOCAL_DISK_QUOTA_BLACK_ENTRIES, + &hash_ctl, HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); } /* * Check whether the diskquota state is ready -*/ + */ bool check_diskquota_state_is_ready(void) { - bool is_ready = false; - bool connected = false; - bool pushed_active_snap = false; - bool ret = true; + bool is_ready = false; + bool connected = false; + bool pushed_active_snap = false; + bool ret = true; StartTransactionCommand(); @@ -565,13 +531,12 @@ check_diskquota_state_is_ready(void) if (SPI_OK_CONNECT != SPI_connect()) { ereport(ERROR, - (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("[diskquota] unable to connect to execute SPI query"))); + (errcode(ERRCODE_INTERNAL_ERROR), errmsg("[diskquota] unable to connect to execute SPI query"))); } connected = true; PushActiveSnapshot(GetTransactionSnapshot()); pushed_active_snap = true; - is_ready = do_check_diskquota_state_is_ready(); + is_ready = do_check_diskquota_state_is_ready(); } PG_CATCH(); { @@ -584,10 +549,8 @@ check_diskquota_state_is_ready(void) RESUME_INTERRUPTS(); } PG_END_TRY(); - if (connected) - SPI_finish(); - if (pushed_active_snap) - PopActiveSnapshot(); + if (connected) SPI_finish(); + if (pushed_active_snap) PopActiveSnapshot(); if (ret) CommitTransactionCommand(); else @@ -608,21 +571,23 @@ check_diskquota_state_is_ready(void) static bool do_check_diskquota_state_is_ready(void) { - int ret; - TupleDesc tupdesc; - int i; + int ret; + TupleDesc tupdesc; + int i; StringInfoData sql_command; initStringInfo(&sql_command); /* Add current database to the monitored db cache on all segments */ - appendStringInfo(&sql_command, - "SELECT diskquota.diskquota_fetch_table_stat(%d, ARRAY[]::oid[]) " - "FROM gp_dist_random('gp_id');", ADD_DB_TO_MONITOR); + appendStringInfo(&sql_command, + "SELECT diskquota.diskquota_fetch_table_stat(%d, ARRAY[]::oid[]) " + "FROM gp_dist_random('gp_id');", + ADD_DB_TO_MONITOR); ret = SPI_execute(sql_command.data, true, 0); - if (ret != SPI_OK_SELECT) { + if (ret != SPI_OK_SELECT) + { pfree(sql_command.data); ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("[diskquota] check diskquota state SPI_execute failed: error code %d", ret))); + errmsg("[diskquota] check diskquota state SPI_execute failed: error code %d", ret))); } pfree(sql_command.data); /* Add current 
database to the monitored db cache on coordinator */ @@ -634,28 +599,26 @@ do_check_diskquota_state_is_ready(void) ret = SPI_execute("select state from diskquota.state", true, 0); if (ret != SPI_OK_SELECT) ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("[diskquota] check diskquota state SPI_execute failed: error code %d", ret))); + errmsg("[diskquota] check diskquota state SPI_execute failed: error code %d", ret))); tupdesc = SPI_tuptable->tupdesc; - if (tupdesc->natts != 1 || - ((tupdesc)->attrs[0])->atttypid != INT4OID) + if (tupdesc->natts != 1 || ((tupdesc)->attrs[0])->atttypid != INT4OID) { - ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("[diskquota] table \"state\" is corrupted in database \"%s\"," - " please recreate diskquota extension", - get_database_name(MyDatabaseId)))); + ereport(ERROR, + (errcode(ERRCODE_INTERNAL_ERROR), errmsg("[diskquota] table \"state\" is corrupted in database \"%s\"," + " please recreate diskquota extension", + get_database_name(MyDatabaseId)))); } for (i = 0; i < SPI_processed; i++) { - HeapTuple tup = SPI_tuptable->vals[i]; - Datum dat; - int state; - bool isnull; + HeapTuple tup = SPI_tuptable->vals[i]; + Datum dat; + int state; + bool isnull; dat = SPI_getbinval(tup, tupdesc, 1, &isnull); - if (isnull) - continue; + if (isnull) continue; state = DatumGetInt64(dat); if (state == DISKQUOTA_READY_STATE) @@ -664,7 +627,7 @@ do_check_diskquota_state_is_ready(void) } } ereport(WARNING, (errmsg("Diskquota is not in ready state. " - "please run UDF init_table_size_table()"))); + "please run UDF init_table_size_table()"))); return false; } @@ -678,21 +641,18 @@ void refresh_disk_quota_model(bool is_init) { SEGCOUNT = getgpsegmentCount(); - if (SEGCOUNT <= 0 ) + if (SEGCOUNT <= 0) { - ereport(ERROR, - (errmsg("[diskquota] there is no active segment, SEGCOUNT is %d", SEGCOUNT))); + ereport(ERROR, (errmsg("[diskquota] there is no active segment, SEGCOUNT is %d", SEGCOUNT))); } - if (is_init) - ereport(LOG, (errmsg("[diskquota] initialize quota model started"))); + if (is_init) ereport(LOG, (errmsg("[diskquota] initialize quota model started"))); /* skip refresh model when load_quotas failed */ if (load_quotas()) { refresh_disk_quota_usage(is_init); } - if (is_init) - ereport(LOG, (errmsg("[diskquota] initialize quota model finished"))); + if (is_init) ereport(LOG, (errmsg("[diskquota] initialize quota model finished"))); } /* @@ -704,10 +664,10 @@ refresh_disk_quota_model(bool is_init) static void refresh_disk_quota_usage(bool is_init) { - bool connected = false; - bool pushed_active_snap = false; - bool ret = true; - HTAB *local_active_table_stat_map = NULL; + bool connected = false; + bool pushed_active_snap = false; + bool ret = true; + HTAB *local_active_table_stat_map = NULL; StartTransactionCommand(); @@ -721,8 +681,7 @@ refresh_disk_quota_usage(bool is_init) if (SPI_OK_CONNECT != SPI_connect()) { ereport(ERROR, - (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("[diskquota] unable to connect to execute SPI query"))); + (errcode(ERRCODE_INTERNAL_ERROR), errmsg("[diskquota] unable to connect to execute SPI query"))); } connected = true; PushActiveSnapshot(GetTransactionSnapshot()); @@ -734,7 +693,8 @@ refresh_disk_quota_usage(bool is_init) local_active_table_stat_map = gp_fetch_active_tables(is_init); /* recalculate the disk usage of table, schema and role */ calculate_table_disk_usage(is_init, local_active_table_stat_map); - for (QuotaType type = 0; type < NUM_QUOTA_TYPES; ++type) { + for (QuotaType type = 0; type < NUM_QUOTA_TYPES; 
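
The transaction-plus-SPI scaffolding in check_diskquota_state_is_ready recurs nearly verbatim in refresh_disk_quota_usage and load_quotas, so it is worth isolating. Below is a distilled skeleton: server-side C that only runs inside a backend or background worker, where do_work stands in for the caller's SPI statements. The hunks above elide the PG_CATCH body, so its lines here are an assumption modeled on the usual HOLD_INTERRUPTS/EmitErrorReport/FlushErrorState sequence.

    #include "postgres.h"
    #include "access/xact.h"
    #include "executor/spi.h"
    #include "miscadmin.h"
    #include "utils/snapmgr.h"

    static void
    run_in_spi_transaction(void (*do_work)(void))
    {
        bool connected          = false;
        bool pushed_active_snap = false;
        bool ok                 = true;

        StartTransactionCommand();
        PG_TRY();
        {
            if (SPI_connect() != SPI_OK_CONNECT)
                ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR),
                                errmsg("unable to connect to execute SPI query")));
            connected = true;
            PushActiveSnapshot(GetTransactionSnapshot());
            pushed_active_snap = true;
            do_work(); /* the caller's SPI_execute() calls go here */
        }
        PG_CATCH();
        {
            ok = false; /* swallow the error and abort below */
            HOLD_INTERRUPTS();
            EmitErrorReport();
            FlushErrorState();
            RESUME_INTERRUPTS();
        }
        PG_END_TRY();
        if (connected) SPI_finish();
        if (pushed_active_snap) PopActiveSnapshot();
        if (ok)
            CommitTransactionCommand();
        else
            AbortCurrentTransaction();
    }
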
++type) + { check_quota_map(type); } /* flush local table_size_map to user table table_size */ @@ -742,8 +702,7 @@ refresh_disk_quota_usage(bool is_init) /* copy local black map back to shared black map */ flush_local_black_map(); /* Dispatch blackmap entries to segments to perform hard-limit. */ - if (diskquota_hardlimit) - dispatch_blackmap(local_active_table_stat_map); + if (diskquota_hardlimit) dispatch_blackmap(local_active_table_stat_map); hash_destroy(local_active_table_stat_map); } PG_CATCH(); @@ -757,10 +716,8 @@ refresh_disk_quota_usage(bool is_init) RESUME_INTERRUPTS(); } PG_END_TRY(); - if (connected) - SPI_finish(); - if (pushed_active_snap) - PopActiveSnapshot(); + if (connected) SPI_finish(); + if (pushed_active_snap) PopActiveSnapshot(); if (ret) CommitTransactionCommand(); else @@ -769,10 +726,10 @@ refresh_disk_quota_usage(bool is_init) return; } -static List* +static List * merge_uncommitted_table_to_oidlist(List *oidlist) { - HASH_SEQ_STATUS iter; + HASH_SEQ_STATUS iter; DiskQuotaRelationCacheEntry *entry; if (relation_cache == NULL) @@ -809,16 +766,16 @@ merge_uncommitted_table_to_oidlist(List *oidlist) static void calculate_table_disk_usage(bool is_init, HTAB *local_active_table_stat_map) { - bool table_size_map_found; - bool active_tbl_found; - int64 updated_total_size; - TableSizeEntry *tsentry = NULL; - Oid relOid; - HASH_SEQ_STATUS iter; + bool table_size_map_found; + bool active_tbl_found; + int64 updated_total_size; + TableSizeEntry *tsentry = NULL; + Oid relOid; + HASH_SEQ_STATUS iter; DiskQuotaActiveTableEntry *active_table_entry; - TableEntryKey key; - List *oidlist; - ListCell *l; + TableEntryKey key; + List *oidlist; + ListCell *l; /* * unset is_exist flag for tsentry in table_size_map this is used to @@ -838,30 +795,29 @@ calculate_table_disk_usage(bool is_init, HTAB *local_active_table_stat_map) oidlist = get_rel_oid_list(); oidlist = merge_uncommitted_table_to_oidlist(oidlist); - - foreach(l, oidlist) + + foreach (l, oidlist) { - HeapTuple classTup; - Form_pg_class classForm = NULL; - Oid relnamespace = InvalidOid; - Oid relowner = InvalidOid; - Oid reltablespace = InvalidOid; - relOid = lfirst_oid(l); + HeapTuple classTup; + Form_pg_class classForm = NULL; + Oid relnamespace = InvalidOid; + Oid relowner = InvalidOid; + Oid reltablespace = InvalidOid; + relOid = lfirst_oid(l); classTup = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(relOid)); if (HeapTupleIsValid(classTup)) { - classForm = (Form_pg_class) GETSTRUCT(classTup); - relnamespace = classForm->relnamespace; - relowner = classForm->relowner; + classForm = (Form_pg_class)GETSTRUCT(classTup); + relnamespace = classForm->relnamespace; + relowner = classForm->relowner; reltablespace = classForm->reltablespace; if (!OidIsValid(reltablespace)) { reltablespace = MyDatabaseTableSpace; } - } - else + } else { LWLockAcquire(diskquota_locks.relation_cache_lock, LW_SHARED); DiskQuotaRelationCacheEntry *relation_entry = hash_search(relation_cache, &relOid, HASH_FIND, NULL); @@ -871,8 +827,8 @@ calculate_table_disk_usage(bool is_init, HTAB *local_active_table_stat_map) LWLockRelease(diskquota_locks.relation_cache_lock); continue; } - relnamespace = relation_entry->namespaceoid; - relowner = relation_entry->owneroid; + relnamespace = relation_entry->namespaceoid; + relowner = relation_entry->owneroid; reltablespace = relation_entry->rnode.node.spcNode; LWLockRelease(diskquota_locks.relation_cache_lock); } @@ -884,27 +840,25 @@ calculate_table_disk_usage(bool is_init, HTAB *local_active_table_stat_map) */ for 
(int i = -1; i < SEGCOUNT; i++) { - key.segid = i; + key.segid = i; key.reloid = relOid; - tsentry = (TableSizeEntry *) hash_search(table_size_map, - &key, - HASH_ENTER, &table_size_map_found); + tsentry = (TableSizeEntry *)hash_search(table_size_map, &key, HASH_ENTER, &table_size_map_found); if (!table_size_map_found) { - tsentry->reloid = relOid; - tsentry->segid = key.segid; - tsentry->totalsize = 0; - tsentry->owneroid = InvalidOid; - tsentry->namespaceoid = InvalidOid; + tsentry->reloid = relOid; + tsentry->segid = key.segid; + tsentry->totalsize = 0; + tsentry->owneroid = InvalidOid; + tsentry->namespaceoid = InvalidOid; tsentry->tablespaceoid = InvalidOid; - tsentry->need_flush = true; + tsentry->need_flush = true; } /* mark tsentry is_exist */ - if (tsentry) - tsentry->is_exist = true; - active_table_entry = (DiskQuotaActiveTableEntry *) hash_search(local_active_table_stat_map, &key, HASH_FIND, &active_tbl_found); + if (tsentry) tsentry->is_exist = true; + active_table_entry = (DiskQuotaActiveTableEntry *)hash_search(local_active_table_stat_map, &key, HASH_FIND, + &active_tbl_found); /* skip to recalculate the tables which are not in active list */ if (active_tbl_found) @@ -917,21 +871,22 @@ calculate_table_disk_usage(bool is_init, HTAB *local_active_table_stat_map) active_table_entry->tablesize += calculate_table_size(relOid); Gp_role = GP_ROLE_DISPATCH; - } /* firstly calculate the updated total size of a table */ updated_total_size = active_table_entry->tablesize - tsentry->totalsize; /* update the table_size entry */ - tsentry->totalsize = (int64) active_table_entry->tablesize; + tsentry->totalsize = (int64)active_table_entry->tablesize; tsentry->need_flush = true; - /* update the disk usage, there may be entries in the map whose keys are InvalidOid as the tsentry does not exist in the table_size_map */ + /* update the disk usage, there may be entries in the map whose keys are InvalidOid as the tsentry does + * not exist in the table_size_map */ update_size_for_quota(updated_total_size, NAMESPACE_QUOTA, (Oid[]){tsentry->namespaceoid}, key.segid); update_size_for_quota(updated_total_size, ROLE_QUOTA, (Oid[]){tsentry->owneroid}, key.segid); - update_size_for_quota(updated_total_size, ROLE_TABLESPACE_QUOTA, (Oid[]){tsentry->owneroid, tsentry->tablespaceoid}, key.segid); - update_size_for_quota(updated_total_size, NAMESPACE_TABLESPACE_QUOTA, (Oid[]){tsentry->namespaceoid, tsentry->tablespaceoid}, key.segid); - + update_size_for_quota(updated_total_size, ROLE_TABLESPACE_QUOTA, + (Oid[]){tsentry->owneroid, tsentry->tablespaceoid}, key.segid); + update_size_for_quota(updated_total_size, NAMESPACE_TABLESPACE_QUOTA, + (Oid[]){tsentry->namespaceoid, tsentry->tablespaceoid}, key.segid); } /* table size info doesn't need to flush at init quota model stage */ if (is_init) @@ -942,56 +897,32 @@ calculate_table_disk_usage(bool is_init, HTAB *local_active_table_stat_map) /* if schema change, transfer the file size */ if (tsentry->namespaceoid != relnamespace) { - transfer_table_for_quota( - tsentry->totalsize, - NAMESPACE_QUOTA, - (Oid[]){tsentry->namespaceoid}, - (Oid[]){relnamespace}, - key.segid); - transfer_table_for_quota( - tsentry->totalsize, - NAMESPACE_TABLESPACE_QUOTA, - (Oid[]){tsentry->namespaceoid, tsentry->tablespaceoid}, - (Oid[]){relnamespace, tsentry->tablespaceoid}, - key.segid); + transfer_table_for_quota(tsentry->totalsize, NAMESPACE_QUOTA, (Oid[]){tsentry->namespaceoid}, + (Oid[]){relnamespace}, key.segid); + transfer_table_for_quota(tsentry->totalsize, 
NAMESPACE_TABLESPACE_QUOTA, + (Oid[]){tsentry->namespaceoid, tsentry->tablespaceoid}, + (Oid[]){relnamespace, tsentry->tablespaceoid}, key.segid); tsentry->namespaceoid = relnamespace; } /* if owner change, transfer the file size */ if (tsentry->owneroid != relowner) { - transfer_table_for_quota( - tsentry->totalsize, - ROLE_QUOTA, - (Oid[]){tsentry->owneroid}, - (Oid[]){relowner}, - key.segid - ); - transfer_table_for_quota( - tsentry->totalsize, - ROLE_TABLESPACE_QUOTA, - (Oid[]){tsentry->owneroid, tsentry->tablespaceoid}, - (Oid[]){relowner, tsentry->tablespaceoid}, - key.segid - ); + transfer_table_for_quota(tsentry->totalsize, ROLE_QUOTA, (Oid[]){tsentry->owneroid}, (Oid[]){relowner}, + key.segid); + transfer_table_for_quota(tsentry->totalsize, ROLE_TABLESPACE_QUOTA, + (Oid[]){tsentry->owneroid, tsentry->tablespaceoid}, + (Oid[]){relowner, tsentry->tablespaceoid}, key.segid); tsentry->owneroid = relowner; } if (tsentry->tablespaceoid != reltablespace) { - transfer_table_for_quota( - tsentry->totalsize, - NAMESPACE_TABLESPACE_QUOTA, - (Oid[]){tsentry->namespaceoid, tsentry->tablespaceoid}, - (Oid[]){tsentry->namespaceoid, reltablespace}, - key.segid - ); - transfer_table_for_quota( - tsentry->totalsize, - ROLE_TABLESPACE_QUOTA, - (Oid[]){tsentry->owneroid, tsentry->tablespaceoid}, - (Oid[]){tsentry->owneroid, reltablespace}, - key.segid - ); + transfer_table_for_quota(tsentry->totalsize, NAMESPACE_TABLESPACE_QUOTA, + (Oid[]){tsentry->namespaceoid, tsentry->tablespaceoid}, + (Oid[]){tsentry->namespaceoid, reltablespace}, key.segid); + transfer_table_for_quota(tsentry->totalsize, ROLE_TABLESPACE_QUOTA, + (Oid[]){tsentry->owneroid, tsentry->tablespaceoid}, + (Oid[]){tsentry->owneroid, reltablespace}, key.segid); tsentry->tablespaceoid = reltablespace; } } @@ -1014,8 +945,10 @@ calculate_table_disk_usage(bool is_init, HTAB *local_active_table_stat_map) { update_size_for_quota(-tsentry->totalsize, NAMESPACE_QUOTA, (Oid[]){tsentry->namespaceoid}, tsentry->segid); update_size_for_quota(-tsentry->totalsize, ROLE_QUOTA, (Oid[]){tsentry->owneroid}, tsentry->segid); - update_size_for_quota(-tsentry->totalsize, ROLE_TABLESPACE_QUOTA, (Oid[]){tsentry->owneroid, tsentry->tablespaceoid}, tsentry->segid); - update_size_for_quota(-tsentry->totalsize, NAMESPACE_TABLESPACE_QUOTA, (Oid[]){tsentry->namespaceoid, tsentry->tablespaceoid}, tsentry->segid); + update_size_for_quota(-tsentry->totalsize, ROLE_TABLESPACE_QUOTA, + (Oid[]){tsentry->owneroid, tsentry->tablespaceoid}, tsentry->segid); + update_size_for_quota(-tsentry->totalsize, NAMESPACE_TABLESPACE_QUOTA, + (Oid[]){tsentry->namespaceoid, tsentry->tablespaceoid}, tsentry->segid); } } } @@ -1031,19 +964,19 @@ flush_to_table_size(void) { HASH_SEQ_STATUS iter; TableSizeEntry *tsentry = NULL; - StringInfoData delete_statement; - StringInfoData insert_statement; - StringInfoData deleted_table_expr; - bool delete_statement_flag = false; - bool insert_statement_flag = false; - int ret; - int extMajorVersion= get_ext_major_version(); + StringInfoData delete_statement; + StringInfoData insert_statement; + StringInfoData deleted_table_expr; + bool delete_statement_flag = false; + bool insert_statement_flag = false; + int ret; + int extMajorVersion = get_ext_major_version(); /* TODO: Add flush_size_interval to avoid flushing size info in every loop */ /* Disable ORCA since it does not support non-scalar subqueries. 
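
Aside: the three change-detection branches above all reduce to one primitive. transfer_table_for_quota subtracts the table's size from the quota entry under the old keys and adds it under the new keys at the same segid, so a rename or ALTER never double-counts. A standalone model with a plain array in place of the quota hash map and hypothetical schema oids:

    #include <stdio.h>
    #include <stdint.h>

    typedef unsigned int Oid;

    static struct { Oid key; int64_t size; } ns_quota[] = {
        {2200, 500}, {16500, 0}, /* hypothetical schemas and usage */
    };

    static void update_size(Oid key, int64_t delta)
    {
        for (int i = 0; i < 2; i++)
            if (ns_quota[i].key == key) { ns_quota[i].size += delta; return; }
    }

    /* same shape as transfer_table_for_quota(): minus old, plus new */
    static void transfer(int64_t totalsize, Oid old_key, Oid new_key)
    {
        update_size(old_key, -totalsize);
        update_size(new_key, totalsize);
    }

    int main(void)
    {
        transfer(200, 2200, 16500); /* e.g. ALTER TABLE ... SET SCHEMA */
        for (int i = 0; i < 2; i++)
            printf("schema %u -> %lld bytes\n",
                   ns_quota[i].key, (long long) ns_quota[i].size);
        return 0;
    }
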
*/ bool old_optimizer = optimizer; - optimizer = false; + optimizer = false; initStringInfo(&deleted_table_expr); appendStringInfo(&deleted_table_expr, "WITH deleted_table AS ( VALUES "); @@ -1065,15 +998,12 @@ flush_to_table_size(void) appendStringInfo(&deleted_table_expr, "(%u,%d), ", tsentry->reloid, tsentry->segid); break; default: - ereport(ERROR, - (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("[diskquota] unknown diskquota extension version: %d", extMajorVersion))); + ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("[diskquota] unknown diskquota extension version: %d", extMajorVersion))); } delete_statement_flag = true; - hash_search(table_size_map, - &tsentry->reloid, - HASH_REMOVE, NULL); + hash_search(table_size_map, &tsentry->reloid, HASH_REMOVE, NULL); } /* update the table size by delete+insert in table table_size */ else if (tsentry->need_flush == true) @@ -1092,14 +1022,14 @@ flush_to_table_size(void) break; case 2: appendStringInfo(&deleted_table_expr, "(%u,%d), ", tsentry->reloid, tsentry->segid); - appendStringInfo(&insert_statement, "(%u,%ld,%d), ", tsentry->reloid, tsentry->totalsize, tsentry->segid); + appendStringInfo(&insert_statement, "(%u,%ld,%d), ", tsentry->reloid, tsentry->totalsize, + tsentry->segid); delete_statement_flag = true; insert_statement_flag = true; break; default: - ereport(ERROR, - (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("[diskquota] unknown diskquota extension version: %d", extMajorVersion))); + ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("[diskquota] unknown diskquota extension version: %d", extMajorVersion))); } } } @@ -1112,31 +1042,33 @@ flush_to_table_size(void) { /* concatenate all the need_to_flush table to SQL string */ initStringInfo(&delete_statement); - appendStringInfoString(&delete_statement, (const char *) deleted_table_expr.data); + appendStringInfoString(&delete_statement, (const char *)deleted_table_expr.data); switch (extMajorVersion) { case 1: - appendStringInfo(&delete_statement, "delete from diskquota.table_size where tableid in ( SELECT * FROM deleted_table );"); + appendStringInfo(&delete_statement, + "delete from diskquota.table_size where tableid in ( SELECT * FROM deleted_table );"); break; case 2: - appendStringInfo(&delete_statement, "delete from diskquota.table_size where (tableid, segid) in ( SELECT * FROM deleted_table );"); + appendStringInfo( + &delete_statement, + "delete from diskquota.table_size where (tableid, segid) in ( SELECT * FROM deleted_table );"); break; default: - ereport(ERROR, - (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("[diskquota] unknown diskquota extension version: %d", extMajorVersion))); + ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("[diskquota] unknown diskquota extension version: %d", extMajorVersion))); } ret = SPI_execute(delete_statement.data, false, 0); if (ret != SPI_OK_DELETE) ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("[diskquota] flush_to_table_size SPI_execute failed: error code %d", ret))); + errmsg("[diskquota] flush_to_table_size SPI_execute failed: error code %d", ret))); } if (insert_statement_flag) { ret = SPI_execute(insert_statement.data, false, 0); if (ret != SPI_OK_INSERT) ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("[diskquota] flush_to_table_size SPI_execute failed: error code %d", ret))); + errmsg("[diskquota] flush_to_table_size SPI_execute failed: error code %d", ret))); } optimizer = old_optimizer; @@ -1150,10 +1082,10 @@ flush_to_table_size(void) static void flush_local_black_map(void) { - 
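
Aside: to make the flush concrete, for extension major version 2 the loop above batches one DELETE keyed on (tableid, segid) and one multi-row INSERT. This standalone sketch assembles both strings with snprintf in place of StringInfo over hypothetical entries; the INSERT prefix is not visible in these hunks, so its exact wording here is an assumption:

    #include <stdio.h>

    int main(void)
    {
        /* hypothetical dirty entries: (reloid, totalsize, segid) */
        struct { unsigned reloid; long long size; int segid; } dirty[] = {
            {16384, 32768, -1}, {16384, 10922, 0}, {16384, 10923, 1},
        };
        char del[512], ins[512];
        int  dlen = 0, ilen = 0;

        dlen += snprintf(del + dlen, sizeof(del) - dlen,
                         "WITH deleted_table AS ( VALUES ");
        ilen += snprintf(ins + ilen, sizeof(ins) - ilen,
                         "insert into diskquota.table_size values ");
        for (int i = 0; i < 3; i++)
        {
            dlen += snprintf(del + dlen, sizeof(del) - dlen, "(%u,%d)%s",
                             dirty[i].reloid, dirty[i].segid, i < 2 ? ", " : " ");
            ilen += snprintf(ins + ilen, sizeof(ins) - ilen, "(%u,%lld,%d)%s",
                             dirty[i].reloid, dirty[i].size, dirty[i].segid,
                             i < 2 ? ", " : ";");
        }
        snprintf(del + dlen, sizeof(del) - dlen,
                 ") delete from diskquota.table_size "
                 "where (tableid, segid) in ( SELECT * FROM deleted_table );");
        printf("%s\n%s\n", del, ins);
        return 0;
    }
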
HASH_SEQ_STATUS iter; - LocalBlackMapEntry *localblackentry; + HASH_SEQ_STATUS iter; + LocalBlackMapEntry *localblackentry; GlobalBlackMapEntry *blackentry; - bool found; + bool found; LWLockAcquire(diskquota_locks.black_map_lock, LW_EXCLUSIVE); @@ -1162,40 +1094,33 @@ flush_local_black_map(void) { if (localblackentry->isexceeded) { - blackentry = (GlobalBlackMapEntry *) hash_search(disk_quota_black_map, - (void *) &localblackentry->keyitem, - HASH_ENTER_NULL, &found); + blackentry = (GlobalBlackMapEntry *)hash_search(disk_quota_black_map, (void *)&localblackentry->keyitem, + HASH_ENTER_NULL, &found); if (blackentry == NULL) { ereport(WARNING, (errmsg("[diskquota] Shared disk quota black map size limit reached." - "Some out-of-limit schemas or roles will be lost" - "in blacklist."))); - } - else + "Some out-of-limit schemas or roles will be lost" + "in blacklist."))); + } else { /* new db objects which exceed quota limit */ if (!found) { - blackentry->keyitem.targetoid = localblackentry->keyitem.targetoid; - blackentry->keyitem.databaseoid = MyDatabaseId; - blackentry->keyitem.targettype = localblackentry->keyitem.targettype; + blackentry->keyitem.targetoid = localblackentry->keyitem.targetoid; + blackentry->keyitem.databaseoid = MyDatabaseId; + blackentry->keyitem.targettype = localblackentry->keyitem.targettype; blackentry->keyitem.tablespaceoid = localblackentry->keyitem.tablespaceoid; - blackentry->segexceeded = localblackentry->segexceeded; + blackentry->segexceeded = localblackentry->segexceeded; } } - blackentry->segexceeded = localblackentry->segexceeded; - localblackentry->isexceeded = false; + blackentry->segexceeded = localblackentry->segexceeded; + localblackentry->isexceeded = false; localblackentry->segexceeded = false; - } - else + } else { /* db objects are removed or under quota limit in the new loop */ - (void) hash_search(disk_quota_black_map, - (void *) &localblackentry->keyitem, - HASH_REMOVE, NULL); - (void) hash_search(local_disk_quota_black_map, - (void *) &localblackentry->keyitem, - HASH_REMOVE, NULL); + (void)hash_search(disk_quota_black_map, (void *)&localblackentry->keyitem, HASH_REMOVE, NULL); + (void)hash_search(local_disk_quota_black_map, (void *)&localblackentry->keyitem, HASH_REMOVE, NULL); } } LWLockRelease(diskquota_locks.black_map_lock); @@ -1207,14 +1132,14 @@ flush_local_black_map(void) static void dispatch_blackmap(HTAB *local_active_table_stat_map) { - HASH_SEQ_STATUS hash_seq; - GlobalBlackMapEntry *blackmap_entry; - DiskQuotaActiveTableEntry *active_table_entry; - int num_entries, count = 0; - CdbPgResults cdb_pgresults = {NULL, 0}; - StringInfoData rows; - StringInfoData active_oids; - StringInfoData sql; + HASH_SEQ_STATUS hash_seq; + GlobalBlackMapEntry *blackmap_entry; + DiskQuotaActiveTableEntry *active_table_entry; + int num_entries, count = 0; + CdbPgResults cdb_pgresults = {NULL, 0}; + StringInfoData rows; + StringInfoData active_oids; + StringInfoData sql; initStringInfo(&rows); initStringInfo(&active_oids); @@ -1225,35 +1150,29 @@ dispatch_blackmap(HTAB *local_active_table_stat_map) hash_seq_init(&hash_seq, disk_quota_black_map); while ((blackmap_entry = hash_seq_search(&hash_seq)) != NULL) { - appendStringInfo(&rows, - "ROW(%d, %d, %d, %d, %s)", - blackmap_entry->keyitem.targetoid, - blackmap_entry->keyitem.databaseoid, - blackmap_entry->keyitem.tablespaceoid, - blackmap_entry->keyitem.targettype, - blackmap_entry->segexceeded ? 
"true" : "false"); - - if (++count != num_entries) - appendStringInfo(&rows, ","); + appendStringInfo(&rows, "ROW(%d, %d, %d, %d, %s)", blackmap_entry->keyitem.targetoid, + blackmap_entry->keyitem.databaseoid, blackmap_entry->keyitem.tablespaceoid, + blackmap_entry->keyitem.targettype, blackmap_entry->segexceeded ? "true" : "false"); + + if (++count != num_entries) appendStringInfo(&rows, ","); } LWLockRelease(diskquota_locks.black_map_lock); - count = 0; + count = 0; num_entries = hash_get_num_entries(local_active_table_stat_map); hash_seq_init(&hash_seq, local_active_table_stat_map); while ((active_table_entry = hash_seq_search(&hash_seq)) != NULL) { - appendStringInfo(&active_oids, - "%d", active_table_entry->reloid); + appendStringInfo(&active_oids, "%d", active_table_entry->reloid); - if (++count != num_entries) - appendStringInfo(&active_oids, ","); + if (++count != num_entries) appendStringInfo(&active_oids, ","); } appendStringInfo(&sql, - "select diskquota.refresh_blackmap(" - "ARRAY[%s]::diskquota.blackmap_entry[], " - "ARRAY[%s]::oid[])", rows.data, active_oids.data); + "select diskquota.refresh_blackmap(" + "ARRAY[%s]::diskquota.blackmap_entry[], " + "ARRAY[%s]::oid[])", + rows.data, active_oids.data); CdbDispatchCommand(sql.data, DF_NONE, &cdb_pgresults); pfree(rows.data); @@ -1268,12 +1187,10 @@ dispatch_blackmap(HTAB *local_active_table_stat_map) void truncateStringInfo(StringInfo str, int nchars) { - if (str && - str->len > nchars) + if (str && str->len > nchars) { - Assert(str->data != NULL && - str->len <= str->maxlen); - str->len = nchars; + Assert(str->data != NULL && str->len <= str->maxlen); + str->len = nchars; str->data[nchars] = '\0'; } } @@ -1284,9 +1201,9 @@ truncateStringInfo(StringInfo str, int nchars) static bool load_quotas(void) { - bool connected = false; - bool pushed_active_snap = false; - bool ret = true; + bool connected = false; + bool pushed_active_snap = false; + bool ret = true; StartTransactionCommand(); @@ -1300,9 +1217,8 @@ load_quotas(void) int ret_code = SPI_connect(); if (ret_code != SPI_OK_CONNECT) { - ereport(ERROR, - (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("[diskquota] unable to connect to execute SPI query, return code: %d", ret_code))); + ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("[diskquota] unable to connect to execute SPI query, return code: %d", ret_code))); } connected = true; PushActiveSnapshot(GetTransactionSnapshot()); @@ -1320,10 +1236,8 @@ load_quotas(void) RESUME_INTERRUPTS(); } PG_END_TRY(); - if (connected) - SPI_finish(); - if (pushed_active_snap) - PopActiveSnapshot(); + if (connected) SPI_finish(); + if (pushed_active_snap) PopActiveSnapshot(); if (ret) CommitTransactionCommand(); else @@ -1334,14 +1248,14 @@ load_quotas(void) /* * Load quotas from diskquota configuration table(quota_config). -*/ + */ static void do_load_quotas(void) { - int ret; - TupleDesc tupdesc; - int i; - int extMajorVersion; + int ret; + TupleDesc tupdesc; + int i; + int extMajorVersion; /* * TODO: we should skip to reload quota config when there is no change in @@ -1355,7 +1269,7 @@ do_load_quotas(void) * read quotas from diskquota.quota_config and target table */ - /* + /* * We need to check the extension version. * Why do we need this? * As when we upgrade diskquota extension from an old to a new version, @@ -1365,74 +1279,73 @@ do_load_quotas(void) * the init work will fail and diskquota can not work correctly. * Maybe this is not the best sulotion, only a work arround. Optimizing * the init procedure is a better solution. 
- */ + */ switch (extMajorVersion) { case 1: - ret = SPI_execute("select targetoid, quotatype, quotalimitMB, 0 as segratio, 0 as tablespaceoid from diskquota.quota_config", true, 0); + ret = SPI_execute( + "select targetoid, quotatype, quotalimitMB, 0 as segratio, 0 as tablespaceoid from " + "diskquota.quota_config", + true, 0); break; case 2: ret = SPI_execute( - "SELECT c.targetOid, c.quotaType, c.quotalimitMB, COALESCE(c.segratio, 0) AS segratio, COALESCE(t.tablespaceoid, 0) AS tablespaceoid " - "FROM diskquota.quota_config AS c LEFT OUTER JOIN diskquota.target AS t " - "ON c.targetOid = t.primaryOid and c.quotaType = t.quotaType", true, 0); + "SELECT c.targetOid, c.quotaType, c.quotalimitMB, COALESCE(c.segratio, 0) AS segratio, " + "COALESCE(t.tablespaceoid, 0) AS tablespaceoid " + "FROM diskquota.quota_config AS c LEFT OUTER JOIN diskquota.target AS t " + "ON c.targetOid = t.primaryOid and c.quotaType = t.quotaType", + true, 0); break; default: - ereport(ERROR, - (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("[diskquota] unknown diskquota extension version: %d", extMajorVersion))); + ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("[diskquota] unknown diskquota extension version: %d", extMajorVersion))); } if (ret != SPI_OK_SELECT) - ereport(ERROR, - (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("[diskquota] load_quotas SPI_execute failed: error code %d", ret))); + ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("[diskquota] load_quotas SPI_execute failed: error code %d", ret))); tupdesc = SPI_tuptable->tupdesc; - if (tupdesc->natts != NUM_QUOTA_CONFIG_ATTRS || - ((tupdesc)->attrs[0])->atttypid != OIDOID || - ((tupdesc)->attrs[1])->atttypid != INT4OID || - ((tupdesc)->attrs[2])->atttypid != INT8OID) + if (tupdesc->natts != NUM_QUOTA_CONFIG_ATTRS || ((tupdesc)->attrs[0])->atttypid != OIDOID || + ((tupdesc)->attrs[1])->atttypid != INT4OID || ((tupdesc)->attrs[2])->atttypid != INT8OID) { - ereport(ERROR, - (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("[diskquota] configuration table is corrupted in database \"%s\"," - " please recreate diskquota extension", - get_database_name(MyDatabaseId)))); + ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("[diskquota] configuration table is corrupted in database \"%s\"," + " please recreate diskquota extension", + get_database_name(MyDatabaseId)))); } for (i = 0; i < SPI_processed; i++) { - HeapTuple tup = SPI_tuptable->vals[i]; - Datum vals[NUM_QUOTA_CONFIG_ATTRS]; - bool isnull[NUM_QUOTA_CONFIG_ATTRS]; + HeapTuple tup = SPI_tuptable->vals[i]; + Datum vals[NUM_QUOTA_CONFIG_ATTRS]; + bool isnull[NUM_QUOTA_CONFIG_ATTRS]; for (int i = 0; i < NUM_QUOTA_CONFIG_ATTRS; ++i) { vals[i] = SPI_getbinval(tup, tupdesc, i + 1, &(isnull[i])); if (i <= 2 && isnull[i]) { - ereport(ERROR, - (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("[diskquota] attributes in configuration table MUST NOT be NULL"))); + ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("[diskquota] attributes in configuration table MUST NOT be NULL"))); } } - Oid targetOid = DatumGetObjectId(vals[0]); - int quotaType = (QuotaType) DatumGetInt32(vals[1]); - int64 quota_limit_mb = DatumGetInt64(vals[2]); - float segratio = DatumGetFloat4(vals[3]); - Oid spcOid = DatumGetObjectId(vals[4]); + Oid targetOid = DatumGetObjectId(vals[0]); + int quotaType = (QuotaType)DatumGetInt32(vals[1]); + int64 quota_limit_mb = DatumGetInt64(vals[2]); + float segratio = DatumGetFloat4(vals[3]); + Oid spcOid = DatumGetObjectId(vals[4]); if (spcOid == InvalidOid) { - if 
(quota_info[quotaType].num_keys != 1) { + if (quota_info[quotaType].num_keys != 1) + { ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("[diskquota] tablespace Oid MUST NOT be NULL for quota type: %d. num_keys: %d", - quotaType, quota_info[quotaType].num_keys))); + errmsg("[diskquota] tablespace Oid MUST NOT be NULL for quota type: %d. num_keys: %d", + quotaType, quota_info[quotaType].num_keys))); } update_limit_for_quota(quota_limit_mb * (1 << 20), segratio, quotaType, (Oid[]){targetOid}); - } - else + } else { update_limit_for_quota(quota_limit_mb * (1 << 20), segratio, quotaType, (Oid[]){targetOid, spcOid}); } @@ -1447,16 +1360,16 @@ do_load_quotas(void) static bool get_rel_owner_schema_tablespace(Oid relid, Oid *ownerOid, Oid *nsOid, Oid *tablespaceoid) { - HeapTuple tp; + HeapTuple tp; - tp = SearchSysCache1(RELOID, ObjectIdGetDatum(relid)); + tp = SearchSysCache1(RELOID, ObjectIdGetDatum(relid)); bool found = HeapTupleIsValid(tp); if (HeapTupleIsValid(tp)) { - Form_pg_class reltup = (Form_pg_class) GETSTRUCT(tp); + Form_pg_class reltup = (Form_pg_class)GETSTRUCT(tp); - *ownerOid = reltup->relowner; - *nsOid = reltup->relnamespace; + *ownerOid = reltup->relowner; + *nsOid = reltup->relnamespace; *tablespaceoid = reltup->reltablespace; if (!OidIsValid(*tablespaceoid)) @@ -1472,9 +1385,9 @@ get_rel_owner_schema_tablespace(Oid relid, Oid *ownerOid, Oid *nsOid, Oid *table static bool check_blackmap_by_relfilenode(RelFileNode relfilenode) { - bool found; - BlackMapEntry keyitem; - GlobalBlackMapEntry *entry; + bool found; + BlackMapEntry keyitem; + GlobalBlackMapEntry *entry; SIMPLE_FAULT_INJECTOR("check_blackmap_by_relfilenode"); @@ -1482,8 +1395,7 @@ check_blackmap_by_relfilenode(RelFileNode relfilenode) memcpy(&keyitem.relfilenode, &relfilenode, sizeof(RelFileNode)); LWLockAcquire(diskquota_locks.black_map_lock, LW_SHARED); - entry = hash_search(disk_quota_black_map, - &keyitem, HASH_FIND, &found); + entry = hash_search(disk_quota_black_map, &keyitem, HASH_FIND, &found); if (found && entry) { @@ -1504,8 +1416,7 @@ check_blackmap_by_relfilenode(RelFileNode relfilenode) * prepares the searching key of the global blackmap for us. 
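
Aside before the key-preparation helper that follows: prepare_blackmap_search_key chooses which oid becomes the key's targetoid from the quota type, and fills tablespaceoid only for the *_TABLESPACE_* types, so checking one relation costs at most NUM_QUOTA_TYPES probes under a single shared lock. A standalone restatement of that selection; the enum ordering is an assumption matching the quota_info initializer earlier in this file:

    #include <stdio.h>

    typedef unsigned int Oid;
    typedef enum
    {
        NAMESPACE_QUOTA,
        ROLE_QUOTA,
        NAMESPACE_TABLESPACE_QUOTA,
        ROLE_TABLESPACE_QUOTA,
        NUM_QUOTA_TYPES
    } QuotaType;

    /* which oid becomes keyitem.targetoid for this quota type */
    static Oid target_for(QuotaType t, Oid relowner, Oid relnamespace)
    {
        if (t == ROLE_QUOTA || t == ROLE_TABLESPACE_QUOTA) return relowner;
        return relnamespace; /* the NAMESPACE_* types */
    }

    /* the tablespace oid participates only for the *_TABLESPACE_* types */
    static Oid tablespace_for(QuotaType t, Oid reltablespace)
    {
        return (t == ROLE_TABLESPACE_QUOTA || t == NAMESPACE_TABLESPACE_QUOTA)
                       ? reltablespace
                       : 0; /* InvalidOid */
    }

    int main(void)
    {
        Oid owner = 10, ns = 2200, spc = 1663; /* hypothetical oids */
        for (int t = 0; t < NUM_QUOTA_TYPES; t++)
            printf("type %d -> targetoid %u tablespaceoid %u\n", t,
                   target_for((QuotaType) t, owner, ns),
                   tablespace_for((QuotaType) t, spc));
        return 0;
    }
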
*/ static void -prepare_blackmap_search_key(BlackMapEntry *keyitem, QuotaType type, - Oid relowner, Oid relnamespace, Oid reltablespace) +prepare_blackmap_search_key(BlackMapEntry *keyitem, QuotaType type, Oid relowner, Oid relnamespace, Oid reltablespace) { Assert(keyitem != NULL); memset(keyitem, 0, sizeof(BlackMapEntry)); @@ -1514,9 +1425,7 @@ prepare_blackmap_search_key(BlackMapEntry *keyitem, QuotaType type, else if (type == NAMESPACE_QUOTA || type == NAMESPACE_TABLESPACE_QUOTA) keyitem->targetoid = relnamespace; else - ereport(ERROR, - (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("[diskquota] unknown quota type: %d", type))); + ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), errmsg("[diskquota] unknown quota type: %d", type))); if (type == ROLE_TABLESPACE_QUOTA || type == NAMESPACE_TABLESPACE_QUOTA) keyitem->tablespaceoid = reltablespace; @@ -1526,7 +1435,7 @@ prepare_blackmap_search_key(BlackMapEntry *keyitem, QuotaType type, keyitem->tablespaceoid = InvalidOid; } keyitem->databaseoid = MyDatabaseId; - keyitem->targettype = type; + keyitem->targettype = type; } /* @@ -1537,11 +1446,11 @@ prepare_blackmap_search_key(BlackMapEntry *keyitem, QuotaType type, static bool check_blackmap_by_reloid(Oid reloid) { - Oid ownerOid = InvalidOid; - Oid nsOid = InvalidOid; - Oid tablespaceoid = InvalidOid; - bool found; - BlackMapEntry keyitem; + Oid ownerOid = InvalidOid; + Oid nsOid = InvalidOid; + Oid tablespaceoid = InvalidOid; + bool found; + BlackMapEntry keyitem; GlobalBlackMapEntry *entry; bool found_rel = get_rel_owner_schema_tablespace(reloid, &ownerOid, &nsOid, &tablespaceoid); @@ -1554,9 +1463,7 @@ check_blackmap_by_reloid(Oid reloid) for (QuotaType type = 0; type < NUM_QUOTA_TYPES; ++type) { prepare_blackmap_search_key(&keyitem, type, ownerOid, nsOid, tablespaceoid); - entry = hash_search(disk_quota_black_map, - &keyitem, - HASH_FIND, &found); + entry = hash_search(disk_quota_black_map, &keyitem, HASH_FIND, &found); if (found) { LWLockRelease(diskquota_locks.black_map_lock); @@ -1576,26 +1483,21 @@ check_blackmap_by_reloid(Oid reloid) bool quota_check_common(Oid reloid, RelFileNode *relfilenode) { - bool enable_hardlimit; + bool enable_hardlimit; - if (!IsTransactionState()) - return true; + if (!IsTransactionState()) return true; - if (diskquota_is_paused()) - return true; + if (diskquota_is_paused()) return true; - if (OidIsValid(reloid)) - return check_blackmap_by_reloid(reloid); + if (OidIsValid(reloid)) return check_blackmap_by_reloid(reloid); enable_hardlimit = diskquota_hardlimit; #ifdef FAULT_INJECTOR - if (SIMPLE_FAULT_INJECTOR("enable_check_quota_by_relfilenode") == FaultInjectorTypeSkip) - enable_hardlimit = true; + if (SIMPLE_FAULT_INJECTOR("enable_check_quota_by_relfilenode") == FaultInjectorTypeSkip) enable_hardlimit = true; #endif - if (relfilenode && enable_hardlimit) - return check_blackmap_by_relfilenode(*relfilenode); + if (relfilenode && enable_hardlimit) return check_blackmap_by_relfilenode(*relfilenode); return true; } @@ -1606,7 +1508,7 @@ quota_check_common(Oid reloid, RelFileNode *relfilenode) void invalidate_database_blackmap(Oid dbid) { - BlackMapEntry *entry; + BlackMapEntry *entry; HASH_SEQ_STATUS iter; LWLockAcquire(diskquota_locks.black_map_lock, LW_EXCLUSIVE); @@ -1626,7 +1528,7 @@ GetNamespaceName(Oid spcid, bool skip_name) { if (skip_name) { - NameData spcstr; + NameData spcstr; pg_ltoa(spcid, spcstr.data); return pstrdup(spcstr.data); } @@ -1638,7 +1540,7 @@ GetTablespaceName(Oid spcid, bool skip_name) { if (skip_name) { - NameData spcstr; + NameData 
spcstr; pg_ltoa(spcid, spcstr.data); return pstrdup(spcstr.data); } @@ -1650,7 +1552,7 @@ GetUserName(Oid relowner, bool skip_name) { if (skip_name) { - NameData namestr; + NameData namestr; pg_ltoa(relowner, namestr.data); return pstrdup(namestr.data); } @@ -1661,42 +1563,42 @@ static void export_exceeded_error(GlobalBlackMapEntry *entry, bool skip_name) { BlackMapEntry *blackentry = &entry->keyitem; - switch(blackentry->targettype) + switch (blackentry->targettype) { case NAMESPACE_QUOTA: - ereport(ERROR, - (errcode(ERRCODE_DISK_FULL), - errmsg("schema's disk space quota exceeded with name:%s", GetNamespaceName(blackentry->targetoid, skip_name)))); + ereport(ERROR, (errcode(ERRCODE_DISK_FULL), errmsg("schema's disk space quota exceeded with name:%s", + GetNamespaceName(blackentry->targetoid, skip_name)))); break; case ROLE_QUOTA: - ereport(ERROR, - (errcode(ERRCODE_DISK_FULL), - errmsg("role's disk space quota exceeded with name:%s", GetUserName(blackentry->targetoid, skip_name)))); + ereport(ERROR, (errcode(ERRCODE_DISK_FULL), errmsg("role's disk space quota exceeded with name:%s", + GetUserName(blackentry->targetoid, skip_name)))); break; case NAMESPACE_TABLESPACE_QUOTA: if (entry->segexceeded) - ereport(ERROR, - (errcode(ERRCODE_DISK_FULL), - errmsg("tablespace:%s schema:%s diskquota exceeded per segment quota", GetTablespaceName(blackentry->tablespaceoid, skip_name), GetNamespaceName(blackentry->targetoid, skip_name)))); + ereport(ERROR, (errcode(ERRCODE_DISK_FULL), + errmsg("tablespace:%s schema:%s diskquota exceeded per segment quota", + GetTablespaceName(blackentry->tablespaceoid, skip_name), + GetNamespaceName(blackentry->targetoid, skip_name)))); else ereport(ERROR, - (errcode(ERRCODE_DISK_FULL), - errmsg("tablespace:%s schema:%s diskquota exceeded", GetTablespaceName(blackentry->tablespaceoid, skip_name), GetNamespaceName(blackentry->targetoid, skip_name)))); + (errcode(ERRCODE_DISK_FULL), errmsg("tablespace:%s schema:%s diskquota exceeded", + GetTablespaceName(blackentry->tablespaceoid, skip_name), + GetNamespaceName(blackentry->targetoid, skip_name)))); break; case ROLE_TABLESPACE_QUOTA: if (entry->segexceeded) - ereport(ERROR, - (errcode(ERRCODE_DISK_FULL), - errmsg("tablespace:%s role:%s diskquota exceeded per segment quota", GetTablespaceName(blackentry->tablespaceoid, skip_name), GetUserName(blackentry->targetoid, skip_name)))); + ereport(ERROR, (errcode(ERRCODE_DISK_FULL), + errmsg("tablespace:%s role:%s diskquota exceeded per segment quota", + GetTablespaceName(blackentry->tablespaceoid, skip_name), + GetUserName(blackentry->targetoid, skip_name)))); else ereport(ERROR, - (errcode(ERRCODE_DISK_FULL), - errmsg("tablespace:%s role:%s diskquota exceeded", GetTablespaceName(blackentry->tablespaceoid, skip_name), GetUserName(blackentry->targetoid, skip_name)))); + (errcode(ERRCODE_DISK_FULL), errmsg("tablespace:%s role:%s diskquota exceeded", + GetTablespaceName(blackentry->tablespaceoid, skip_name), + GetUserName(blackentry->targetoid, skip_name)))); break; - default : - ereport(ERROR, - (errcode(ERRCODE_DISK_FULL), - errmsg("diskquota exceeded, unknown quota type"))); + default: + ereport(ERROR, (errcode(ERRCODE_DISK_FULL), errmsg("diskquota exceeded, unknown quota type"))); } } @@ -1717,26 +1619,25 @@ PG_FUNCTION_INFO_V1(refresh_blackmap); Datum refresh_blackmap(PG_FUNCTION_ARGS) { - ArrayType *blackmap_array_type = PG_GETARG_ARRAYTYPE_P(0); - ArrayType *active_oid_array_type = PG_GETARG_ARRAYTYPE_P(1); - Oid blackmap_elem_type = ARR_ELEMTYPE(blackmap_array_type); - Oid 
active_oid_elem_type = ARR_ELEMTYPE(active_oid_array_type); - Datum *datums; - bool *nulls; - int16 elem_width; - bool elem_type_by_val; - char elem_alignment_code; - int count; - HeapTupleHeader lt; - bool segexceeded; - GlobalBlackMapEntry *blackmapentry; - HASH_SEQ_STATUS hash_seq; - HTAB *local_blackmap; - HASHCTL hashctl; - int ret_code; - - if (!superuser()) - errmsg("must be superuser to update blackmap"); + ArrayType *blackmap_array_type = PG_GETARG_ARRAYTYPE_P(0); + ArrayType *active_oid_array_type = PG_GETARG_ARRAYTYPE_P(1); + Oid blackmap_elem_type = ARR_ELEMTYPE(blackmap_array_type); + Oid active_oid_elem_type = ARR_ELEMTYPE(active_oid_array_type); + Datum *datums; + bool *nulls; + int16 elem_width; + bool elem_type_by_val; + char elem_alignment_code; + int count; + HeapTupleHeader lt; + bool segexceeded; + GlobalBlackMapEntry *blackmapentry; + HASH_SEQ_STATUS hash_seq; + HTAB *local_blackmap; + HASHCTL hashctl; + int ret_code; + + if (!superuser()) errmsg("must be superuser to update blackmap"); if (ARR_NDIM(blackmap_array_type) > 1 || ARR_NDIM(active_oid_array_type) > 1) ereport(ERROR, (errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR), errmsg("1-dimensional array needed"))); @@ -1750,9 +1651,8 @@ refresh_blackmap(PG_FUNCTION_ARGS) ret_code = SPI_connect(); if (ret_code != SPI_OK_CONNECT) - ereport(ERROR, - (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("unable to connect to execute internal query, return code: %d", ret_code))); + ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("unable to connect to execute internal query, return code: %d", ret_code))); /* * Secondly, iterate over blackmap entries and add these entries to the local black map @@ -1760,10 +1660,10 @@ refresh_blackmap(PG_FUNCTION_ARGS) * should be blacked in O(1) time complexity in third step. */ memset(&hashctl, 0, sizeof(hashctl)); - hashctl.keysize = sizeof(BlackMapEntry); + hashctl.keysize = sizeof(BlackMapEntry); hashctl.entrysize = sizeof(GlobalBlackMapEntry); - hashctl.hcxt = CurrentMemoryContext; - hashctl.hash = tag_hash; + hashctl.hcxt = CurrentMemoryContext; + hashctl.hash = tag_hash; /* * Since uncommitted relations' information and the global blackmap entries @@ -1772,22 +1672,19 @@ refresh_blackmap(PG_FUNCTION_ARGS) * entries into the local_blackmap below and then flush the content of the * local_blackmap to the global blackmap at the end of this UDF. 
*/ - local_blackmap = hash_create("local_blackmap", - 1024, &hashctl, - HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); + local_blackmap = hash_create("local_blackmap", 1024, &hashctl, HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); get_typlenbyvalalign(blackmap_elem_type, &elem_width, &elem_type_by_val, &elem_alignment_code); - deconstruct_array(blackmap_array_type, blackmap_elem_type, elem_width, - elem_type_by_val, elem_alignment_code, &datums, &nulls, &count); + deconstruct_array(blackmap_array_type, blackmap_elem_type, elem_width, elem_type_by_val, elem_alignment_code, + &datums, &nulls, &count); for (int i = 0; i < count; ++i) { - BlackMapEntry keyitem; - bool isnull; + BlackMapEntry keyitem; + bool isnull; - if (nulls[i]) - continue; + if (nulls[i]) continue; memset(&keyitem, 0, sizeof(BlackMapEntry)); - lt = DatumGetHeapTupleHeader(datums[i]); + lt = DatumGetHeapTupleHeader(datums[i]); keyitem.targetoid = DatumGetObjectId(GetAttributeByNum(lt, 1, &isnull)); keyitem.databaseoid = DatumGetObjectId(GetAttributeByNum(lt, 2, &isnull)); keyitem.tablespaceoid = DatumGetObjectId(GetAttributeByNum(lt, 3, &isnull)); @@ -1797,11 +1694,10 @@ refresh_blackmap(PG_FUNCTION_ARGS) { Assert(OidIsValid(keyitem.tablespaceoid)); } - segexceeded = DatumGetBool(GetAttributeByNum(lt, 5, &isnull)); + segexceeded = DatumGetBool(GetAttributeByNum(lt, 5, &isnull)); blackmapentry = hash_search(local_blackmap, &keyitem, HASH_ENTER_NULL, NULL); - if (blackmapentry) - blackmapentry->segexceeded = segexceeded; + if (blackmapentry) blackmapentry->segexceeded = segexceeded; } /* @@ -1810,36 +1706,32 @@ refresh_blackmap(PG_FUNCTION_ARGS) * index relations to the global black map. */ get_typlenbyvalalign(active_oid_elem_type, &elem_width, &elem_type_by_val, &elem_alignment_code); - deconstruct_array(active_oid_array_type, active_oid_elem_type, elem_width, - elem_type_by_val, elem_alignment_code, &datums, &nulls, &count); + deconstruct_array(active_oid_array_type, active_oid_elem_type, elem_width, elem_type_by_val, elem_alignment_code, + &datums, &nulls, &count); for (int i = 0; i < count; ++i) { - Oid active_oid = InvalidOid; - HeapTuple tuple; - if (nulls[i]) - continue; + Oid active_oid = InvalidOid; + HeapTuple tuple; + if (nulls[i]) continue; active_oid = DatumGetObjectId(datums[i]); - if (!OidIsValid(active_oid)) - continue; + if (!OidIsValid(active_oid)) continue; tuple = SearchSysCacheCopy1(RELOID, active_oid); if (HeapTupleIsValid(tuple)) { - Form_pg_class form = (Form_pg_class) GETSTRUCT(tuple); - Oid relnamespace = form->relnamespace; - Oid reltablespace = OidIsValid(form->reltablespace) ? - form->reltablespace : MyDatabaseTableSpace; - Oid relowner = form->relowner; - BlackMapEntry keyitem; - bool found; + Form_pg_class form = (Form_pg_class)GETSTRUCT(tuple); + Oid relnamespace = form->relnamespace; + Oid reltablespace = OidIsValid(form->reltablespace) ? form->reltablespace : MyDatabaseTableSpace; + Oid relowner = form->relowner; + BlackMapEntry keyitem; + bool found; for (QuotaType type = 0; type < NUM_QUOTA_TYPES; ++type) { /* Check that if the current relation should be blocked. 
*/ prepare_blackmap_search_key(&keyitem, type, relowner, relnamespace, reltablespace); - blackmapentry = hash_search(local_blackmap, - &keyitem, HASH_FIND, &found); + blackmapentry = hash_search(local_blackmap, &keyitem, HASH_FIND, &found); if (found && blackmapentry) { /* @@ -1847,13 +1739,13 @@ refresh_blackmap(PG_FUNCTION_ARGS) * of itself together with the relfilenodes of its toast relation and * appendonly relations to the global black map. */ - List *oid_list = NIL; - ListCell *cell = NULL; - Oid toastrelid = form->reltoastrelid; - Oid aosegrelid = InvalidOid; - Oid aoblkdirrelid = InvalidOid; - Oid aovisimaprelid = InvalidOid; - oid_list = lappend_oid(oid_list, active_oid); + List *oid_list = NIL; + ListCell *cell = NULL; + Oid toastrelid = form->reltoastrelid; + Oid aosegrelid = InvalidOid; + Oid aoblkdirrelid = InvalidOid; + Oid aovisimaprelid = InvalidOid; + oid_list = lappend_oid(oid_list, active_oid); /* Append toast relation and toast index to the oid_list if any. */ if (OidIsValid(toastrelid)) @@ -1863,8 +1755,7 @@ refresh_blackmap(PG_FUNCTION_ARGS) } /* Append ao auxiliary relations and their indexes to the oid_list if any. */ - diskquota_get_appendonly_aux_oid_list(active_oid, &aosegrelid, - &aoblkdirrelid, &aovisimaprelid); + diskquota_get_appendonly_aux_oid_list(active_oid, &aosegrelid, &aoblkdirrelid, &aovisimaprelid); if (OidIsValid(aosegrelid)) { oid_list = lappend_oid(oid_list, aosegrelid); @@ -1882,30 +1773,27 @@ refresh_blackmap(PG_FUNCTION_ARGS) } /* Iterate over the oid_list and add their relfilenodes to the blackmap. */ - foreach(cell, oid_list) + foreach (cell, oid_list) { - Oid curr_oid = lfirst_oid(cell); + Oid curr_oid = lfirst_oid(cell); HeapTuple curr_tuple = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(curr_oid)); if (HeapTupleIsValid(curr_tuple)) { - Form_pg_class curr_form = (Form_pg_class) GETSTRUCT(curr_tuple); - Oid curr_reltablespace = - OidIsValid(curr_form->reltablespace) ? - curr_form->reltablespace : MyDatabaseTableSpace; - RelFileNode relfilenode = - { .dbNode = MyDatabaseId, - .relNode = curr_form->relfilenode, - .spcNode = curr_reltablespace }; - bool found; - GlobalBlackMapEntry *blocked_filenode_entry; - BlackMapEntry blocked_filenode_keyitem; + Form_pg_class curr_form = (Form_pg_class)GETSTRUCT(curr_tuple); + Oid curr_reltablespace = OidIsValid(curr_form->reltablespace) ? curr_form->reltablespace + : MyDatabaseTableSpace; + RelFileNode relfilenode = {.dbNode = MyDatabaseId, + .relNode = curr_form->relfilenode, + .spcNode = curr_reltablespace}; + bool found; + GlobalBlackMapEntry *blocked_filenode_entry; + BlackMapEntry blocked_filenode_keyitem; memset(&blocked_filenode_keyitem, 0, sizeof(BlackMapEntry)); memcpy(&blocked_filenode_keyitem.relfilenode, &relfilenode, sizeof(RelFileNode)); - blocked_filenode_entry = hash_search(local_blackmap, - &blocked_filenode_keyitem, - HASH_ENTER_NULL, &found); + blocked_filenode_entry = + hash_search(local_blackmap, &blocked_filenode_keyitem, HASH_ENTER_NULL, &found); if (!found && blocked_filenode_entry) { memcpy(&blocked_filenode_entry->auxblockinfo, &keyitem, sizeof(BlackMapEntry)); @@ -1920,24 +1808,22 @@ refresh_blackmap(PG_FUNCTION_ARGS) break; } } - } - else + } else { /* * We cannot fetch the relation from syscache. It may be an uncommitted relation. * Let's try to fetch it from relation_cache. 
*/ - DiskQuotaRelationCacheEntry *relation_cache_entry; - bool found; + DiskQuotaRelationCacheEntry *relation_cache_entry; + bool found; LWLockAcquire(diskquota_locks.relation_cache_lock, LW_SHARED); - relation_cache_entry = hash_search(relation_cache, &active_oid, - HASH_FIND, &found); + relation_cache_entry = hash_search(relation_cache, &active_oid, HASH_FIND, &found); if (found && relation_cache_entry) { - Oid relnamespace = relation_cache_entry->namespaceoid; - Oid reltablespace = relation_cache_entry->rnode.node.spcNode; - Oid relowner = relation_cache_entry->owneroid; - BlackMapEntry keyitem; + Oid relnamespace = relation_cache_entry->namespaceoid; + Oid reltablespace = relation_cache_entry->rnode.node.spcNode; + Oid relowner = relation_cache_entry->owneroid; + BlackMapEntry keyitem; for (QuotaType type = 0; type < NUM_QUOTA_TYPES; ++type) { /* Check that if the current relation should be blocked. */ @@ -1946,32 +1832,30 @@ refresh_blackmap(PG_FUNCTION_ARGS) if (found && blackmapentry) { - List *oid_list = NIL; - ListCell *cell = NULL; + List *oid_list = NIL; + ListCell *cell = NULL; /* Collect the relation oid together with its auxiliary relations' oid. */ oid_list = lappend_oid(oid_list, active_oid); for (int auxoidcnt = 0; auxoidcnt < relation_cache_entry->auxrel_num; ++auxoidcnt) oid_list = lappend_oid(oid_list, relation_cache_entry->auxrel_oid[auxoidcnt]); - foreach(cell, oid_list) + foreach (cell, oid_list) { - bool found; - GlobalBlackMapEntry *blocked_filenode_entry; - BlackMapEntry blocked_filenode_keyitem; - Oid curr_oid = lfirst_oid(cell); + bool found; + GlobalBlackMapEntry *blocked_filenode_entry; + BlackMapEntry blocked_filenode_keyitem; + Oid curr_oid = lfirst_oid(cell); - relation_cache_entry = hash_search(relation_cache, - &curr_oid, HASH_FIND, &found); + relation_cache_entry = hash_search(relation_cache, &curr_oid, HASH_FIND, &found); if (found && relation_cache_entry) { memset(&blocked_filenode_keyitem, 0, sizeof(BlackMapEntry)); - memcpy(&blocked_filenode_keyitem.relfilenode, - &relation_cache_entry->rnode.node, sizeof(RelFileNode)); + memcpy(&blocked_filenode_keyitem.relfilenode, &relation_cache_entry->rnode.node, + sizeof(RelFileNode)); - blocked_filenode_entry = hash_search(local_blackmap, - &blocked_filenode_keyitem, - HASH_ENTER_NULL, &found); + blocked_filenode_entry = + hash_search(local_blackmap, &blocked_filenode_keyitem, HASH_ENTER_NULL, &found); if (!found && blocked_filenode_entry) { memcpy(&blocked_filenode_entry->auxblockinfo, &keyitem, sizeof(BlackMapEntry)); @@ -1991,10 +1875,9 @@ refresh_blackmap(PG_FUNCTION_ARGS) hash_seq_init(&hash_seq, local_blackmap); while ((blackmapentry = hash_seq_search(&hash_seq)) != NULL) { - bool found; - GlobalBlackMapEntry *new_entry; - new_entry = hash_search(disk_quota_black_map, &blackmapentry->keyitem, - HASH_ENTER_NULL, &found); + bool found; + GlobalBlackMapEntry *new_entry; + new_entry = hash_search(disk_quota_black_map, &blackmapentry->keyitem, HASH_ENTER_NULL, &found); /* * We don't perform soft-limit on segment servers, so we don't flush the * blackmap entry with a valid targetoid to the global blackmap on segment @@ -2018,19 +1901,20 @@ PG_FUNCTION_INFO_V1(show_blackmap); Datum show_blackmap(PG_FUNCTION_ARGS) { - FuncCallContext *funcctx; - GlobalBlackMapEntry *blackmap_entry; - struct BlackMapCtx { - HASH_SEQ_STATUS blackmap_seq; - HTAB *blackmap; - } *blackmap_ctx; + FuncCallContext *funcctx; + GlobalBlackMapEntry *blackmap_entry; + struct BlackMapCtx + { + HASH_SEQ_STATUS blackmap_seq; + HTAB *blackmap; 
+ } * blackmap_ctx; if (SRF_IS_FIRSTCALL()) { - TupleDesc tupdesc; - MemoryContext oldcontext; - HASHCTL hashctl; - HASH_SEQ_STATUS hash_seq; + TupleDesc tupdesc; + MemoryContext oldcontext; + HASHCTL hashctl; + HASH_SEQ_STATUS hash_seq; /* Create a function context for cross-call persistence. */ funcctx = SRF_FIRSTCALL_INIT(); @@ -2039,72 +1923,65 @@ show_blackmap(PG_FUNCTION_ARGS) oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx); tupdesc = CreateTemplateTupleDesc(9, false /*hasoid*/); - TupleDescInitEntry(tupdesc, (AttrNumber) 1, "target_type", TEXTOID, -1 /*typmod*/, 0 /*attdim*/); - TupleDescInitEntry(tupdesc, (AttrNumber) 2, "target_oid", OIDOID, -1 /*typmod*/, 0 /*attdim*/); - TupleDescInitEntry(tupdesc, (AttrNumber) 3, "database_oid", OIDOID, -1 /*typmod*/, 0 /*attdim*/); - TupleDescInitEntry(tupdesc, (AttrNumber) 4, "tablespace_oid", OIDOID, -1 /*typmod*/, 0 /*attdim*/); - TupleDescInitEntry(tupdesc, (AttrNumber) 5, "seg_exceeded", BOOLOID, -1 /*typmod*/, 0 /*attdim*/); - TupleDescInitEntry(tupdesc, (AttrNumber) 6, "dbnode", OIDOID, -1 /*typmod*/, 0 /*attdim*/); - TupleDescInitEntry(tupdesc, (AttrNumber) 7, "spcnode", OIDOID, -1 /*typmod*/, 0 /*attdim*/); - TupleDescInitEntry(tupdesc, (AttrNumber) 8, "relnode", OIDOID, -1 /*typmod*/, 0 /*attdim*/); - TupleDescInitEntry(tupdesc, (AttrNumber) 9, "segid", INT4OID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber)1, "target_type", TEXTOID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber)2, "target_oid", OIDOID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber)3, "database_oid", OIDOID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber)4, "tablespace_oid", OIDOID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber)5, "seg_exceeded", BOOLOID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber)6, "dbnode", OIDOID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber)7, "spcnode", OIDOID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber)8, "relnode", OIDOID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber)9, "segid", INT4OID, -1 /*typmod*/, 0 /*attdim*/); funcctx->tuple_desc = BlessTupleDesc(tupdesc); /* Create a local hash table and fill it with entries from shared memory. 
*/ memset(&hashctl, 0, sizeof(hashctl)); - hashctl.keysize = sizeof(BlackMapEntry); + hashctl.keysize = sizeof(BlackMapEntry); hashctl.entrysize = sizeof(GlobalBlackMapEntry); - hashctl.hcxt = CurrentMemoryContext; - hashctl.hash = tag_hash; + hashctl.hcxt = CurrentMemoryContext; + hashctl.hash = tag_hash; - blackmap_ctx = (struct BlackMapCtx *) palloc(sizeof(struct BlackMapCtx)); - blackmap_ctx->blackmap = hash_create("blackmap_ctx blackmap", - 1024, &hashctl, - HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); + blackmap_ctx = (struct BlackMapCtx *)palloc(sizeof(struct BlackMapCtx)); + blackmap_ctx->blackmap = + hash_create("blackmap_ctx blackmap", 1024, &hashctl, HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); LWLockAcquire(diskquota_locks.black_map_lock, LW_SHARED); hash_seq_init(&hash_seq, disk_quota_black_map); while ((blackmap_entry = hash_seq_search(&hash_seq)) != NULL) { - GlobalBlackMapEntry *local_blackmap_entry = NULL; - local_blackmap_entry = hash_search(blackmap_ctx->blackmap, - &blackmap_entry->keyitem, - HASH_ENTER_NULL, NULL); + GlobalBlackMapEntry *local_blackmap_entry = NULL; + local_blackmap_entry = hash_search(blackmap_ctx->blackmap, &blackmap_entry->keyitem, HASH_ENTER_NULL, NULL); if (local_blackmap_entry) { - memcpy(&local_blackmap_entry->keyitem, - &blackmap_entry->keyitem, sizeof(BlackMapEntry)); + memcpy(&local_blackmap_entry->keyitem, &blackmap_entry->keyitem, sizeof(BlackMapEntry)); local_blackmap_entry->segexceeded = blackmap_entry->segexceeded; - memcpy(&local_blackmap_entry->auxblockinfo, - &blackmap_entry->auxblockinfo, sizeof(BlackMapEntry)); + memcpy(&local_blackmap_entry->auxblockinfo, &blackmap_entry->auxblockinfo, sizeof(BlackMapEntry)); } } LWLockRelease(diskquota_locks.black_map_lock); /* Setup first calling context. */ - hash_seq_init(&(blackmap_ctx->blackmap_seq), - blackmap_ctx->blackmap); - funcctx->user_fctx = (void *) blackmap_ctx; + hash_seq_init(&(blackmap_ctx->blackmap_seq), blackmap_ctx->blackmap); + funcctx->user_fctx = (void *)blackmap_ctx; MemoryContextSwitchTo(oldcontext); } - funcctx = SRF_PERCALL_SETUP(); - blackmap_ctx = (struct BlackMapCtx *) funcctx->user_fctx; + funcctx = SRF_PERCALL_SETUP(); + blackmap_ctx = (struct BlackMapCtx *)funcctx->user_fctx; while ((blackmap_entry = hash_seq_search(&(blackmap_ctx->blackmap_seq))) != NULL) { #define _TARGETTYPE_STR_SIZE 32 - Datum result; - Datum values[9]; - bool nulls[9]; - HeapTuple tuple; - BlackMapEntry keyitem; - char targettype_str[_TARGETTYPE_STR_SIZE]; - RelFileNode blocked_relfilenode; - - memcpy(&blocked_relfilenode, - &blackmap_entry->keyitem.relfilenode, sizeof(RelFileNode)); + Datum result; + Datum values[9]; + bool nulls[9]; + HeapTuple tuple; + BlackMapEntry keyitem; + char targettype_str[_TARGETTYPE_STR_SIZE]; + RelFileNode blocked_relfilenode; + + memcpy(&blocked_relfilenode, &blackmap_entry->keyitem.relfilenode, sizeof(RelFileNode)); /* * If the blackmap entry is indexed by relfilenode, we dump the blocking * condition from auxblockinfo. 
@@ -2115,23 +1992,23 @@ show_blackmap(PG_FUNCTION_ARGS) memcpy(&keyitem, &blackmap_entry->auxblockinfo, sizeof(keyitem)); memset(targettype_str, 0, sizeof(targettype_str)); - switch ((QuotaType) keyitem.targettype) + switch ((QuotaType)keyitem.targettype) { - case ROLE_QUOTA: - StrNCpy(targettype_str, "ROLE_QUOTA", _TARGETTYPE_STR_SIZE); - break; - case NAMESPACE_QUOTA: - StrNCpy(targettype_str, "NAMESPACE_QUOTA", _TARGETTYPE_STR_SIZE); - break; - case ROLE_TABLESPACE_QUOTA: - StrNCpy(targettype_str, "ROLE_TABLESPACE_QUOTA", _TARGETTYPE_STR_SIZE); - break; - case NAMESPACE_TABLESPACE_QUOTA: - StrNCpy(targettype_str, "NAMESPACE_TABLESPACE_QUOTA", _TARGETTYPE_STR_SIZE); - break; - default: - StrNCpy(targettype_str, "UNKNOWN", _TARGETTYPE_STR_SIZE); - break; + case ROLE_QUOTA: + StrNCpy(targettype_str, "ROLE_QUOTA", _TARGETTYPE_STR_SIZE); + break; + case NAMESPACE_QUOTA: + StrNCpy(targettype_str, "NAMESPACE_QUOTA", _TARGETTYPE_STR_SIZE); + break; + case ROLE_TABLESPACE_QUOTA: + StrNCpy(targettype_str, "ROLE_TABLESPACE_QUOTA", _TARGETTYPE_STR_SIZE); + break; + case NAMESPACE_TABLESPACE_QUOTA: + StrNCpy(targettype_str, "NAMESPACE_TABLESPACE_QUOTA", _TARGETTYPE_STR_SIZE); + break; + default: + StrNCpy(targettype_str, "UNKNOWN", _TARGETTYPE_STR_SIZE); + break; } values[0] = CStringGetTextDatum(targettype_str); @@ -2145,7 +2022,7 @@ show_blackmap(PG_FUNCTION_ARGS) values[8] = Int32GetDatum(GpIdentity.segindex); memset(nulls, false, sizeof(nulls)); - tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls); + tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls); result = HeapTupleGetDatum(tuple); SRF_RETURN_NEXT(funcctx, result); diff --git a/relation_cache.c b/relation_cache.c index b1794cdf9e1..33a3284cf89 100644 --- a/relation_cache.c +++ b/relation_cache.c @@ -13,41 +13,36 @@ #include "relation_cache.h" #include "diskquota.h" -HTAB *relation_cache = NULL; -HTAB *relid_cache = NULL; +HTAB *relation_cache = NULL; +HTAB *relid_cache = NULL; -static void update_relation_entry(Oid relid, DiskQuotaRelationCacheEntry *relation_entry, DiskQuotaRelidCacheEntry *relid_entry); +static void update_relation_entry(Oid relid, DiskQuotaRelationCacheEntry *relation_entry, + DiskQuotaRelidCacheEntry *relid_entry); PG_FUNCTION_INFO_V1(show_relation_cache); void init_shm_worker_relation_cache(void) { - HASHCTL ctl; + HASHCTL ctl; memset(&ctl, 0, sizeof(ctl)); - ctl.keysize = sizeof(Oid); + ctl.keysize = sizeof(Oid); ctl.entrysize = sizeof(DiskQuotaRelationCacheEntry); - ctl.hash = tag_hash; + ctl.hash = tag_hash; - relation_cache = ShmemInitHash("relation_cache", - diskquota_max_active_tables, - diskquota_max_active_tables, - &ctl, - HASH_ELEM | HASH_FUNCTION); + relation_cache = ShmemInitHash("relation_cache", diskquota_max_active_tables, diskquota_max_active_tables, &ctl, + HASH_ELEM | HASH_FUNCTION); memset(&ctl, 0, sizeof(ctl)); - ctl.keysize = sizeof(Oid); + ctl.keysize = sizeof(Oid); ctl.entrysize = sizeof(DiskQuotaRelidCacheEntry); - ctl.hash = tag_hash; + ctl.hash = tag_hash; - relid_cache = ShmemInitHash("relid_cache", - diskquota_max_active_tables, - diskquota_max_active_tables, - &ctl, - HASH_ELEM | HASH_FUNCTION); + relid_cache = ShmemInitHash("relid_cache", diskquota_max_active_tables, diskquota_max_active_tables, &ctl, + HASH_ELEM | HASH_FUNCTION); } Oid @@ -56,7 +51,7 @@ get_relid_by_relfilenode(RelFileNode relfilenode) Oid relid; relid = RelidByRelfilenode(relfilenode.spcNode, relfilenode.relNode); - if(OidIsValid(relid)) + if (OidIsValid(relid)) { remove_cache_entry(InvalidOid, 
relfilenode.relNode); return relid; @@ -70,7 +65,7 @@ void remove_cache_entry(Oid relid, Oid relfilenode) { DiskQuotaRelationCacheEntry *relation_entry; - DiskQuotaRelidCacheEntry *relid_entry; + DiskQuotaRelidCacheEntry *relid_entry; LWLockAcquire(diskquota_locks.relation_cache_lock, LW_EXCLUSIVE); if (OidIsValid(relid)) @@ -98,9 +93,9 @@ remove_cache_entry(Oid relid, Oid relfilenode) Oid get_uncommitted_table_relid(Oid relfilenode) { - Oid relid = InvalidOid; + Oid relid = InvalidOid; DiskQuotaRelidCacheEntry *entry; - + LWLockAcquire(diskquota_locks.relation_cache_lock, LW_SHARED); entry = hash_search(relid_cache, &relfilenode, HASH_FIND, NULL); if (entry) @@ -116,7 +111,7 @@ static void add_auxrelid_to_relation_entry(DiskQuotaRelationCacheEntry *entry, Oid relid) { int i; - + for (i = 0; i < entry->auxrel_num; i++) { if (entry->auxrel_oid[i] == relid) @@ -140,18 +135,18 @@ update_relation_entry(Oid relid, DiskQuotaRelationCacheEntry *relation_entry, Di if (relation_entry) { - relation_entry->relid = relid; - relation_entry->rnode.node = rel->rd_node; + relation_entry->relid = relid; + relation_entry->rnode.node = rel->rd_node; relation_entry->rnode.backend = rel->rd_backend; - relation_entry->owneroid = rel->rd_rel->relowner; - relation_entry->namespaceoid = rel->rd_rel->relnamespace; - relation_entry->relstorage = rel->rd_rel->relstorage; + relation_entry->owneroid = rel->rd_rel->relowner; + relation_entry->namespaceoid = rel->rd_rel->relnamespace; + relation_entry->relstorage = rel->rd_rel->relstorage; } if (relid_entry) { relid_entry->relfilenode = rel->rd_node.relNode; - relid_entry->relid = relid; + relid_entry->relid = relid; } relation_entry->primary_table_relid = relid; @@ -162,11 +157,11 @@ update_relation_entry(Oid relid, DiskQuotaRelationCacheEntry *relation_entry, Di void update_relation_cache(Oid relid) { - DiskQuotaRelationCacheEntry relation_entry_data = {0}; + DiskQuotaRelationCacheEntry relation_entry_data = {0}; DiskQuotaRelationCacheEntry *relation_entry; - DiskQuotaRelidCacheEntry relid_entry_data = {0}; - DiskQuotaRelidCacheEntry *relid_entry; - Oid prelid; + DiskQuotaRelidCacheEntry relid_entry_data = {0}; + DiskQuotaRelidCacheEntry *relid_entry; + Oid prelid; update_relation_entry(relid, &relation_entry_data, &relid_entry_data); @@ -183,7 +178,7 @@ update_relation_cache(Oid relid) { LWLockAcquire(diskquota_locks.relation_cache_lock, LW_EXCLUSIVE); relation_entry->primary_table_relid = prelid; - relation_entry = hash_search(relation_cache, &prelid, HASH_FIND, NULL); + relation_entry = hash_search(relation_cache, &prelid, HASH_FIND, NULL); if (relation_entry) { add_auxrelid_to_relation_entry(relation_entry, relid); @@ -197,7 +192,7 @@ parse_primary_table_oid(Oid relid) { Relation rel; Oid namespace; - Oid parsed_oid; + Oid parsed_oid; char relname[NAMEDATALEN]; rel = diskquota_relation_open(relid, NoLock); @@ -222,8 +217,8 @@ Oid get_primary_table_oid(Oid relid) { DiskQuotaRelationCacheEntry *relation_entry; - Oid cached_prelid = relid; - Oid parsed_prelid; + Oid cached_prelid = relid; + Oid parsed_prelid; parsed_prelid = parse_primary_table_oid(relid); if (OidIsValid(parsed_prelid)) @@ -245,22 +240,19 @@ get_primary_table_oid(Oid relid) void remove_committed_relation_from_cache(void) { - HASH_SEQ_STATUS iter = {0}; - DiskQuotaRelationCacheEntry *entry = NULL; + HASH_SEQ_STATUS iter = {0}; + DiskQuotaRelationCacheEntry *entry = NULL; DiskQuotaRelationCacheEntry *local_entry = NULL; - HTAB *local_relation_cache; - HASHCTL ctl; + HTAB *local_relation_cache; + HASHCTL 
ctl; memset(&ctl, 0, sizeof(ctl)); - ctl.keysize = sizeof(Oid); + ctl.keysize = sizeof(Oid); ctl.entrysize = sizeof(DiskQuotaRelationCacheEntry); - ctl.hcxt = CurrentMemoryContext; - ctl.hash = oid_hash; + ctl.hcxt = CurrentMemoryContext; + ctl.hash = oid_hash; - local_relation_cache = hash_create("local relation cache", - 1024, - &ctl, - HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); + local_relation_cache = hash_create("local relation cache", 1024, &ctl, HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); LWLockAcquire(diskquota_locks.relation_cache_lock, LW_SHARED); hash_seq_init(&iter, relation_cache); @@ -270,14 +262,14 @@ remove_committed_relation_from_cache(void) memcpy(local_entry, entry, sizeof(DiskQuotaRelationCacheEntry)); } LWLockRelease(diskquota_locks.relation_cache_lock); - + hash_seq_init(&iter, local_relation_cache); while ((local_entry = hash_seq_search(&iter)) != NULL) { /* * The committed table's oid can be fetched by RelidByRelfilenode(). * If the table's relfilenode is modified and its relation_cache_entry - * remains in relation_cache, the outdated relation_cache_entry should + * remains in relation_cache, the outdated relation_cache_entry should * be removed. */ if (OidIsValid(RelidByRelfilenode(local_entry->rnode.node.spcNode, local_entry->rnode.node.relNode))) @@ -292,18 +284,19 @@ Datum show_relation_cache(PG_FUNCTION_ARGS) { DiskQuotaRelationCacheEntry *entry; - FuncCallContext *funcctx; - struct RelationCacheCtx { - HASH_SEQ_STATUS iter; - HTAB *relation_cache; - } *relation_cache_ctx; + FuncCallContext *funcctx; + struct RelationCacheCtx + { + HASH_SEQ_STATUS iter; + HTAB *relation_cache; + } * relation_cache_ctx; if (SRF_IS_FIRSTCALL()) { - TupleDesc tupdesc; - MemoryContext oldcontext; - HASHCTL hashctl; - HASH_SEQ_STATUS hash_seq; + TupleDesc tupdesc; + MemoryContext oldcontext; + HASHCTL hashctl; + HASH_SEQ_STATUS hash_seq; /* Create a function context for cross-call persistence. 
*/ funcctx = SRF_FIRSTCALL_INIT(); @@ -312,38 +305,37 @@ show_relation_cache(PG_FUNCTION_ARGS) oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx); tupdesc = CreateTemplateTupleDesc(11, false /*hasoid*/); - TupleDescInitEntry(tupdesc, (AttrNumber) 1, "RELID", OIDOID, -1 /*typmod*/, 0 /*attdim*/); - TupleDescInitEntry(tupdesc, (AttrNumber) 2, "PRIMARY_TABLE_OID", OIDOID, -1 /*typmod*/, 0 /*attdim*/); - TupleDescInitEntry(tupdesc, (AttrNumber) 3, "AUXREL_NUM", INT4OID, -1 /*typmod*/, 0 /*attdim*/); - TupleDescInitEntry(tupdesc, (AttrNumber) 4, "OWNEROID", OIDOID, -1 /*typmod*/, 0 /*attdim*/); - TupleDescInitEntry(tupdesc, (AttrNumber) 5, "NAMESPACEOID", OIDOID, -1 /*typmod*/, 0 /*attdim*/); - TupleDescInitEntry(tupdesc, (AttrNumber) 6, "BACKENDID", INT4OID, -1 /*typmod*/, 0 /*attdim*/); - TupleDescInitEntry(tupdesc, (AttrNumber) 7, "SPCNODE", OIDOID, -1 /*typmod*/, 0 /*attdim*/); - TupleDescInitEntry(tupdesc, (AttrNumber) 8, "DBNODE", OIDOID, -1 /*typmod*/, 0 /*attdim*/); - TupleDescInitEntry(tupdesc, (AttrNumber) 9, "RELNODE", OIDOID, -1 /*typmod*/, 0 /*attdim*/); - TupleDescInitEntry(tupdesc, (AttrNumber) 10, "RELSTORAGE", CHAROID, -1 /*typmod*/, 0 /*attdim*/); - TupleDescInitEntry(tupdesc, (AttrNumber) 11, "AUXREL_OID", OIDARRAYOID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber)1, "RELID", OIDOID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber)2, "PRIMARY_TABLE_OID", OIDOID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber)3, "AUXREL_NUM", INT4OID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber)4, "OWNEROID", OIDOID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber)5, "NAMESPACEOID", OIDOID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber)6, "BACKENDID", INT4OID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber)7, "SPCNODE", OIDOID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber)8, "DBNODE", OIDOID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber)9, "RELNODE", OIDOID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber)10, "RELSTORAGE", CHAROID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber)11, "AUXREL_OID", OIDARRAYOID, -1 /*typmod*/, 0 /*attdim*/); funcctx->tuple_desc = BlessTupleDesc(tupdesc); /* Create a local hash table and fill it with entries from shared memory. 
*/ memset(&hashctl, 0, sizeof(hashctl)); - hashctl.keysize = sizeof(Oid); + hashctl.keysize = sizeof(Oid); hashctl.entrysize = sizeof(DiskQuotaRelationCacheEntry); - hashctl.hcxt = CurrentMemoryContext; - hashctl.hash = tag_hash; + hashctl.hcxt = CurrentMemoryContext; + hashctl.hash = tag_hash; - relation_cache_ctx = (struct RelationCacheCtx *) palloc(sizeof(struct RelationCacheCtx)); - relation_cache_ctx->relation_cache = hash_create("relation_cache_ctx->relation_cache", - 1024, &hashctl, - HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); + relation_cache_ctx = (struct RelationCacheCtx *)palloc(sizeof(struct RelationCacheCtx)); + relation_cache_ctx->relation_cache = hash_create("relation_cache_ctx->relation_cache", 1024, &hashctl, + HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); LWLockAcquire(diskquota_locks.relation_cache_lock, LW_SHARED); hash_seq_init(&hash_seq, relation_cache); - while ((entry = (DiskQuotaRelationCacheEntry *) hash_seq_search(&hash_seq)) != NULL) + while ((entry = (DiskQuotaRelationCacheEntry *)hash_seq_search(&hash_seq)) != NULL) { - DiskQuotaRelationCacheEntry *local_entry = hash_search(relation_cache_ctx->relation_cache, - &entry->relid, HASH_ENTER_NULL, NULL); + DiskQuotaRelationCacheEntry *local_entry = + hash_search(relation_cache_ctx->relation_cache, &entry->relid, HASH_ENTER_NULL, NULL); if (local_entry) { memcpy(local_entry, entry, sizeof(DiskQuotaRelationCacheEntry)); @@ -353,22 +345,22 @@ show_relation_cache(PG_FUNCTION_ARGS) /* Setup first calling context. */ hash_seq_init(&(relation_cache_ctx->iter), relation_cache_ctx->relation_cache); - funcctx->user_fctx = (void *) relation_cache_ctx; + funcctx->user_fctx = (void *)relation_cache_ctx; MemoryContextSwitchTo(oldcontext); } - funcctx = SRF_PERCALL_SETUP(); - relation_cache_ctx = (struct RelationCacheCtx *) funcctx->user_fctx; + funcctx = SRF_PERCALL_SETUP(); + relation_cache_ctx = (struct RelationCacheCtx *)funcctx->user_fctx; while ((entry = (DiskQuotaRelationCacheEntry *)hash_seq_search(&(relation_cache_ctx->iter))) != NULL) { - Datum result; - Datum values[11]; - Datum auxrel_oid[10]; - bool nulls[11]; - HeapTuple tuple; - ArrayType *array; - int i; + Datum result; + Datum values[11]; + Datum auxrel_oid[10]; + bool nulls[11]; + HeapTuple tuple; + ArrayType *array; + int i; for (i = 0; i < entry->auxrel_num; i++) { @@ -376,20 +368,20 @@ show_relation_cache(PG_FUNCTION_ARGS) } array = construct_array(auxrel_oid, entry->auxrel_num, OIDOID, sizeof(Oid), true, 'i'); - values[0] = ObjectIdGetDatum(entry->relid); - values[1] = ObjectIdGetDatum(entry->primary_table_relid); - values[2] = Int32GetDatum(entry->auxrel_num); - values[3] = ObjectIdGetDatum(entry->owneroid); - values[4] = ObjectIdGetDatum(entry->namespaceoid); - values[5] = Int32GetDatum(entry->rnode.backend); - values[6] = ObjectIdGetDatum(entry->rnode.node.spcNode); - values[7] = ObjectIdGetDatum(entry->rnode.node.dbNode); - values[8] = ObjectIdGetDatum(entry->rnode.node.relNode); - values[9] = CharGetDatum(entry->relstorage); + values[0] = ObjectIdGetDatum(entry->relid); + values[1] = ObjectIdGetDatum(entry->primary_table_relid); + values[2] = Int32GetDatum(entry->auxrel_num); + values[3] = ObjectIdGetDatum(entry->owneroid); + values[4] = ObjectIdGetDatum(entry->namespaceoid); + values[5] = Int32GetDatum(entry->rnode.backend); + values[6] = ObjectIdGetDatum(entry->rnode.node.spcNode); + values[7] = ObjectIdGetDatum(entry->rnode.node.dbNode); + values[8] = ObjectIdGetDatum(entry->rnode.node.relNode); + values[9] = CharGetDatum(entry->relstorage); values[10] 
= PointerGetDatum(array); memset(nulls, false, sizeof(nulls)); - tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls); + tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls); result = HeapTupleGetDatum(tuple); SRF_RETURN_NEXT(funcctx, result); @@ -401,15 +393,15 @@ show_relation_cache(PG_FUNCTION_ARGS) static void add_auxrelation_to_relation_entry(Oid relid, DiskQuotaRelationCacheEntry *pentry) { - List *index_oids; - ListCell *cell; + List *index_oids; + ListCell *cell; add_auxrelid_to_relation_entry(pentry, relid); index_oids = diskquota_get_index_list(relid); - foreach(cell, index_oids) + foreach (cell, index_oids) { - Oid idxrelid = lfirst_oid(cell); + Oid idxrelid = lfirst_oid(cell); add_auxrelid_to_relation_entry(pentry, idxrelid); } list_free(index_oids); @@ -418,39 +410,39 @@ add_auxrelation_to_relation_entry(Oid relid, DiskQuotaRelationCacheEntry *pentry /* * Returns true iff blkdirrelid is missing. * pg_aoblkdir_xxxx is created by `create index on ao_table`, which can not be - * fetched by diskquota_get_appendonly_aux_oid_list() before index's creation + * fetched by diskquota_get_appendonly_aux_oid_list() before index's creation * finish. By returning true to inform the caller that blkdirrelid is missing, * then the caller will fetch blkdirrelid by traversing relation_cache. */ static bool -get_relation_entry_from_pg_class(Oid relid, DiskQuotaRelationCacheEntry* relation_entry) +get_relation_entry_from_pg_class(Oid relid, DiskQuotaRelationCacheEntry *relation_entry) { - HeapTuple classTup; + HeapTuple classTup; Form_pg_class classForm; - Oid segrelid = InvalidOid; - Oid blkdirrelid = InvalidOid; - Oid visimaprelid = InvalidOid; - bool is_ao = false; + Oid segrelid = InvalidOid; + Oid blkdirrelid = InvalidOid; + Oid visimaprelid = InvalidOid; + bool is_ao = false; classTup = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(relid)); if (!HeapTupleIsValid(classTup) || relation_entry == NULL) { return false; } - - classForm = (Form_pg_class) GETSTRUCT(classTup); - relation_entry->relid = relid; + classForm = (Form_pg_class)GETSTRUCT(classTup); + + relation_entry->relid = relid; relation_entry->primary_table_relid = relid; - relation_entry->owneroid = classForm->relowner; - relation_entry->namespaceoid = classForm->relnamespace; - relation_entry->relstorage = classForm->relstorage; - relation_entry->rnode.node.spcNode = OidIsValid(classForm->reltablespace) ? - classForm->reltablespace : MyDatabaseTableSpace; - relation_entry->rnode.node.dbNode = MyDatabaseId; + relation_entry->owneroid = classForm->relowner; + relation_entry->namespaceoid = classForm->relnamespace; + relation_entry->relstorage = classForm->relstorage; + relation_entry->rnode.node.spcNode = + OidIsValid(classForm->reltablespace) ? classForm->reltablespace : MyDatabaseTableSpace; + relation_entry->rnode.node.dbNode = MyDatabaseId; relation_entry->rnode.node.relNode = classForm->relfilenode; - relation_entry->rnode.backend = classForm->relpersistence == RELPERSISTENCE_TEMP ? - TempRelBackendId : InvalidBackendId; + relation_entry->rnode.backend = + classForm->relpersistence == RELPERSISTENCE_TEMP ? 
TempRelBackendId : InvalidBackendId; /* toast table */ if (OidIsValid(classForm->reltoastrelid)) @@ -490,10 +482,10 @@ get_relation_entry_from_pg_class(Oid relid, DiskQuotaRelationCacheEntry* relatio } static void -get_relation_entry(Oid relid, DiskQuotaRelationCacheEntry* entry) +get_relation_entry(Oid relid, DiskQuotaRelationCacheEntry *entry) { - DiskQuotaRelationCacheEntry* tentry; - bool is_missing_relid; + DiskQuotaRelationCacheEntry *tentry; + bool is_missing_relid; LWLockAcquire(diskquota_locks.relation_cache_lock, LW_SHARED); tentry = hash_search(relation_cache, &relid, HASH_FIND, NULL); @@ -504,13 +496,13 @@ get_relation_entry(Oid relid, DiskQuotaRelationCacheEntry* entry) return; } LWLockRelease(diskquota_locks.relation_cache_lock); - + is_missing_relid = get_relation_entry_from_pg_class(relid, entry); if (is_missing_relid) { DiskQuotaRelationCacheEntry *relation_cache_entry; - HASH_SEQ_STATUS iter; + HASH_SEQ_STATUS iter; LWLockAcquire(diskquota_locks.relation_cache_lock, LW_SHARED); hash_seq_init(&iter, relation_cache); while ((relation_cache_entry = hash_seq_search(&iter)) != NULL) @@ -528,21 +520,19 @@ static void get_relfilenode_by_relid(Oid relid, RelFileNodeBackend *rnode, char *relstorage) { DiskQuotaRelationCacheEntry *relation_cache_entry; - HeapTuple classTup; - Form_pg_class classForm; - + HeapTuple classTup; + Form_pg_class classForm; + memset(rnode, 0, sizeof(RelFileNodeBackend)); classTup = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(relid)); if (HeapTupleIsValid(classTup)) { - classForm = (Form_pg_class) GETSTRUCT(classTup); - rnode->node.spcNode = OidIsValid(classForm->reltablespace) ? - classForm->reltablespace : MyDatabaseTableSpace; - rnode->node.dbNode = MyDatabaseId; + classForm = (Form_pg_class)GETSTRUCT(classTup); + rnode->node.spcNode = OidIsValid(classForm->reltablespace) ? classForm->reltablespace : MyDatabaseTableSpace; + rnode->node.dbNode = MyDatabaseId; rnode->node.relNode = classForm->relfilenode; - rnode->backend = classForm->relpersistence == RELPERSISTENCE_TEMP ? - TempRelBackendId : InvalidBackendId; - *relstorage = classForm->relstorage; + rnode->backend = classForm->relpersistence == RELPERSISTENCE_TEMP ? 
TempRelBackendId : InvalidBackendId; + *relstorage = classForm->relstorage; heap_freetuple(classTup); remove_cache_entry(relid, InvalidOid); return; @@ -552,7 +542,7 @@ get_relfilenode_by_relid(Oid relid, RelFileNodeBackend *rnode, char *relstorage) relation_cache_entry = hash_search(relation_cache, &relid, HASH_FIND, NULL); if (relation_cache_entry) { - *rnode = relation_cache_entry->rnode; + *rnode = relation_cache_entry->rnode; *relstorage = relation_cache_entry->relstorage; } LWLockRelease(diskquota_locks.relation_cache_lock); @@ -560,15 +550,14 @@ get_relfilenode_by_relid(Oid relid, RelFileNodeBackend *rnode, char *relstorage) return; } - static Size do_calculate_table_size(DiskQuotaRelationCacheEntry *entry) { - Size tablesize = 0; + Size tablesize = 0; RelFileNodeBackend rnode; - char relstorage = 0; - Oid subrelid; - int i; + char relstorage = 0; + Oid subrelid; + int i; get_relfilenode_by_relid(entry->relid, &rnode, &relstorage); tablesize += calculate_relation_size_all_forks(&rnode, relstorage); diff --git a/relation_cache.h b/relation_cache.h index 70a8080a1c4..62a7658285f 100644 --- a/relation_cache.h +++ b/relation_cache.h @@ -7,30 +7,30 @@ typedef struct DiskQuotaRelationCacheEntry { - Oid relid; - Oid primary_table_relid; - Oid auxrel_oid[10]; - Oid auxrel_num; - Oid owneroid; - Oid namespaceoid; - char relstorage; - RelFileNodeBackend rnode; -} DiskQuotaRelationCacheEntry; + Oid relid; + Oid primary_table_relid; + Oid auxrel_oid[10]; + Oid auxrel_num; + Oid owneroid; + Oid namespaceoid; + char relstorage; + RelFileNodeBackend rnode; +} DiskQuotaRelationCacheEntry; typedef struct DiskQuotaRelidCacheEntry { - Oid relfilenode; - Oid relid; -} DiskQuotaRelidCacheEntry; + Oid relfilenode; + Oid relid; +} DiskQuotaRelidCacheEntry; extern HTAB *relation_cache; extern void init_shm_worker_relation_cache(void); -extern Oid get_relid_by_relfilenode(RelFileNode relfilenode); +extern Oid get_relid_by_relfilenode(RelFileNode relfilenode); extern void remove_cache_entry(Oid relid, Oid relfilenode); -extern Oid get_uncommitted_table_relid(Oid relfilenode); +extern Oid get_uncommitted_table_relid(Oid relfilenode); extern void update_relation_cache(Oid relid); -extern Oid get_primary_table_oid(Oid relid); +extern Oid get_primary_table_oid(Oid relid); extern void remove_committed_relation_from_cache(void); extern Size calculate_table_size(Oid relid); From e3edb27b589125a0e12394ffb5e364b8e7521026 Mon Sep 17 00:00:00 2001 From: Sasasu Date: Thu, 17 Mar 2022 18:05:46 +0800 Subject: [PATCH 165/330] Remove the in-logic version check. After https://github.com/greenplum-db/diskquota/pull/166, we no longer allow one binary to run against two or more DDLs that have different versions. With the new method, a worker is blocked from entering normal status if the DDL version does not match. NOTE: the launcher has no version check; if the launcher DDL is modified, the behavior is undefined. This commit also removes some memory allocations by using static const char* SQL strings.
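To illustrate that last point: throughout this patch, SQL built at runtime with initStringInfo()/appendStringInfo() (one palloc'd buffer per call) is replaced by a static SQL string plus bound parameters. A minimal sketch of the pattern, assuming an SPI connection is already open as at the real call sites (the wrapper name delete_dbid_sketch is hypothetical, modeled on del_dbid_from_database_list below):

#include "postgres.h"
#include "catalog/pg_type.h"
#include "executor/spi.h"

/* Delete one row using a static SQL string and typed parameters,
 * instead of formatting the value into a palloc'd StringInfo buffer. */
static void
delete_dbid_sketch(Oid dbid)
{
	int ret = SPI_execute_with_args(
	        "delete from diskquota_namespace.database_list where dbid = $1",
	        1,                                 /* nargs */
	        (Oid[]){OIDOID},                   /* parameter types */
	        (Datum[]){ObjectIdGetDatum(dbid)}, /* parameter values */
	        NULL,                              /* no parameter is NULL */
	        false,                             /* read_only: DELETE mutates */
	        0);                                /* tcount: no row limit */
	if (ret != SPI_OK_DELETE)
		elog(ERROR, "delete from database_list failed: ret_code %d", ret);
}

Binding dbid as $1 means the value is never printed into the command text, so the SQL string itself can live in read-only static storage for the life of the backend.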
--- diskquota.c | 28 ++-- diskquota.h | 1 - diskquota_utility.c | 361 +++++++++++++++++++++++--------------------- gp_activetable.c | 31 +--- quotamodel.c | 146 +++++------------- 5 files changed, 247 insertions(+), 320 deletions(-) diff --git a/diskquota.c b/diskquota.c index 5ed631bc5e4..27bf04a4628 100644 --- a/diskquota.c +++ b/diskquota.c @@ -881,8 +881,8 @@ add_dbid_to_database_list(Oid dbid) { int ret; - Oid argt[1] = {INT4OID}; - Datum argv[1] = {Int32GetDatum(dbid)}; + Oid argt[1] = {OIDOID}; + Datum argv[1] = {ObjectIdGetDatum(dbid)}; ret = SPI_execute_with_args("select * from diskquota_namespace.database_list where dbid = $1", 1, argt, argv, NULL, true, 0); @@ -918,20 +918,20 @@ add_dbid_to_database_list(Oid dbid) static void del_dbid_from_database_list(Oid dbid) { - StringInfoData str; - int ret; - - initStringInfo(&str); - appendStringInfo(&str, "delete from diskquota_namespace.database_list where dbid=%u;", dbid); + int ret; /* errors will be cached in outer function */ - ret = SPI_execute(str.data, false, 0); - if (ret != SPI_OK_DELETE) - { - ereport(ERROR, (errmsg("[diskquota launcher] SPI_execute sql: \"%s\", errno: %d, ret_code: %d.", str.data, - errno, ret))); - } - pfree(str.data); + ret = SPI_execute_with_args("delete from diskquota_namespace.database_list where dbid = $1", 1, + (Oid[]){ + OIDOID, + }, + (Datum[]){ + ObjectIdGetDatum(dbid), + }, + NULL, false, 0); + + ereportif(ret != SPI_OK_DELETE, ERROR, + (errmsg("[diskquota launcher] del_dbid_from_database_list: errno: %d, ret_code: %d.", errno, ret))); } /* diff --git a/diskquota.h b/diskquota.h index 3301840d16d..5b12abaf2d2 100644 --- a/diskquota.h +++ b/diskquota.h @@ -148,7 +148,6 @@ extern bool diskquota_hardlimit; extern int SEGCOUNT; extern int worker_spi_get_extension_version(int *major, int *minor); -extern int get_ext_major_version(void); extern void truncateStringInfo(StringInfo str, int nchars); extern List *get_rel_oid_list(void); extern int64 calculate_relation_size_all_forks(RelFileNodeBackend *rnode, char relstorage); diff --git a/diskquota_utility.c b/diskquota_utility.c index 0d354fc98df..6c05968302e 100644 --- a/diskquota_utility.c +++ b/diskquota_utility.c @@ -74,7 +74,6 @@ static int64 get_size_in_mb(char *str); static void set_quota_config_internal(Oid targetoid, int64 quota_limit_mb, QuotaType type); static void set_target_internal(Oid primaryoid, Oid spcoid, int64 quota_limit_mb, QuotaType type); -int get_ext_major_version(void); List *get_rel_oid_list(void); /* ---- Help Functions to set quota limit. ---- */ @@ -87,12 +86,10 @@ List *get_rel_oid_list(void); Datum init_table_size_table(PG_FUNCTION_ARGS) { - int ret; - StringInfoData buf; + int ret; RangeVar *rv; Relation rel; - int extMajorVersion; /* * If error happens in init_table_size_table, just return error messages * to the client side. So there is no need to catch the error. @@ -124,49 +121,45 @@ init_table_size_table(PG_FUNCTION_ARGS) * from entry-db currently. */ SPI_connect(); - extMajorVersion = get_ext_major_version(); /* delete all the table size info in table_size if exist. 
*/ - initStringInfo(&buf); - appendStringInfo(&buf, "truncate table diskquota.table_size;"); - ret = SPI_execute(buf.data, false, 0); + ret = SPI_execute("truncate table diskquota.table_size", false, 0); if (ret != SPI_OK_UTILITY) elog(ERROR, "cannot truncate table_size table: error code %d", ret); - if (extMajorVersion == 1) - { - resetStringInfo(&buf); - appendStringInfo(&buf, - "INSERT INTO diskquota.table_size WITH all_size AS " - "(SELECT diskquota.pull_all_table_size() as a FROM gp_dist_random('gp_id') " - "UNION ALL SELECT diskquota.pull_all_table_size()) " - "SELECT (a).tableid, sum((a).size) FROM all_size GROUP BY (a).tableid;"); - ret = SPI_execute(buf.data, false, 0); - if (ret != SPI_OK_INSERT) elog(ERROR, "cannot insert into table_size table: error code %d", ret); - } else - { - resetStringInfo(&buf); - appendStringInfo(&buf, - "INSERT INTO diskquota.table_size WITH all_size AS " - "(SELECT diskquota.pull_all_table_size() as a FROM gp_dist_random('gp_id')) " - "SELECT (a).* FROM all_size;"); - ret = SPI_execute(buf.data, false, 0); - if (ret != SPI_OK_INSERT) elog(ERROR, "cannot insert into table_size table: error code %d", ret); - - resetStringInfo(&buf); - /* size is the sum of size on master and on all segments when segid == -1. */ - appendStringInfo(&buf, - "INSERT INTO diskquota.table_size WITH total_size AS " - "(SELECT * from diskquota.pull_all_table_size() " - "UNION ALL SELECT tableid, size, segid FROM diskquota.table_size) " - "SELECT tableid, sum(size) as size, -1 as segid FROM total_size GROUP BY tableid;"); - ret = SPI_execute(buf.data, false, 0); - if (ret != SPI_OK_INSERT) elog(ERROR, "cannot insert into table_size table: error code %d", ret); - } + ret = SPI_execute( + "INSERT INTO " + " diskquota.table_size " + "WITH all_size AS " + " (" + " SELECT diskquota.pull_all_table_size() AS a FROM gp_dist_random('gp_id')" + " ) " + "SELECT (a).* FROM all_size", + false, 0); + if (ret != SPI_OK_INSERT) elog(ERROR, "cannot insert into table_size table: error code %d", ret); + + /* size is the sum of size on master and on all segments when segid == -1. */ + ret = SPI_execute( + "INSERT INTO " + " diskquota.table_size " + "WITH total_size AS " + " (" + " SELECT * from diskquota.pull_all_table_size()" + " UNION ALL " + " SELECT tableid, size, segid FROM diskquota.table_size" + " ) " + "SELECT tableid, sum(size) as size, -1 as segid FROM total_size GROUP BY tableid;", + false, 0); + if (ret != SPI_OK_INSERT) elog(ERROR, "cannot insert into table_size table: error code %d", ret); /* set diskquota state to ready. */ - resetStringInfo(&buf); - appendStringInfo(&buf, "update diskquota.state set state = %u;", DISKQUOTA_READY_STATE); - ret = SPI_execute(buf.data, false, 0); + ret = SPI_execute_with_args("update diskquota.state set state = $1", 1, + (Oid[]){ + INT4OID, + }, + (Datum[]){ + Int32GetDatum(DISKQUOTA_READY_STATE), + }, + NULL, false, 0); if (ret != SPI_OK_UPDATE) elog(ERROR, "cannot update state table: error code %d", ret); SPI_finish(); @@ -402,6 +395,8 @@ dispatch_pause_or_resume_command(Oid dbid, bool pause_extension) pause_extension ? 
"pausing" : "resuming", PQresultStatus(pgresult)))); } } + + pfree(sql.data); cdbdisp_clearCdbPgResults(&cdb_pgresults); } @@ -497,15 +492,9 @@ diskquota_resume(PG_FUNCTION_ARGS) static bool is_database_empty(void) { - int ret; - StringInfoData buf; - TupleDesc tupdesc; - bool is_empty = false; - - initStringInfo(&buf); - appendStringInfo(&buf, - "SELECT (count(relname) = 0) FROM pg_class AS c, pg_namespace AS n WHERE c.oid > 16384 and " - "relnamespace = n.oid and nspname != 'diskquota'"); + int ret; + TupleDesc tupdesc; + bool is_empty = false; /* * If error happens in is_database_empty, just return error messages to @@ -513,8 +502,15 @@ is_database_empty(void) */ SPI_connect(); - ret = SPI_execute(buf.data, true, 0); + ret = SPI_execute( + "SELECT (count(relname) = 0) " + "FROM " + " pg_class AS c, " + " pg_namespace AS n " + "WHERE c.oid > 16384 and relnamespace = n.oid and nspname != 'diskquota'", + true, 0); if (ret != SPI_OK_SELECT) elog(ERROR, "cannot select pg_class and pg_namespace table: error code %d", errno); + tupdesc = SPI_tuptable->tupdesc; /* check sql return value whether database is empty */ if (SPI_processed > 0) @@ -821,14 +817,7 @@ set_schema_tablespace_quota(PG_FUNCTION_ARGS) static void set_quota_config_internal(Oid targetoid, int64 quota_limit_mb, QuotaType type) { - int ret; - StringInfoData buf; - - initStringInfo(&buf); - appendStringInfo(&buf, - "select true from diskquota.quota_config where targetoid = %u" - " and quotatype =%d", - targetoid, type); + int ret; /* * If error happens in set_quota_config_internal, just return error messages to @@ -836,34 +825,62 @@ set_quota_config_internal(Oid targetoid, int64 quota_limit_mb, QuotaType type) */ SPI_connect(); - ret = SPI_execute(buf.data, true, 0); + ret = SPI_execute_with_args("select true from diskquota.quota_config where targetoid = $1 and quotatype = $2", 2, + (Oid[]){ + OIDOID, + INT4OID, + }, + (Datum[]){ + ObjectIdGetDatum(targetoid), + Int32GetDatum(type), + }, + NULL, true, 0); if (ret != SPI_OK_SELECT) elog(ERROR, "cannot select quota setting table: error code %d", ret); /* if the schema or role's quota has been set before */ if (SPI_processed == 0 && quota_limit_mb > 0) { - resetStringInfo(&buf); - appendStringInfo(&buf, "insert into diskquota.quota_config values(%u,%d,%ld);", targetoid, type, - quota_limit_mb); - ret = SPI_execute(buf.data, false, 0); + ret = SPI_execute_with_args("insert into diskquota.quota_config values($1, $2, $3)", 3, + (Oid[]){ + OIDOID, + INT4OID, + INT8OID, + }, + (Datum[]){ + ObjectIdGetDatum(targetoid), + Int32GetDatum(type), + Int64GetDatum(quota_limit_mb), + }, + NULL, false, 0); if (ret != SPI_OK_INSERT) elog(ERROR, "cannot insert into quota setting table, error code %d", ret); } else if (SPI_processed > 0 && quota_limit_mb < 0) { - resetStringInfo(&buf); - appendStringInfo(&buf, - "delete from diskquota.quota_config where targetoid=%u" - " and quotatype=%d;", - targetoid, type); - ret = SPI_execute(buf.data, false, 0); + ret = SPI_execute_with_args("delete from diskquota.quota_config where targetoid = $1 and quotatype = $2", 2, + (Oid[]){ + OIDOID, + INT4OID, + }, + (Datum[]){ + ObjectIdGetDatum(targetoid), + Int32GetDatum(type), + }, + NULL, false, 0); if (ret != SPI_OK_DELETE) elog(ERROR, "cannot delete item from quota setting table, error code %d", ret); } else if (SPI_processed > 0 && quota_limit_mb > 0) { - resetStringInfo(&buf); - appendStringInfo(&buf, - "update diskquota.quota_config set quotalimitMB = %ld where targetoid=%u" - " and quotatype=%d;", - 
quota_limit_mb, targetoid, type); - ret = SPI_execute(buf.data, false, 0); + ret = SPI_execute_with_args( + "update diskquota.quota_config set quotalimitMB = $1 where targetoid= $2 and quotatype = $3", 3, + (Oid[]){ + INT8OID, + OIDOID, + INT4OID, + }, + (Datum[]){ + Int64GetDatum(quota_limit_mb), + ObjectIdGetDatum(targetoid), + Int32GetDatum(type), + }, + NULL, false, 0); if (ret != SPI_OK_UPDATE) elog(ERROR, "cannot update quota setting table, error code %d", ret); } @@ -877,18 +894,7 @@ set_quota_config_internal(Oid targetoid, int64 quota_limit_mb, QuotaType type) static void set_target_internal(Oid primaryoid, Oid spcoid, int64 quota_limit_mb, QuotaType type) { - int ret; - StringInfoData buf; - - initStringInfo(&buf); - appendStringInfo(&buf, - "select true from diskquota.quota_config as q, diskquota.target as t" - " where t.primaryOid = %u" - " and t.tablespaceOid=%u" - " and t.quotaType=%d" - " and t.quotaType=q.quotaType" - " and t.primaryOid=q.targetOid;", - primaryoid, spcoid, type); + int ret; /* * If error happens in set_quota_config_internal, just return error messages to @@ -896,24 +902,56 @@ set_target_internal(Oid primaryoid, Oid spcoid, int64 quota_limit_mb, QuotaType */ SPI_connect(); - ret = SPI_execute(buf.data, true, 0); + ret = SPI_execute_with_args( + "select true from diskquota.quota_config as q, diskquota.target as t" + " where t.primaryOid = $1" + " and t.tablespaceOid = $2" + " and t.quotaType = $3" + " and t.quotaType = q.quotaType" + " and t.primaryOid = q.targetOid", + 3, + (Oid[]){ + OIDOID, + OIDOID, + INT4OID, + }, + (Datum[]){ + ObjectIdGetDatum(primaryoid), + ObjectIdGetDatum(spcoid), + Int32GetDatum(type), + }, + NULL, true, 0); if (ret != SPI_OK_SELECT) elog(ERROR, "cannot select target setting table: error code %d", ret); /* if the schema or role's quota has been set before */ if (SPI_processed == 0 && quota_limit_mb > 0) { - resetStringInfo(&buf); - appendStringInfo(&buf, "insert into diskquota.target values(%d,%u,%u)", type, primaryoid, spcoid); - ret = SPI_execute(buf.data, false, 0); + ret = SPI_execute_with_args("insert into diskquota.target values($1, $2, $3)", 3, + (Oid[]){ + INT4OID, + OIDOID, + OIDOID, + }, + (Datum[]){ + Int32GetDatum(type), + ObjectIdGetDatum(primaryoid), + ObjectIdGetDatum(spcoid), + }, + NULL, false, 0); if (ret != SPI_OK_INSERT) elog(ERROR, "cannot insert into quota setting table, error code %d", ret); + } else if (SPI_processed > 0 && quota_limit_mb < 0) { - resetStringInfo(&buf); - appendStringInfo(&buf, - "delete from diskquota.target where primaryOid=%u" - " and tablespaceOid=%u;", - primaryoid, spcoid); - ret = SPI_execute(buf.data, false, 0); + ret = SPI_execute_with_args("delete from diskquota.target where primaryOid = $1 and tablespaceOid = $2", 2, + (Oid[]){ + OIDOID, + OIDOID, + }, + (Datum[]){ + ObjectIdGetDatum(primaryoid), + ObjectIdGetDatum(spcoid), + }, + NULL, false, 0); if (ret != SPI_OK_DELETE) elog(ERROR, "cannot delete item from target setting table, error code %d", ret); } @@ -1087,10 +1125,9 @@ set_per_segment_quota(PG_FUNCTION_ARGS) Oid spcoid; char *spcname; float4 ratio; - if (!superuser()) - { - ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("must be superuser to set disk quota limit"))); - } + + ereportif(!superuser(), ERROR, + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("must be superuser to set disk quota limit"))); spcname = text_to_cstring(PG_GETARG_TEXT_PP(0)); spcname = str_tolower(spcname, strlen(spcname), DEFAULT_COLLATION_OID); @@ -1098,12 +1135,8 @@ 
set_per_segment_quota(PG_FUNCTION_ARGS) ratio = PG_GETARG_FLOAT4(1); - if (ratio == 0) - { - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("per segment quota ratio can not be set to 0"))); - } - StringInfoData buf; + ereportif(ratio == 0, ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("per segment quota ratio can not be set to 0"))); if (SPI_OK_CONNECT != SPI_connect()) { @@ -1111,32 +1144,53 @@ set_per_segment_quota(PG_FUNCTION_ARGS) } /* Get all targetOid which are related to this tablespace, and saved into rowIds */ - initStringInfo(&buf); - appendStringInfo( - &buf, - "SELECT true FROM diskquota.target as t, diskquota.quota_config as q WHERE tablespaceOid = %u AND " - "(t.quotaType = %d OR t.quotaType = %d) AND t.primaryOid = q.targetOid AND t.quotaType = q.quotaType", - spcoid, NAMESPACE_TABLESPACE_QUOTA, ROLE_TABLESPACE_QUOTA); - - ret = SPI_execute(buf.data, true, 0); + ret = SPI_execute_with_args( + "SELECT true FROM diskquota.target AS t, diskquota.quota_config AS q WHERE tablespaceOid = $1 AND " + "(t.quotaType = $2 OR t.quotaType = $3) AND t.primaryOid = q.targetOid AND t.quotaType = q.quotaType", + 3, + (Oid[]){ + OIDOID, + INT4OID, + INT4OID, + }, + (Datum[]){ + ObjectIdGetDatum(spcoid), + Int32GetDatum(NAMESPACE_TABLESPACE_QUOTA), + Int32GetDatum(ROLE_TABLESPACE_QUOTA), + }, + NULL, true, 0); if (ret != SPI_OK_SELECT) elog(ERROR, "cannot select target and quota setting table: error code %d", ret); + if (SPI_processed <= 0) { ereport(ERROR, (errmsg("there are no roles or schema quota configed for this tablespace: %s, can't config per " "segment ratio for it", spcname))); } - resetStringInfo(&buf); - appendStringInfo(&buf, - "UPDATE diskquota.quota_config AS q set segratio = %f FROM diskquota.target AS t WHERE " - "q.targetOid = t.primaryOid AND (t.quotaType = %d OR t.quotaType = %d) AND t.quotaType = " - "q.quotaType And t.tablespaceOid = %d", - ratio, NAMESPACE_TABLESPACE_QUOTA, ROLE_TABLESPACE_QUOTA, spcoid); + /* * UPDATEA NAMESPACE_TABLESPACE_PERSEG_QUOTA AND ROLE_TABLESPACE_PERSEG_QUOTA config for this tablespace */ - ret = SPI_execute(buf.data, false, 0); + ret = SPI_execute_with_args( + "UPDATE diskquota.quota_config AS q set segratio = $1 FROM diskquota.target AS t WHERE " + "q.targetOid = t.primaryOid AND (t.quotaType = $2 OR t.quotaType = $3) AND t.quotaType = " + "q.quotaType And t.tablespaceOid = $4", + 4, + (Oid[]){ + FLOAT4OID, + INT4OID, + INT4OID, + OIDOID, + }, + (Datum[]){ + Float4GetDatum(ratio), + Int32GetDatum(NAMESPACE_TABLESPACE_QUOTA), + Int32GetDatum(ROLE_TABLESPACE_QUOTA), + ObjectIdGetDatum(spcoid), + }, + NULL, false, 0); if (ret != SPI_OK_UPDATE) elog(ERROR, "cannot update item from quota setting table, error code %d", ret); + /* * And finish our transaction. */ @@ -1199,43 +1253,6 @@ worker_spi_get_extension_version(int *major, int *minor) return ret; } -/* - * Get major version from extversion, and convert it to int - * 0 means an invalid major version. 
- */ -int -get_ext_major_version(void) -{ - int ret; - TupleDesc tupdesc; - HeapTuple tup; - Datum dat; - bool isnull; - char *extversion; - - ret = SPI_execute("select COALESCE(extversion,'') from pg_extension where extname = 'diskquota'", true, 0); - if (ret != SPI_OK_SELECT) - ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("[diskquota] check diskquota state SPI_execute failed: error code %d", ret))); - - tupdesc = SPI_tuptable->tupdesc; - if (tupdesc->natts != 1 || ((tupdesc)->attrs[0])->atttypid != TEXTOID || SPI_processed != 1) - { - ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), errmsg("[diskquota] can not get diskquota extesion version"))); - } - - tup = SPI_tuptable->vals[0]; - dat = SPI_getbinval(tup, tupdesc, 1, &isnull); - if (isnull) - ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), errmsg("[diskquota] can not get diskquota extesion version"))); - extversion = TextDatumGetCString(dat); - if (extversion) - { - return (int)strtol(extversion, (char **)NULL, 10); - } - return 0; -} - /* * Get the list of oids of the tables which diskquota * needs to care about in the database. @@ -1247,19 +1264,19 @@ get_ext_major_version(void) List * get_rel_oid_list(void) { - List *oidlist = NIL; - StringInfoData buf; - int ret; - - initStringInfo(&buf); - appendStringInfo(&buf, - "select oid " - " from pg_class" - " where oid >= %u and (relkind='r' or relkind='m')", - FirstNormalObjectId); - - ret = SPI_execute(buf.data, false, 0); + List *oidlist = NIL; + int ret; + + ret = SPI_execute_with_args("select oid from pg_class where oid >= $1 and (relkind='r' or relkind='m')", 1, + (Oid[]){ + OIDOID, + }, + (Datum[]){ + ObjectIdGetDatum(FirstNormalObjectId), + }, + NULL, false, 0); if (ret != SPI_OK_SELECT) elog(ERROR, "cannot fetch in pg_class. error code %d", ret); + TupleDesc tupdesc = SPI_tuptable->tupdesc; for (int i = 0; i < SPI_processed; i++) { diff --git a/gp_activetable.c b/gp_activetable.c index 231c0bc3156..ddca783148c 100644 --- a/gp_activetable.c +++ b/gp_activetable.c @@ -365,14 +365,12 @@ diskquota_fetch_table_stat(PG_FUNCTION_ARGS) { MemoryContext oldcontext; TupleDesc tupdesc; - int extMajorVersion; int ret_code = SPI_connect(); if (ret_code != SPI_OK_CONNECT) { ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), errmsg("unable to connect to execute internal query. 
return code: %d.", ret_code))); } - extMajorVersion = get_ext_major_version(); SPI_finish(); /* create a function context for cross-call persistence */ @@ -414,21 +412,10 @@ diskquota_fetch_table_stat(PG_FUNCTION_ARGS) /* * prepare attribute metadata for next calls that generate the tuple */ - switch (extMajorVersion) - { - case 1: - tupdesc = CreateTemplateTupleDesc(2, false); - break; - case 2: - tupdesc = CreateTemplateTupleDesc(3, false); - TupleDescInitEntry(tupdesc, (AttrNumber)3, "GP_SEGMENT_ID", INT2OID, -1, 0); - break; - default: - ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("[diskquota] unknown diskquota extension version: %d", extMajorVersion))); - } + tupdesc = CreateTemplateTupleDesc(3, false); TupleDescInitEntry(tupdesc, (AttrNumber)1, "TABLE_OID", OIDOID, -1, 0); TupleDescInitEntry(tupdesc, (AttrNumber)2, "TABLE_SIZE", INT8OID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)3, "GP_SEGMENT_ID", INT2OID, -1, 0); attinmeta = TupleDescGetAttInMetadata(tupdesc); funcctx->attinmeta = attinmeta; @@ -796,20 +783,8 @@ load_table_size(HTAB *local_table_stats_map) bool found; TableEntryKey key; DiskQuotaActiveTableEntry *quota_entry; - int extMajorVersion = get_ext_major_version(); - switch (extMajorVersion) - { - case 1: - ret = SPI_execute("select tableid, size, CAST(-1 AS smallint) from diskquota.table_size", true, 0); - break; - case 2: - ret = SPI_execute("select tableid, size, segid from diskquota.table_size", true, 0); - break; - default: - ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("[diskquota] unknown diskquota extension version: %d", extMajorVersion))); - } + ret = SPI_execute("select tableid, size, segid from diskquota.table_size", true, 0); if (ret != SPI_OK_SELECT) ereport(ERROR, (errmsg("[diskquota] load_table_size SPI_execute failed: return code %d, error: %m", ret))); diff --git a/quotamodel.c b/quotamodel.c index d134a79a17a..0887cdab622 100644 --- a/quotamodel.c +++ b/quotamodel.c @@ -571,25 +571,25 @@ check_diskquota_state_is_ready(void) static bool do_check_diskquota_state_is_ready(void) { - int ret; - TupleDesc tupdesc; - int i; - StringInfoData sql_command; + int ret; + TupleDesc tupdesc; + int i; - initStringInfo(&sql_command); /* Add current database to the monitored db cache on all segments */ - appendStringInfo(&sql_command, - "SELECT diskquota.diskquota_fetch_table_stat(%d, ARRAY[]::oid[]) " - "FROM gp_dist_random('gp_id');", - ADD_DB_TO_MONITOR); - ret = SPI_execute(sql_command.data, true, 0); - if (ret != SPI_OK_SELECT) - { - pfree(sql_command.data); - ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("[diskquota] check diskquota state SPI_execute failed: error code %d", ret))); - } - pfree(sql_command.data); + ret = SPI_execute_with_args( + "SELECT diskquota.diskquota_fetch_table_stat($1, ARRAY[]::oid[]) FROM gp_dist_random('gp_id')", 1, + (Oid[]){ + INT4OID, + }, + (Datum[]){ + Int32GetDatum(ADD_DB_TO_MONITOR), + }, + NULL, true, 0); + + ereportif(ret != SPI_OK_SELECT, ERROR, + (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("[diskquota] check diskquota state SPI_execute failed: error code %d", ret))); + /* Add current database to the monitored db cache on coordinator */ update_diskquota_db_list(MyDatabaseId, HASH_ENTER); /* @@ -597,9 +597,9 @@ do_check_diskquota_state_is_ready(void) * at upper level function. 
*/ ret = SPI_execute("select state from diskquota.state", true, 0); - if (ret != SPI_OK_SELECT) - ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("[diskquota] check diskquota state SPI_execute failed: error code %d", ret))); + ereportif(ret != SPI_OK_SELECT, ERROR, + (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("[diskquota] check diskquota state SPI_execute failed: error code %d", ret))); tupdesc = SPI_tuptable->tupdesc; if (tupdesc->natts != 1 || ((tupdesc)->attrs[0])->atttypid != INT4OID) @@ -970,7 +970,6 @@ flush_to_table_size(void) bool delete_statement_flag = false; bool insert_statement_flag = false; int ret; - int extMajorVersion = get_ext_major_version(); /* TODO: Add flush_size_interval to avoid flushing size info in every loop */ @@ -983,24 +982,16 @@ flush_to_table_size(void) initStringInfo(&insert_statement); appendStringInfo(&insert_statement, "insert into diskquota.table_size values "); + + initStringInfo(&delete_statement); + hash_seq_init(&iter, table_size_map); while ((tsentry = hash_seq_search(&iter)) != NULL) { /* delete dropped table from both table_size_map and table table_size */ if (tsentry->is_exist == false) { - switch (extMajorVersion) - { - case 1: - appendStringInfo(&deleted_table_expr, "%u, ", tsentry->reloid); - break; - case 2: - appendStringInfo(&deleted_table_expr, "(%u,%d), ", tsentry->reloid, tsentry->segid); - break; - default: - ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("[diskquota] unknown diskquota extension version: %d", extMajorVersion))); - } + appendStringInfo(&deleted_table_expr, "(%u,%d), ", tsentry->reloid, tsentry->segid); delete_statement_flag = true; hash_search(table_size_map, &tsentry->reloid, HASH_REMOVE, NULL); @@ -1009,28 +1000,10 @@ flush_to_table_size(void) else if (tsentry->need_flush == true) { tsentry->need_flush = false; - switch (extMajorVersion) - { - case 1: - if (tsentry->segid == -1) - { - appendStringInfo(&deleted_table_expr, "%u, ", tsentry->reloid); - appendStringInfo(&insert_statement, "(%u,%ld), ", tsentry->reloid, tsentry->totalsize); - delete_statement_flag = true; - insert_statement_flag = true; - } - break; - case 2: - appendStringInfo(&deleted_table_expr, "(%u,%d), ", tsentry->reloid, tsentry->segid); - appendStringInfo(&insert_statement, "(%u,%ld,%d), ", tsentry->reloid, tsentry->totalsize, - tsentry->segid); - delete_statement_flag = true; - insert_statement_flag = true; - break; - default: - ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("[diskquota] unknown diskquota extension version: %d", extMajorVersion))); - } + appendStringInfo(&deleted_table_expr, "(%u,%d), ", tsentry->reloid, tsentry->segid); + appendStringInfo(&insert_statement, "(%u,%ld,%d), ", tsentry->reloid, tsentry->totalsize, tsentry->segid); + delete_statement_flag = true; + insert_statement_flag = true; } } truncateStringInfo(&deleted_table_expr, deleted_table_expr.len - strlen(", ")); @@ -1041,23 +1014,10 @@ flush_to_table_size(void) if (delete_statement_flag) { /* concatenate all the need_to_flush table to SQL string */ - initStringInfo(&delete_statement); appendStringInfoString(&delete_statement, (const char *)deleted_table_expr.data); - switch (extMajorVersion) - { - case 1: - appendStringInfo(&delete_statement, - "delete from diskquota.table_size where tableid in ( SELECT * FROM deleted_table );"); - break; - case 2: - appendStringInfo( - &delete_statement, - "delete from diskquota.table_size where (tableid, segid) in ( SELECT * FROM deleted_table );"); - break; - default: - ereport(ERROR, 
(errcode(ERRCODE_INTERNAL_ERROR), - errmsg("[diskquota] unknown diskquota extension version: %d", extMajorVersion))); - } + appendStringInfoString( + &delete_statement, + "delete from diskquota.table_size where (tableid, segid) in ( SELECT * FROM deleted_table );"); ret = SPI_execute(delete_statement.data, false, 0); if (ret != SPI_OK_DELETE) ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), @@ -1072,6 +1032,10 @@ flush_to_table_size(void) } optimizer = old_optimizer; + + pfree(delete_statement.data); + pfree(insert_statement.data); + pfree(deleted_table_expr.data); } /* @@ -1255,7 +1219,6 @@ do_load_quotas(void) int ret; TupleDesc tupdesc; int i; - int extMajorVersion; /* * TODO: we should skip to reload quota config when there is no change in @@ -1263,43 +1226,16 @@ do_load_quotas(void) * config change. */ clear_all_quota_maps(); - extMajorVersion = get_ext_major_version(); /* * read quotas from diskquota.quota_config and target table */ - - /* - * We need to check the extension version. - * Why do we need this? - * As when we upgrade diskquota extension from an old to a new version, - * we need firstly reload the new diskquota.so and then execute the - * upgrade SQL. However, between the 2 steps, the new diskquota.so - * needs to work with the old version diskquota sql file, otherwise, - * the init work will fail and diskquota can not work correctly. - * Maybe this is not the best sulotion, only a work arround. Optimizing - * the init procedure is a better solution. - */ - switch (extMajorVersion) - { - case 1: - ret = SPI_execute( - "select targetoid, quotatype, quotalimitMB, 0 as segratio, 0 as tablespaceoid from " - "diskquota.quota_config", - true, 0); - break; - case 2: - ret = SPI_execute( - "SELECT c.targetOid, c.quotaType, c.quotalimitMB, COALESCE(c.segratio, 0) AS segratio, " - "COALESCE(t.tablespaceoid, 0) AS tablespaceoid " - "FROM diskquota.quota_config AS c LEFT OUTER JOIN diskquota.target AS t " - "ON c.targetOid = t.primaryOid and c.quotaType = t.quotaType", - true, 0); - break; - default: - ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("[diskquota] unknown diskquota extension version: %d", extMajorVersion))); - } + ret = SPI_execute( + "SELECT c.targetOid, c.quotaType, c.quotalimitMB, COALESCE(c.segratio, 0) AS segratio, " + "COALESCE(t.tablespaceoid, 0) AS tablespaceoid " + "FROM diskquota.quota_config AS c LEFT OUTER JOIN diskquota.target AS t " + "ON c.targetOid = t.primaryOid and c.quotaType = t.quotaType", + true, 0); if (ret != SPI_OK_SELECT) ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), errmsg("[diskquota] load_quotas SPI_execute failed: error code %d", ret))); From 48c773d8aadd1b95f94fa99b020705bd98630642 Mon Sep 17 00:00:00 2001 From: Sasasu Date: Wed, 16 Mar 2022 11:38:38 +0800 Subject: [PATCH 166/330] ci: add upgrade version naming convention check --- CMakeLists.txt | 33 +++++++++++--- cmake/Gpdb.cmake | 2 +- upgrade_test/CMakeLists.txt | 91 +++++++++++++++++++++++++++++++------ 3 files changed, 104 insertions(+), 22 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 720e7bced89..3b02affa935 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,5 +1,4 @@ cmake_minimum_required(VERSION 3.18) -# include_guard() need 3.10 # file(ARCHIVE_EXTRACT foo) need 3.18 project(diskquota) @@ -72,7 +71,9 @@ list( add_library(diskquota MODULE ${diskquota_SRC}) if(CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT) - set(CMAKE_INSTALL_PREFIX "${PG_HOME}" CACHE PATH "default install prefix" FORCE) + set(CMAKE_INSTALL_PREFIX + "${PG_HOME}" + CACHE PATH 
"default install prefix" FORCE) endif() set_target_properties( @@ -82,10 +83,6 @@ set_target_properties( C_STANDARD 99 LINKER_LANGUAGE "CXX") -# Add installcheck targets -add_subdirectory(tests) -add_subdirectory(upgrade_test) - # packing part, move to a separate file if this part is too large include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/Distro.cmake) @@ -95,6 +92,26 @@ if(DEFINED DISKQUOTA_LAST_RELEASE_PATH) file(GLOB DISKQUOTA_PREVIOUS_LIBRARY "${CMAKE_BINARY_DIR}/lib/postgresql/*.so") install(PROGRAMS ${DISKQUOTA_PREVIOUS_LIBRARY} DESTINATION "lib/postgresql/") + + get_filename_component( + DISKQUOTA_LAST_RELEASE_FILENAME ${DISKQUOTA_LAST_RELEASE_PATH} NAME CACHE + "last release installer name") + string( + REGEX + REPLACE "^diskquota-([0-9]+).[0-9]+.[0-9]+-.*$" "\\1" + DISKQUOTA_LAST_MAJOR_VERSION ${DISKQUOTA_LAST_RELEASE_FILENAME}) + string( + REGEX + REPLACE "^diskquota-[0-9]+.([0-9]+).[0-9]+-.*$" "\\1" + DISKQUOTA_LAST_MINOR_VERSION ${DISKQUOTA_LAST_RELEASE_FILENAME}) + string( + REGEX + REPLACE "^diskquota-[0-9]+.[0-9]+.([0-9]+)-.*$" "\\1" + DISKQUOTA_LAST_PATCH_VERSION ${DISKQUOTA_LAST_RELEASE_FILENAME}) + + set(DISKQUOTA_LAST_VERSION + "${DISKQUOTA_LAST_MAJOR_VERSION}.${DISKQUOTA_LAST_MINOR_VERSION}.${DISKQUOTA_LAST_PATCH_VERSION}" + ) endif() set(CPACK_GENERATOR "TGZ") @@ -117,6 +134,10 @@ BuildInfo_Create(${build_info_PATH} GP_VERSION) # Create build-info end +# Add installcheck targets +add_subdirectory(tests) +add_subdirectory(upgrade_test) + # NOTE: keep install part at the end of file, to overwrite previous binary install(PROGRAMS "cmake/install_gpdb_component" DESTINATION ".") install(FILES ${diskquota_DDL} DESTINATION "share/postgresql/extension/") diff --git a/cmake/Gpdb.cmake b/cmake/Gpdb.cmake index b98d6dd47fd..4758d2d70a3 100644 --- a/cmake/Gpdb.cmake +++ b/cmake/Gpdb.cmake @@ -46,7 +46,7 @@ if (NOT PG_SRC_DIR) execute_process( COMMAND_ECHO STDOUT COMMAND - grep abs_top_builddir ${makefile_global} + grep "^abs_top_builddir" ${makefile_global} COMMAND sed s/.*abs_top_builddir.*=\\\(.*\\\)/\\1/ OUTPUT_VARIABLE PG_SRC_DIR OUTPUT_STRIP_TRAILING_WHITESPACE) diff --git a/upgrade_test/CMakeLists.txt b/upgrade_test/CMakeLists.txt index 286872966c9..be07de9f678 100644 --- a/upgrade_test/CMakeLists.txt +++ b/upgrade_test/CMakeLists.txt @@ -1,18 +1,79 @@ include(${CMAKE_SOURCE_DIR}/cmake/Regress.cmake) -RegressTarget_Add(upgrade - INIT_FILE - ${CMAKE_CURRENT_SOURCE_DIR}/init_file - SQL_DIR ${CMAKE_CURRENT_SOURCE_DIR}/sql - EXPECTED_DIR ${CMAKE_CURRENT_SOURCE_DIR}/expected - RESULTS_DIR ${CMAKE_CURRENT_SOURCE_DIR}/results - SCHEDULE_FILE - ${CMAKE_CURRENT_SOURCE_DIR}/schedule_1.0--2.0 - ${CMAKE_CURRENT_SOURCE_DIR}/schedule_2.0--1.0 - REGRESS_OPTS --dbname=contrib_regression) - -# not use `installcheck` target on purpose. 
-# upgrade test is not needed in feature development -add_custom_target(upgradecheck) +regresstarget_add( + upgradecheck + INIT_FILE + ${CMAKE_CURRENT_SOURCE_DIR}/init_file + SQL_DIR + ${CMAKE_CURRENT_SOURCE_DIR}/sql + EXPECTED_DIR + ${CMAKE_CURRENT_SOURCE_DIR}/expected + RESULTS_DIR + ${CMAKE_CURRENT_SOURCE_DIR}/results + SCHEDULE_FILE + ${CMAKE_CURRENT_SOURCE_DIR}/schedule_1.0--2.0 + ${CMAKE_CURRENT_SOURCE_DIR}/schedule_2.0--1.0 + REGRESS_OPTS + --dbname=contrib_regression) + +# check whether a DDL file (*.sql) is modified +file(GLOB ddl_files ${CMAKE_SOURCE_DIR}/*.sql) +foreach(ddl IN LISTS ddl_files) + exec_program( + git ARGS + diff --exit-code ${ddl} + OUTPUT_VARIABLE NULL + RETURN_VALUE "${ddl}_modified") + + if("${${ddl}_modified}") + message( + NOTICE + "detected DDL file ${ddl} is modified, checking if upgrade test is needed." + ) + set(DISKQUOTA_DDL_MODIFIED TRUE) + endif() +endforeach() + +# if a DDL file is modified, ensure the last release file is passed in +if(DISKQUOTA_DDL_MODIFIED AND NOT DEFINED DISKQUOTA_LAST_RELEASE_PATH) + message( + FATAL_ERROR + "DDL file modification detected, upgrade test is required. Add -DDISKQUOTA_LAST_RELEASE_PATH=//diskquota--_.tar.gz. And re-try the generation" + ) +endif() + +# check if current version is compatible with the upgrade strategy +if(DISKQUOTA_DDL_MODIFIED AND DEFINED DISKQUOTA_LAST_RELEASE_PATH) + message(NOTICE "current version ${DISKQUOTA_VERSION}") + message(NOTICE "last version ${DISKQUOTA_LAST_VERSION}") -add_dependencies(upgradecheck upgrade) + # if 1.0.a = 1.0.b reject + if("${DISKQUOTA_MAJOR_VERSION}.${DISKQUOTA_MINOR_VERSION}" STREQUAL + "${DISKQUOTA_LAST_MAJOR_VERSION}.${DISKQUOTA_LAST_MINOR_VERSION}") + message(FATAL_ERROR "should bump at least one minor version") + endif() + + # if 1.0.a to 1.2.b reject + math(EXPR DISKQUOTA_NEXT_MINOR_VERSION "${DISKQUOTA_LAST_MINOR_VERSION} + 1") + if(("${DISKQUOTA_MAJOR_VERSION}" STREQUAL "${DISKQUOTA_LAST_MAJOR_VERSION}") + AND (NOT "${DISKQUOTA_MINOR_VERSION}" STREQUAL + "${DISKQUOTA_NEXT_MINOR_VERSION}")) + message(FATAL_ERROR "should not skip any minor version") + endif() + + # if 1.a.a to 3.a.a reject + math(EXPR DISKQUOTA_NEXT_MAJOR_VERSION "${DISKQUOTA_LAST_MAJOR_VERSION} + 1") + if((NOT "${DISKQUOTA_MAJOR_VERSION}" STREQUAL + "${DISKQUOTA_LAST_MAJOR_VERSION}") + AND (NOT "${DISKQUOTA_NEXT_MAJOR_VERSION}" STREQUAL + "${DISKQUOTA_MAJOR_VERSION}")) + message(FATAL_ERROR "should not skip any major version") + endif() + + message( + NOTICE + "upgrade from ${DISKQUOTA_LAST_VERSION} to ${DISKQUOTA_VERSION} is available" + ) +endif() + +# upgrade test is not needed in feature development From 8340d122a0bb7a902bb36acd2424c3a0d0d8e8c0 Mon Sep 17 00:00:00 2001 From: Chen Mulong Date: Mon, 21 Mar 2022 11:52:04 +0800 Subject: [PATCH 167/330] Support release build (#186) - Introduce macro DISKQUOTA_DEBUG through CMAKE_BUILD_TYPE. For the debug build, check the macro and allow naptime = 0. The min naptime for the release build is 1 to prevent high CPU usage in the user's environment. - Add EXCLUDE arg to Regress.cmake to skip some tests for the release build since the release GPDB build doesn't support fault injector. - Use GPDB release candidate to compile diskquota release. The release candidate has assertions disabled in pg_config.h. - Setting naptime=0 in config.sql won't take effect for diskquota release since it is less than the min value. So the naptime stays the same (normally the default value, 2). This fails some of the tests, especially the hardlimit-related ones.
If the insertion takes less time than naptime (2 seconds, normally), it fails since the limitation has not been dispatched before finish. Thus, the naptime is set to 2 seconds instead of 10 in the reset_config. --- CMakeLists.txt | 16 ++++++- cmake/Regress.cmake | 11 ++++- concourse/pipeline/dev.yml | 6 ++- concourse/pipeline/job_def.lib.yml | 22 +++++---- concourse/pipeline/res_def.yml | 48 +++++++++++++++---- concourse/scripts/build_diskquota.sh | 4 +- concourse/scripts/entry.sh | 14 +++--- concourse/tasks/build_diskquota.yml | 1 + diskquota.c | 8 +++- tests/CMakeLists.txt | 23 ++++++++- tests/isolation2/expected/config.out | 3 ++ tests/isolation2/expected/reset_config.out | 4 +- .../expected/test_postmaster_restart.out | 4 +- tests/isolation2/sql/config.sql | 3 ++ tests/isolation2/sql/reset_config.sql | 2 +- .../sql/test_postmaster_restart.sql | 4 +- tests/regress/expected/config.out | 4 +- tests/regress/expected/reset_config.out | 2 +- tests/regress/expected/test_ctas_pause.out | 4 +- .../expected/test_default_tablespace.out | 12 +++++ .../expected/test_drop_after_pause.out | 2 +- .../regress/expected/test_update_db_cache.out | 2 +- tests/regress/sql/config.sql | 2 + tests/regress/sql/reset_config.sql | 2 +- tests/regress/sql/test_ctas_pause.sql | 4 +- tests/regress/sql/test_default_tablespace.sql | 2 + tests/regress/sql/test_drop_after_pause.sql | 2 +- tests/regress/sql/test_update_db_cache.sql | 2 +- 28 files changed, 164 insertions(+), 49 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 3b02affa935..94f4aed98cb 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -3,6 +3,12 @@ cmake_minimum_required(VERSION 3.18) project(diskquota) +if(NOT CMAKE_BUILD_TYPE) + message(STATUS "Setting build type to 'Debug' as none was specified.") + set(CMAKE_BUILD_TYPE "Debug" CACHE + STRING "Choose the type of build." FORCE) +endif() + # generate 'compile_commands.json' set(CMAKE_EXPORT_COMPILE_COMMANDS ON) @@ -16,6 +22,13 @@ include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/Gpdb.cmake) # set include directories for all sub-projects include_directories(${PG_INCLUDE_DIR_SERVER}) include_directories(${PG_INCLUDE_DIR}) # for libpq +# Overwrite the default build type flags set by cmake. +# We don't want the '-O3 -DNDEBUG' from cmake. Instead, those will be set by the CFLAGS from pg_config. +# And, the good news is, GPDB release always have '-g'. +set(CMAKE_C_FLAGS_RELEASE "" CACHE + STRING "Flags for RELEASE build" FORCE) +set(CMAKE_C_FLAGS_DEBUG "-DDISKQUOTA_DEBUG" + CACHE STRING "Flags for DEBUG build" FORCE) # set link flags for all sub-projects set(CMAKE_SHARED_LINKER_FLAGS "${PG_LD_FLAGS}") # set c and ld flags for all projects @@ -131,7 +144,8 @@ BuildInfo_Create(${build_info_PATH} DISKQUOTA_GIT_HASH DISKQUOTA_VERSION GP_MAJOR_VERSION - GP_VERSION) + GP_VERSION + CMAKE_BUILD_TYPE) # Create build-info end # Add installcheck targets diff --git a/cmake/Regress.cmake b/cmake/Regress.cmake index c2eabb09f33..77df6650335 100644 --- a/cmake/Regress.cmake +++ b/cmake/Regress.cmake @@ -8,6 +8,7 @@ # [INIT_FILE ...] # [SCHEDULE_FILE ...] # [REGRESS ...] +# [EXCLUDE ...] # [REGRESS_OPTS ...] 
# [REGRESS_TYPE isolation2/regress] # ) @@ -50,7 +51,7 @@ function(RegressTarget_Add name) arg "" "SQL_DIR;EXPECTED_DIR;RESULTS_DIR;DATA_DIR;REGRESS_TYPE" - "REGRESS;REGRESS_OPTS;INIT_FILE;SCHEDULE_FILE" + "REGRESS;EXCLUDE;REGRESS_OPTS;INIT_FILE;SCHEDULE_FILE" ${ARGN}) if (NOT arg_EXPECTED_DIR) message(FATAL_ERROR @@ -93,6 +94,14 @@ function(RegressTarget_Add name) get_filename_component(schedule_file_PATH ${o} ABSOLUTE) list(APPEND arg_REGRESS_OPTS "--schedule=${schedule_file_PATH}") endforeach() + foreach(o IN LISTS arg_EXCLUDE) + list(APPEND to_exclude ${o}) + endforeach() + if (to_exclude) + set(exclude_arg "--exclude-tests=${to_exclude}") + string(REPLACE ";" "," exclude_arg "${exclude_arg}") + set(regress_opts_arg ${regress_opts_arg} ${exclude_arg}) + endif() foreach(o IN LISTS arg_REGRESS_OPTS) set(regress_opts_arg ${regress_opts_arg} ${o}) endforeach() diff --git a/concourse/pipeline/dev.yml b/concourse/pipeline/dev.yml index e657e3986bc..b033e8b6488 100644 --- a/concourse/pipeline/dev.yml +++ b/concourse/pipeline/dev.yml @@ -14,7 +14,11 @@ #@ res_type_map = {} #@ trigger = commit_dev_trigger(res_map) #@ confs= [ -#@ ubuntu18_gpdb6_conf()] +#@ centos6_gpdb6_conf(release_build=True), +#@ centos7_gpdb6_conf(release_build=True), +#@ rhel8_gpdb6_conf(release_build=True), +#@ ubuntu18_gpdb6_conf(release_build=True) +#@ ] jobs: #@ param = { #@ "res_map": res_map, diff --git a/concourse/pipeline/job_def.lib.yml b/concourse/pipeline/job_def.lib.yml index 1468041288d..fa150031296 100644 --- a/concourse/pipeline/job_def.lib.yml +++ b/concourse/pipeline/job_def.lib.yml @@ -2,43 +2,48 @@ #@ load("@ytt:template", "template") #! Job config for centos6 -#@ def centos6_gpdb6_conf(): +#! Use bin_gpdb_postfix="" to use a release version of gpdb binary +#@ def centos6_gpdb6_conf(release_build=False): res_build_image: centos6-gpdb6-image-build res_test_image: centos6-gpdb6-image-test -res_gpdb_bin: bin_gpdb6_centos6 +res_gpdb_bin: #@ "bin_gpdb6_centos6" + ("" if release_build else "_debug") res_diskquota_bin: bin_diskquota_gpdb6_rhel6 res_intermediates_bin: bin_diskquota_gpdb6_rhel6_intermediates os: rhel6 +build_type: #@ "Release" if release_build else "Debug" #@ end #! Job config for centos7 -#@ def centos7_gpdb6_conf(): +#@ def centos7_gpdb6_conf(release_build=False): res_build_image: centos7-gpdb6-image-build res_test_image: centos7-gpdb6-image-test -res_gpdb_bin: bin_gpdb6_centos7 +res_gpdb_bin: #@ "bin_gpdb6_centos7" + ("" if release_build else "_debug") res_diskquota_bin: bin_diskquota_gpdb6_rhel7 res_intermediates_bin: bin_diskquota_gpdb6_rhel7_intermediates os: rhel7 +build_type: #@ "Release" if release_build else "Debug" #@ end #! Job config for rhel8 -#@ def rhel8_gpdb6_conf(): +#@ def rhel8_gpdb6_conf(release_build=False): res_build_image: rhel8-gpdb6-image-build res_test_image: rhel8-gpdb6-image-test -res_gpdb_bin: bin_gpdb6_rhel8 +res_gpdb_bin: #@ "bin_gpdb6_rhel8" + ("" if release_build else "_debug") res_diskquota_bin: bin_diskquota_gpdb6_rhel8 res_intermediates_bin: bin_diskquota_gpdb6_rhel8_intermediates os: rhel8 +build_type: #@ "Release" if release_build else "Debug" #@ end #! 
Job config for ubuntu18 -#@ def ubuntu18_gpdb6_conf(): +#@ def ubuntu18_gpdb6_conf(release_build=False): res_build_image: ubuntu18-gpdb6-image-build res_test_image: ubuntu18-gpdb6-image-test -res_gpdb_bin: bin_gpdb6_ubuntu18 +res_gpdb_bin: #@ "bin_gpdb6_ubuntu18" + ("" if release_build else "_debug") res_diskquota_bin: bin_diskquota_gpdb6_ubuntu18 res_intermediates_bin: bin_diskquota_gpdb6_ubuntu18_intermediates os: ubuntu18.04 +build_type: #@ "Release" if release_build else "Debug" #@ end #! The entry point of a pipeline. The job name must be 'entrance'. @@ -85,6 +90,7 @@ input_mapping: bin_gpdb: #@ conf["res_gpdb_bin"] params: DISKQUOTA_OS: #@ conf["os"] + BUILD_TYPE: #@ conf["build_type"] #@ end #@ def _test_task(conf): diff --git a/concourse/pipeline/res_def.yml b/concourse/pipeline/res_def.yml index 93ce5ba0650..573c0b8e026 100644 --- a/concourse/pipeline/res_def.yml +++ b/concourse/pipeline/res_def.yml @@ -111,30 +111,58 @@ resources: password: ((extensions-gcs-service-account-key)) # gpdb binary on gcs is located as different folder for different version +# Latest build with assertion enabled: +# --enable-cassert --enable-tap-tests --enable-debug-extensions +- name: bin_gpdb6_centos6_debug + type: gcs + source: + bucket: ((gcs-bucket)) + json_key: ((concourse-gcs-resources-service-account-key)) + regexp: server/published/gpdb6/server-rc-(.*)-rhel6_x86_64.debug.tar.gz +- name: bin_gpdb6_centos7_debug + type: gcs + source: + bucket: ((gcs-bucket)) + json_key: ((concourse-gcs-resources-service-account-key)) + regexp: server/published/gpdb6/server-rc-(.*)-rhel7_x86_64.debug.tar.gz +- name: bin_gpdb6_rhel8_debug + type: gcs + source: + bucket: ((gcs-bucket)) + json_key: ((concourse-gcs-resources-service-account-key)) + regexp: server/published/gpdb6/server-rc-(.*)-rhel8_x86_64.debug.tar.gz +- name: bin_gpdb6_ubuntu18_debug + type: gcs + source: + bucket: ((gcs-bucket)) + json_key: ((concourse-gcs-resources-service-account-key)) + regexp: server/published/gpdb6/server-rc-(.*)-ubuntu18.04_x86_64.debug.tar.gz +# Latest release candidates, no fault-injector, no assertion: +# --disable-debug-extensions --disable-tap-tests --enable-ic-proxy - name: bin_gpdb6_centos6 type: gcs source: - bucket: ((gcs-bucket-intermediates)) + bucket: ((gcs-bucket)) json_key: ((concourse-gcs-resources-service-account-key)) - versioned_file: 6X_STABLE/bin_gpdb_centos6/bin_gpdb.tar.gz + regexp: server/release-candidates/gpdb6/greenplum-db-server-(.*)-centos6.tar.gz - name: bin_gpdb6_centos7 type: gcs source: - bucket: ((gcs-bucket-intermediates)) + bucket: ((gcs-bucket)) json_key: ((concourse-gcs-resources-service-account-key)) - versioned_file: 6X_STABLE/bin_gpdb_centos7/bin_gpdb.tar.gz -- name: bin_gpdb6_ubuntu18 + regexp: server/release-candidates/gpdb6/greenplum-db-server-(.*)-centos7.tar.gz +- name: bin_gpdb6_rhel8 type: gcs source: - bucket: ((gcs-bucket-intermediates)) + bucket: ((gcs-bucket)) json_key: ((concourse-gcs-resources-service-account-key)) - versioned_file: 6X_STABLE/bin_gpdb_ubuntu18.04/bin_gpdb.tar.gz -- name: bin_gpdb6_rhel8 + regexp: server/release-candidates/gpdb6/greenplum-db-server-(.*)-rhel8.tar.gz +- name: bin_gpdb6_ubuntu18 type: gcs source: - bucket: ((gcs-bucket-intermediates)) + bucket: ((gcs-bucket)) json_key: ((concourse-gcs-resources-service-account-key)) - versioned_file: 6X_STABLE/bin_gpdb_rhel8/bin_gpdb.tar.gz + regexp: server/release-candidates/gpdb6/greenplum-db-server-(.*)-ubuntu18.04.tar.gz # Diskquota releases - name: bin_diskquota_gpdb6_rhel6 diff --git 
a/concourse/scripts/build_diskquota.sh b/concourse/scripts/build_diskquota.sh index cadb9285dce..a725037c584 100755 --- a/concourse/scripts/build_diskquota.sh +++ b/concourse/scripts/build_diskquota.sh @@ -13,7 +13,9 @@ function pkg() { pushd /home/gpadmin/diskquota_artifacts local last_release_path last_release_path=$(readlink -e /home/gpadmin/last_released_diskquota_bin/diskquota-*.tar.gz) - cmake /home/gpadmin/diskquota_src -DDISKQUOTA_LAST_RELEASE_PATH="${last_release_path}" + cmake /home/gpadmin/diskquota_src \ + -DDISKQUOTA_LAST_RELEASE_PATH="${last_release_path}" \ + -DCMAKE_BUILD_TYPE="${BUILD_TYPE}" cmake --build . --target package popd } diff --git a/concourse/scripts/entry.sh b/concourse/scripts/entry.sh index 3a4b2e2411c..4ff2a78b272 100755 --- a/concourse/scripts/entry.sh +++ b/concourse/scripts/entry.sh @@ -117,7 +117,7 @@ setup_gpadmin() { # Extract gpdb binary function install_gpdb() { [ ! -d /usr/local/greenplum-db-devel ] && mkdir -p /usr/local/greenplum-db-devel - tar -xzf "${CONCOURSE_WORK_DIR}/bin_gpdb/bin_gpdb.tar.gz" -C /usr/local/greenplum-db-devel + tar -xzf "${CONCOURSE_WORK_DIR}"/bin_gpdb/*.tar.gz -C /usr/local/greenplum-db-devel chown -R gpadmin:gpadmin /usr/local/greenplum-db-devel } @@ -127,9 +127,9 @@ function install_gpdb() { ## location, we fixes this issue by creating a symbolic link for it. function create_fake_gpdb_src() { local fake_gpdb_src - fake_gpdb_src=/tmp/build/"$(\ - grep -rnw '/usr/local/greenplum-db-devel' -e 'abs_top_srcdir = .*' |\ - head -n 1 | awk -F"/" '{print $(NF-1)}')" + fake_gpdb_src="$(\ + grep -rhw '/usr/local/greenplum-db-devel' -e 'abs_top_srcdir = .*' |\ + head -n 1 | awk '{ print $NF; }')" if [ -d "${fake_gpdb_src}" ]; then echo "Fake gpdb source directory has been configured." @@ -142,8 +142,10 @@ function create_fake_gpdb_src() { --disable-orca --disable-gpcloud --enable-debug-extensions popd - mkdir -p "${fake_gpdb_src}" - ln -s /home/gpadmin/gpdb_src "${fake_gpdb_src}/gpdb_src" + local fake_root + fake_root=$(dirname "${fake_gpdb_src}") + mkdir -p "${fake_root}" + ln -s /home/gpadmin/gpdb_src "${fake_gpdb_src}" } # Setup common environment diff --git a/concourse/tasks/build_diskquota.yml b/concourse/tasks/build_diskquota.yml index 5fe11afa0c4..cacf0fb2c9b 100644 --- a/concourse/tasks/build_diskquota.yml +++ b/concourse/tasks/build_diskquota.yml @@ -17,3 +17,4 @@ run: - build params: DISKQUOTA_OS: + BUILD_TYPE: diff --git a/diskquota.c b/diskquota.c index 27bf04a4628..3c1aa2f5a3b 100644 --- a/diskquota.c +++ b/diskquota.c @@ -226,8 +226,14 @@ disk_quota_sigusr1(SIGNAL_ARGS) static void define_guc_variables(void) { +#if DISKQUOTA_DEBUG + const int min_naptime = 0; +#else + const int min_naptime = 1; +#endif + DefineCustomIntVariable("diskquota.naptime", "Duration between each check (in seconds).", NULL, &diskquota_naptime, - 2, 0, INT_MAX, PGC_SIGHUP, 0, NULL, NULL, NULL); + 2, min_naptime, INT_MAX, PGC_SIGHUP, 0, NULL, NULL, NULL); DefineCustomIntVariable("diskquota.max_active_tables", "Max number of active tables monitored by disk-quota.", NULL, &diskquota_max_active_tables, 1 * 1024 * 1024, 1, INT_MAX, PGC_SIGHUP, 0, NULL, NULL, NULL); diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index af2b8202d14..7d7c1bc79a3 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -1,5 +1,20 @@ include(${CMAKE_SOURCE_DIR}/cmake/Regress.cmake) +# Test cases need fault injector needs to be excluded from release build. 
+# GPDB release build doesn't support fault injector +list(APPEND exclude_regress_for_release + test_fetch_table_stat) +list(APPEND exclude_isolation2_for_release + test_relation_size + test_blackmap + test_vacuum + test_truncate + test_worker_timeout) +if (NOT CMAKE_BUILD_TYPE STREQUAL "Release") + set(load_inject_fault_opts + --dbname=contrib_regression) +endif() + RegressTarget_Add(regress INIT_FILE ${CMAKE_CURRENT_SOURCE_DIR}/init_file @@ -8,8 +23,10 @@ RegressTarget_Add(regress RESULTS_DIR ${CMAKE_CURRENT_SOURCE_DIR}/regress/results DATA_DIR ${CMAKE_CURRENT_SOURCE_DIR}/data SCHEDULE_FILE ${CMAKE_CURRENT_SOURCE_DIR}/regress/diskquota_schedule + EXCLUDE + ${exclude_regress_for_release} REGRESS_OPTS - --load-extension=gp_inject_fault + ${load_inject_fault_opts} --dbname=contrib_regression) RegressTarget_Add(isolation2 @@ -22,8 +39,10 @@ RegressTarget_Add(isolation2 RESULTS_DIR ${CMAKE_CURRENT_SOURCE_DIR}/isolation2/results DATA_DIR ${CMAKE_CURRENT_SOURCE_DIR}/data SCHEDULE_FILE ${CMAKE_CURRENT_SOURCE_DIR}/isolation2/isolation2_schedule + EXCLUDE + ${exclude_isolation2_for_release} REGRESS_OPTS - --load-extension=gp_inject_fault + ${load_inject_fault_opts} --dbname=isolation2test) add_custom_target(installcheck) diff --git a/tests/isolation2/expected/config.out b/tests/isolation2/expected/config.out index 8916425ef0b..1fdc9c57b39 100644 --- a/tests/isolation2/expected/config.out +++ b/tests/isolation2/expected/config.out @@ -10,11 +10,14 @@ (exited with code 0) -- Show the values of all GUC variables +--start_ignore +-- naptime cannot be 0 for release build 1: SHOW diskquota.naptime; diskquota.naptime ------------------- 0 (1 row) +--end_ignore 1: SHOW diskquota.max_active_tables; diskquota.max_active_tables ----------------------------- diff --git a/tests/isolation2/expected/reset_config.out b/tests/isolation2/expected/reset_config.out index 5fb1fb9f135..3d076b36cca 100644 --- a/tests/isolation2/expected/reset_config.out +++ b/tests/isolation2/expected/reset_config.out @@ -1,4 +1,4 @@ -!\retcode gpconfig -c diskquota.naptime -v 10; +!\retcode gpconfig -c diskquota.naptime -v 2; (exited with code 0) !\retcode gpstop -u; (exited with code 0) @@ -6,5 +6,5 @@ 1: SHOW diskquota.naptime; diskquota.naptime ------------------- - 10 + 2 (1 row) diff --git a/tests/isolation2/expected/test_postmaster_restart.out b/tests/isolation2/expected/test_postmaster_restart.out index f08f1c31937..f15ccfd4c1e 100644 --- a/tests/isolation2/expected/test_postmaster_restart.out +++ b/tests/isolation2/expected/test_postmaster_restart.out @@ -26,7 +26,7 @@ SET (1 row) -- expect fail -1: CREATE TABLE t1 AS SELECT generate_series(1,1000000); +1: CREATE TABLE t1 AS SELECT generate_series(1,10000000); ERROR: schema's disk space quota exceeded with name:157893 (seg0 127.0.0.1:6002 pid=1025673) 1q: ... 
@@ -112,7 +112,7 @@ SET t (1 row) -- expect fail -1: CREATE TABLE t2 AS SELECT generate_series(1,1000000); +1: CREATE TABLE t2 AS SELECT generate_series(1,10000000); ERROR: schema's disk space quota exceeded with name:158089 (seg0 127.0.0.1:6002 pid=1027799) -- enlarge the quota limits 1: SELECT diskquota.set_schema_quota('postmaster_restart_s', '100 MB'); diff --git a/tests/isolation2/sql/config.sql b/tests/isolation2/sql/config.sql index 21c35e1796f..855ad7e531d 100644 --- a/tests/isolation2/sql/config.sql +++ b/tests/isolation2/sql/config.sql @@ -9,6 +9,9 @@ CREATE DATABASE diskquota; !\retcode gpstop -raf; -- Show the values of all GUC variables +--start_ignore +-- naptime cannot be 0 for release build 1: SHOW diskquota.naptime; +--end_ignore 1: SHOW diskquota.max_active_tables; 1: SHOW diskquota.worker_timeout; diff --git a/tests/isolation2/sql/reset_config.sql b/tests/isolation2/sql/reset_config.sql index bfc2735d46c..129fe7b95b4 100644 --- a/tests/isolation2/sql/reset_config.sql +++ b/tests/isolation2/sql/reset_config.sql @@ -1,4 +1,4 @@ -!\retcode gpconfig -c diskquota.naptime -v 10; +!\retcode gpconfig -c diskquota.naptime -v 2; !\retcode gpstop -u; 1: SHOW diskquota.naptime; diff --git a/tests/isolation2/sql/test_postmaster_restart.sql b/tests/isolation2/sql/test_postmaster_restart.sql index dba52dc3e63..bc78c241c00 100644 --- a/tests/isolation2/sql/test_postmaster_restart.sql +++ b/tests/isolation2/sql/test_postmaster_restart.sql @@ -8,7 +8,7 @@ 1: SELECT diskquota.wait_for_worker_new_epoch(); -- expect fail -1: CREATE TABLE t1 AS SELECT generate_series(1,1000000); +1: CREATE TABLE t1 AS SELECT generate_series(1,10000000); 1q: -- launcher should exist @@ -40,7 +40,7 @@ 1: SET search_path TO postmaster_restart_s; 1: SELECT diskquota.wait_for_worker_new_epoch(); -- expect fail -1: CREATE TABLE t2 AS SELECT generate_series(1,1000000); +1: CREATE TABLE t2 AS SELECT generate_series(1,10000000); -- enlarge the quota limits 1: SELECT diskquota.set_schema_quota('postmaster_restart_s', '100 MB'); 1: SELECT diskquota.wait_for_worker_new_epoch(); diff --git a/tests/regress/expected/config.out b/tests/regress/expected/config.out index c147f0adde4..3b1d9761772 100644 --- a/tests/regress/expected/config.out +++ b/tests/regress/expected/config.out @@ -1,11 +1,13 @@ \c -- Show the values of all GUC variables +-- start_ignore +-- naptime cannot be 0 on Release build, so it will be 2 SHOW diskquota.naptime; diskquota.naptime ------------------- 0 (1 row) - +-- end_ignore SHOW diskquota.max_active_tables; diskquota.max_active_tables ----------------------------- diff --git a/tests/regress/expected/reset_config.out b/tests/regress/expected/reset_config.out index 3b4afdbe031..c65092e54b4 100644 --- a/tests/regress/expected/reset_config.out +++ b/tests/regress/expected/reset_config.out @@ -1,6 +1,6 @@ SHOW diskquota.naptime; diskquota.naptime ------------------- - 10 + 2 (1 row) diff --git a/tests/regress/expected/test_ctas_pause.out b/tests/regress/expected/test_ctas_pause.out index e3edc4395e9..b3b96a8f694 100644 --- a/tests/regress/expected/test_ctas_pause.out +++ b/tests/regress/expected/test_ctas_pause.out @@ -15,7 +15,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); (1 row) -- heap table -CREATE TABLE t1 (i) AS SELECT generate_series(1,1000000) DISTRIBUTED BY (i); -- expect fail +CREATE TABLE t1 (i) AS SELECT generate_series(1,5000000) DISTRIBUTED BY (i); -- expect fail NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Greenplum Database data 
distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. ERROR: schema's disk space quota exceeded with name:110528 (seg1 127.0.0.1:6003 pid=73892) @@ -25,7 +25,7 @@ SELECT diskquota.pause(); (1 row) -CREATE TABLE t1 (i) AS SELECT generate_series(1,1000000) DISTRIBUTED BY (i); -- expect succeed +CREATE TABLE t1 (i) AS SELECT generate_series(1,5000000) DISTRIBUTED BY (i); -- expect succeed NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. -- disable hardlimit and do some clean-ups. diff --git a/tests/regress/expected/test_default_tablespace.out b/tests/regress/expected/test_default_tablespace.out index cd3556757c8..36c380c6656 100644 --- a/tests/regress/expected/test_default_tablespace.out +++ b/tests/regress/expected/test_default_tablespace.out @@ -31,6 +31,12 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert to success INSERT INTO t SELECT generate_series(1, 100); INSERT INTO t SELECT generate_series(1, 1000000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + -- expect insert to fail INSERT INTO t SELECT generate_series(1, 1000000); ERROR: tablespace:pg_default role:role1 diskquota exceeded @@ -96,6 +102,12 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert to success CREATE TABLE t_in_custom_tablespace (i) AS SELECT generate_series(1, 100) DISTRIBUTED BY (i); INSERT INTO t_in_custom_tablespace SELECT generate_series(1, 1000000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + -- expect insert to fail INSERT INTO t_in_custom_tablespace SELECT generate_series(1, 1000000); ERROR: tablespace:custom_tablespace role:role1 diskquota exceeded diff --git a/tests/regress/expected/test_drop_after_pause.out b/tests/regress/expected/test_drop_after_pause.out index 71c49c20494..81aac49c334 100644 --- a/tests/regress/expected/test_drop_after_pause.out +++ b/tests/regress/expected/test_drop_after_pause.out @@ -45,7 +45,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); t (1 row) -INSERT INTO SX.a SELECT generate_series(1,1000000); -- expect insert fail +INSERT INTO SX.a SELECT generate_series(1,10000000); -- expect insert fail ERROR: schema's disk space quota exceeded with name:16933 (seg2 127.0.0.1:6004 pid=24622) \! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null \! gpstop -u > /dev/null diff --git a/tests/regress/expected/test_update_db_cache.out b/tests/regress/expected/test_update_db_cache.out index 5bbc625d4f6..398d9c5f006 100644 --- a/tests/regress/expected/test_update_db_cache.out +++ b/tests/regress/expected/test_update_db_cache.out @@ -41,7 +41,7 @@ WARNING: database is not empty, please run `select diskquota.init_table_size_ta -- FIXME: We cannot use wait_for_worker_new_epoch() here because -- diskquota.state is not clean. 
Change sleep() to wait() after removing -- diskquota.state -SELECT pg_sleep(1); +SELECT pg_sleep(5); pg_sleep ---------- diff --git a/tests/regress/sql/config.sql b/tests/regress/sql/config.sql index b5ac3df7221..1b8c8dfafc3 100644 --- a/tests/regress/sql/config.sql +++ b/tests/regress/sql/config.sql @@ -11,7 +11,9 @@ CREATE DATABASE diskquota; \c -- Show the values of all GUC variables +-- start_ignore SHOW diskquota.naptime; +-- end_ignore SHOW diskquota.max_active_tables; SHOW diskquota.worker_timeout; SHOW diskquota.hard_limit; diff --git a/tests/regress/sql/reset_config.sql b/tests/regress/sql/reset_config.sql index 9f9842a2828..7d0330fbcdf 100644 --- a/tests/regress/sql/reset_config.sql +++ b/tests/regress/sql/reset_config.sql @@ -1,5 +1,5 @@ --start_ignore -\! gpconfig -c diskquota.naptime -v 10 +\! gpconfig -c diskquota.naptime -v 2 \! gpstop -u --end_ignore diff --git a/tests/regress/sql/test_ctas_pause.sql b/tests/regress/sql/test_ctas_pause.sql index 4e5e8df4bd5..020f393177f 100644 --- a/tests/regress/sql/test_ctas_pause.sql +++ b/tests/regress/sql/test_ctas_pause.sql @@ -7,11 +7,11 @@ SELECT diskquota.set_schema_quota('hardlimit_s', '1 MB'); SELECT diskquota.wait_for_worker_new_epoch(); -- heap table -CREATE TABLE t1 (i) AS SELECT generate_series(1,1000000) DISTRIBUTED BY (i); -- expect fail +CREATE TABLE t1 (i) AS SELECT generate_series(1,5000000) DISTRIBUTED BY (i); -- expect fail SELECT diskquota.pause(); -CREATE TABLE t1 (i) AS SELECT generate_series(1,1000000) DISTRIBUTED BY (i); -- expect succeed +CREATE TABLE t1 (i) AS SELECT generate_series(1,5000000) DISTRIBUTED BY (i); -- expect succeed -- disable hardlimit and do some clean-ups. \! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null diff --git a/tests/regress/sql/test_default_tablespace.sql b/tests/regress/sql/test_default_tablespace.sql index 1bf915cbf81..91923a99ac8 100644 --- a/tests/regress/sql/test_default_tablespace.sql +++ b/tests/regress/sql/test_default_tablespace.sql @@ -23,6 +23,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert to success INSERT INTO t SELECT generate_series(1, 100); INSERT INTO t SELECT generate_series(1, 1000000); +SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert to fail INSERT INTO t SELECT generate_series(1, 1000000); @@ -64,6 +65,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert to success CREATE TABLE t_in_custom_tablespace (i) AS SELECT generate_series(1, 100) DISTRIBUTED BY (i); INSERT INTO t_in_custom_tablespace SELECT generate_series(1, 1000000); +SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert to fail INSERT INTO t_in_custom_tablespace SELECT generate_series(1, 1000000); diff --git a/tests/regress/sql/test_drop_after_pause.sql b/tests/regress/sql/test_drop_after_pause.sql index b24e2ceb67a..ec51a8ddc24 100644 --- a/tests/regress/sql/test_drop_after_pause.sql +++ b/tests/regress/sql/test_drop_after_pause.sql @@ -18,7 +18,7 @@ CREATE SCHEMA SX; CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); SELECT diskquota.set_schema_quota('SX', '1MB'); SELECT diskquota.wait_for_worker_new_epoch(); -INSERT INTO SX.a SELECT generate_series(1,1000000); -- expect insert fail +INSERT INTO SX.a SELECT generate_series(1,10000000); -- expect insert fail \! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null \! 
gpstop -u > /dev/null diff --git a/tests/regress/sql/test_update_db_cache.sql b/tests/regress/sql/test_update_db_cache.sql index b35d84cd93f..5256fbd8031 100644 --- a/tests/regress/sql/test_update_db_cache.sql +++ b/tests/regress/sql/test_update_db_cache.sql @@ -31,7 +31,7 @@ CREATE EXTENSION diskquota; -- FIXME: We cannot use wait_for_worker_new_epoch() here because -- diskquota.state is not clean. Change sleep() to wait() after removing -- diskquota.state -SELECT pg_sleep(1); +SELECT pg_sleep(5); -- Should find nothing since t_no_extension is not recorded. SELECT diskquota.diskquota_fetch_table_stat(0, ARRAY[]::oid[]) From 12a20fad2a5fdf5387d1d074391e2f7667659eca Mon Sep 17 00:00:00 2001 From: yihong Date: Mon, 21 Mar 2022 13:56:43 +0800 Subject: [PATCH 168/330] feat: github actions for clang-check ci (#185) * Feat: use GitHub Actions for clang-check `CI` --- .github/workflows/check.yml | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) create mode 100644 .github/workflows/check.yml diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml new file mode 100644 index 00000000000..293a7b94e8b --- /dev/null +++ b/.github/workflows/check.yml @@ -0,0 +1,22 @@ +name: Check + +on: + pull_request: + paths-ignore: + - "docs/**" + - "cmake/**" + - "test/**" + - "upgrade_test/**" + - "*.md" + - "*.sql" + +jobs: + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: use clang-format 13 + run: pip3 install clang-format==13.0.1 + - name: Check clang code style + run: git ls-files *.{c,h} | xargs clang-format -i --style=file && git diff --exit-code + From 24bc5fe71775425981b8d74cbf571d9e7f5194c5 Mon Sep 17 00:00:00 2001 From: Chen Mulong Date: Mon, 21 Mar 2022 18:47:10 +0800 Subject: [PATCH 169/330] Run same regress test multiple times (#188) - Add RUN_TIMES to RegressTarget_Add to run the same regress test multiple times. - Rename script for consistency. --- cmake/Regress.cmake | 26 ++++++++++++---- cmake/regress_loop.sh | 13 ++++++++ ...w_regress_diff.sh => regress_show_diff.sh} | 0 tests/CMakeLists.txt | 30 +++++++++++++++++++ 4 files changed, 64 insertions(+), 5 deletions(-) create mode 100755 cmake/regress_loop.sh rename cmake/{show_regress_diff.sh => regress_show_diff.sh} (100%) diff --git a/cmake/Regress.cmake b/cmake/Regress.cmake index 77df6650335..1726613d162 100644 --- a/cmake/Regress.cmake +++ b/cmake/Regress.cmake @@ -11,13 +11,18 @@ # [EXCLUDE ...] # [REGRESS_OPTS ...] # [REGRESS_TYPE isolation2/regress] +# [RUN_TIMES ] # ) # All the file path can be the relative path to ${CMAKE_CURRENT_SOURCE_DIR}. # A bunch of diff targets will be created as well for comparing the regress results. The diff # target names like diff__ # -# NOTE: To use this cmake file in another project, the show_regress_diff.sh needs to be placed -# alongside. +# Use RUN_TIMES to specify how many times the regress tests should be executed. A negative RUN_TIMES +# will run the test infinite times. 
+# +# NOTE: To use this cmake file in another project, below files needs to be placed alongside: +# - regress_show_diff.sh +# - regress_loop.sh # # Example: # RegressTarget_Add(installcheck_avro_fmt @@ -50,7 +55,7 @@ function(RegressTarget_Add name) cmake_parse_arguments( arg "" - "SQL_DIR;EXPECTED_DIR;RESULTS_DIR;DATA_DIR;REGRESS_TYPE" + "SQL_DIR;EXPECTED_DIR;RESULTS_DIR;DATA_DIR;REGRESS_TYPE;RUN_TIMES" "REGRESS;EXCLUDE;REGRESS_OPTS;INIT_FILE;SCHEDULE_FILE" ${ARGN}) if (NOT arg_EXPECTED_DIR) @@ -114,6 +119,17 @@ function(RegressTarget_Add name) set(ln_data_dir_CMD ln -s ${data_DIR} data) endif() + set(regress_command + ${regress_BIN} --psqldir='${PG_BIN_DIR}' ${regress_opts_arg} ${regress_arg}) + if (arg_RUN_TIMES) + set(test_command + ${CMAKE_CURRENT_FUNCTION_LIST_DIR}/regress_loop.sh + ${arg_RUN_TIMES} + ${regress_command}) + else() + set(test_command ${regress_command}) + endif() + # Create the target add_custom_target( ${name} @@ -128,9 +144,9 @@ function(RegressTarget_Add name) COMMAND rm -f data COMMAND ${ln_data_dir_CMD} COMMAND - ${regress_BIN} --psqldir='${PG_BIN_DIR}' ${regress_opts_arg} ${regress_arg} + ${test_command} || - ${CMAKE_CURRENT_FUNCTION_LIST_DIR}/show_regress_diff.sh ${working_DIR} + ${CMAKE_CURRENT_FUNCTION_LIST_DIR}/regress_show_diff.sh ${working_DIR} ) if(arg_REGRESS_TYPE STREQUAL isolation2) diff --git a/cmake/regress_loop.sh b/cmake/regress_loop.sh new file mode 100755 index 00000000000..48cf94b6eed --- /dev/null +++ b/cmake/regress_loop.sh @@ -0,0 +1,13 @@ +#!/bin/bash +# Usage: +# regress_loop.sh +# Use negative number for infinite loop + +run_times=$1 +count=1 + +while [ "$run_times" -lt 0 ] || [ "$count" -le "$run_times" ]; do + echo "Run regress ${count} times" + "${@:2}" || exit 1 + count=$(( count + 1 )) +done diff --git a/cmake/show_regress_diff.sh b/cmake/regress_show_diff.sh similarity index 100% rename from cmake/show_regress_diff.sh rename to cmake/regress_show_diff.sh diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 7d7c1bc79a3..d193d9e65dd 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -47,3 +47,33 @@ RegressTarget_Add(isolation2 add_custom_target(installcheck) add_dependencies(installcheck isolation2 regress) + +# Example to run test_truncate infinite times +# RegressTarget_Add(regress_config +# INIT_FILE +# ${CMAKE_CURRENT_SOURCE_DIR}/init_file +# SQL_DIR ${CMAKE_CURRENT_SOURCE_DIR}/regress/sql +# EXPECTED_DIR ${CMAKE_CURRENT_SOURCE_DIR}/regress/expected +# RESULTS_DIR ${CMAKE_CURRENT_SOURCE_DIR}/regress/results +# DATA_DIR ${CMAKE_CURRENT_SOURCE_DIR}/data +# REGRESS +# config test_create_extension +# REGRESS_OPTS +# ${load_inject_fault_opts} +# --dbname=contrib_regression) +# RegressTarget_Add(regress_truncate_loop +# INIT_FILE +# ${CMAKE_CURRENT_SOURCE_DIR}/init_file +# SQL_DIR ${CMAKE_CURRENT_SOURCE_DIR}/regress/sql +# EXPECTED_DIR ${CMAKE_CURRENT_SOURCE_DIR}/regress/expected +# RESULTS_DIR ${CMAKE_CURRENT_SOURCE_DIR}/regress/results +# DATA_DIR ${CMAKE_CURRENT_SOURCE_DIR}/data +# REGRESS +# test_truncate +# RUN_TIMES -1 +# REGRESS_OPTS +# ${load_inject_fault_opts} +# --dbname=contrib_regression +# --use-existing) +# add_dependencies(regress_truncate_loop regress_config) +# add_dependencies(installcheck regress_truncate_loop) From b780ef090208850c3bc08e1a6244c9fcd91868ee Mon Sep 17 00:00:00 2001 From: Chen Mulong Date: Tue, 22 Mar 2022 10:44:34 +0800 Subject: [PATCH 170/330] slack-alert and PR pipeline fix (#187) - Add a slack alert when commit pipeline fails - Add exit job to set successful state. 
That state cannot be set by individual job since at that point, the whole pipeline is not finished yet. --- concourse/pipeline/dev.yml | 8 ++-- concourse/pipeline/job_def.lib.yml | 53 +++++++++++++++++++++++--- concourse/pipeline/pr.yml | 6 +++ concourse/pipeline/res_def.yml | 9 +++++ concourse/pipeline/trigger_def.lib.yml | 7 ++++ 5 files changed, 74 insertions(+), 9 deletions(-) diff --git a/concourse/pipeline/dev.yml b/concourse/pipeline/dev.yml index b033e8b6488..ea7fe5b8490 100644 --- a/concourse/pipeline/dev.yml +++ b/concourse/pipeline/dev.yml @@ -14,10 +14,10 @@ #@ res_type_map = {} #@ trigger = commit_dev_trigger(res_map) #@ confs= [ -#@ centos6_gpdb6_conf(release_build=True), -#@ centos7_gpdb6_conf(release_build=True), -#@ rhel8_gpdb6_conf(release_build=True), -#@ ubuntu18_gpdb6_conf(release_build=True) +#@ centos6_gpdb6_conf(release_build=False), +#@ centos7_gpdb6_conf(release_build=False), +#@ rhel8_gpdb6_conf(release_build=False), +#@ ubuntu18_gpdb6_conf(release_build=False) #@ ] jobs: #@ param = { diff --git a/concourse/pipeline/job_def.lib.yml b/concourse/pipeline/job_def.lib.yml index fa150031296..f32d400f8bd 100644 --- a/concourse/pipeline/job_def.lib.yml +++ b/concourse/pipeline/job_def.lib.yml @@ -57,6 +57,9 @@ plan: - trigger: true _: #@ template.replace(to_get) #@ end +#@ for to_put in trigger["to_put"]: +- #@ to_put +#@ end #@ end #! Like the entrance_job, with more static checks. @@ -71,6 +74,9 @@ plan: - trigger: true _: #@ template.replace(to_get) #@ end +#@ for to_put in trigger["to_put"]: +- #@ to_put +#@ end - get: clang-format-image - task: check_clang_format image: clang-format-image @@ -82,6 +88,44 @@ plan: path: diskquota_src/concourse/scripts/check-clang-format.sh #@ end +#@ def exit_job(param): +#@ trigger = param["trigger"] +#@ confs = param["confs"] +#@ passed_jobs = [] +#@ for conf in confs: +#@ passed_jobs.append(build_test_job_name(conf)) +#@ end +name: exit +on_failure: #@ trigger["on_failure"] +on_error: #@ trigger["on_error"] +on_success: #@ trigger["on_success"] +plan: +#@ for to_get in trigger["to_get"]: +- passed: passed_jobs + trigger: true + _: #@ template.replace(to_get) +#@ end +#@ end + +#@ def exit_pr_job(param): +#@ trigger = param["trigger"] +#@ confs = param["confs"] +#@ passed_jobs = [] +#@ for conf in confs: +#@ passed_jobs.append(build_test_job_name(conf)) +#@ end +name: exit_pr +on_failure: #@ trigger["on_failure"] +on_error: #@ trigger["on_error"] +on_success: #@ trigger["on_success"] +plan: +#@ for to_get in trigger["to_get"]: +- passed: #@ passed_jobs + trigger: true + _: #@ template.replace(to_get) +#@ end +#@ end + #@ def _build_task(conf): task: #@ "build_" + conf["os"] file: diskquota_src/concourse/tasks/build_diskquota.yml @@ -105,6 +149,9 @@ params: DISKQUOTA_OS: #@ conf["os"] #@ end +#@ def build_test_job_name(conf): +#@ return "build_test_" + conf["os"] +#@ end #@ def build_test_job(param): #@ res_map = param["res_map"] #@ trigger = param["trigger"] @@ -113,9 +160,8 @@ params: #@ add_res_by_name(res_map, "bin_cmake") #@ add_res_by_name(res_map, "bin_diskquota_intermediates") #@ add_res_by_conf(res_map, conf) -name: #@ "build_test_" + conf["os"] +name: #@ build_test_job_name(conf) max_in_flight: 10 -on_success: #@ trigger["on_success"] on_failure: #@ trigger["on_failure"] on_error: #@ trigger["on_error"] plan: @@ -124,9 +170,6 @@ plan: trigger: true _: #@ template.replace(to_get) #@ end -#@ for to_put in trigger["to_put"]: -- #@ to_put -#@ end - in_parallel: - get: gpdb_src resource: #@ param["gpdb_src"] diff --git 
a/concourse/pipeline/pr.yml b/concourse/pipeline/pr.yml index 716fde9e1bb..917f818816a 100644 --- a/concourse/pipeline/pr.yml +++ b/concourse/pipeline/pr.yml @@ -1,5 +1,6 @@ #@ load("job_def.lib.yml", #@ "entrance_check_job", +#@ "exit_pr_job", #@ "build_test_job", #@ "centos6_gpdb6_conf", #@ "centos7_gpdb6_conf", @@ -36,6 +37,11 @@ jobs: #@ } - #@ build_test_job(param) #@ end +#@ param = { +#@ "trigger": trigger, +#@ "confs": confs +#@ } +- #@ exit_pr_job(param) resources: #@ declare_res(res_type_map, res_map) diff --git a/concourse/pipeline/res_def.yml b/concourse/pipeline/res_def.yml index 573c0b8e026..5dcecb74ac7 100644 --- a/concourse/pipeline/res_def.yml +++ b/concourse/pipeline/res_def.yml @@ -11,6 +11,11 @@ resource_types: source: repository: teliaoss/github-pr-resource +- name: slack-alert + type: docker-image + source: + repository: arbourd/concourse-slack-alert-resource + resources: # Pull Request - name: diskquota_pr @@ -229,3 +234,7 @@ resources: bucket: gpdb-extensions-concourse-resources json_key: ((extensions-gcs-service-account-key)) regexp: dependencies/cmake-(.*)-linux-x86_64.sh +- name: slack_notify + type: slack-alert + source: + url: ((extensions-slack-webhook)) diff --git a/concourse/pipeline/trigger_def.lib.yml b/concourse/pipeline/trigger_def.lib.yml index 3a1f12b6d2b..fa8f51bfbd0 100644 --- a/concourse/pipeline/trigger_def.lib.yml +++ b/concourse/pipeline/trigger_def.lib.yml @@ -33,6 +33,7 @@ on_success: #! Commit trigger. For master pipelines #@ def commit_trigger(res_map): #@ add_res_by_name(res_map, "diskquota_commit") +#@ add_res_by_name(res_map, "slack_notify") to_get: - get: diskquota_src resource: diskquota_commit @@ -41,7 +42,13 @@ to_put: #@ [] #! Unfortunately it doesn't work with Concourse 5. on_success: on_failure: + put: slack_notify + params: + alert_type: failed on_error: + put: slack_notify + params: + alert_type: errored #@ end #! Commit trigger. For dev pipelines. No webhook From 850c6c418471c513141845a30f1f8c5eb8e52c2c Mon Sep 17 00:00:00 2001 From: Xing Guo Date: Wed, 23 Mar 2022 16:14:47 +0800 Subject: [PATCH 171/330] Don't format errno as '%d' in error messages. (#190) Actually, errno *isn't* strictly an integer. It depends on your operating system. Instead of formatting it as an integer, we should transform it to C style string and format it as '%s'. 
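To illustrate the idiom this patch adopts, here is a minimal, self-contained C sketch — not taken from the diskquota sources, and the report_spi_failure() helper is invented for demonstration. It shows why the readable text from strerror(errno) belongs in log messages, while the raw integer (whose meaning varies by operating system) does not:

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical helper mirroring the "reason: %s, return code: %d" message
 * style introduced by this patch. strerror() maps the current errno value,
 * an OS-specific number, to a human-readable message. */
static void
report_spi_failure(const char *what, int ret_code)
{
	fprintf(stderr, "[diskquota] %s, reason: %s, return code: %d.\n",
	        what, strerror(errno), ret_code);
}

int
main(void)
{
	/* Provoke a failure so errno is set (to ENOENT here). */
	if (fopen("/no/such/file", "r") == NULL)
		report_spi_failure("SPI connect error", -1); /* prints "... reason: No such file or directory ..." */
	return 0;
}

PostgreSQL's errmsg() also accepts the %m conversion, which expands to strerror(errno) in the same way (load_table_size above already uses "error: %m"); the call sites touched below spell out strerror(errno) explicitly instead.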
--- diskquota.c | 22 ++++++++++---------- diskquota_utility.c | 3 ++- 2 files changed, 14 insertions(+), 11 deletions(-) diff --git a/diskquota.c b/diskquota.c index 3c1aa2f5a3b..7c5c0c9b6db 100644 --- a/diskquota.c +++ b/diskquota.c @@ -595,8 +595,8 @@ create_monitor_db_table(void) ret_code = SPI_execute(sql, false, 0); if (ret_code != SPI_OK_UTILITY) { - ereport(ERROR, (errmsg("[diskquota launcher] SPI_execute error, sql: \"%s\", errno: %d, ret_code: %d.", sql, - errno, ret_code))); + ereport(ERROR, (errmsg("[diskquota launcher] SPI_execute error, sql: \"%s\", reason: %s, ret_code: %d.", + sql, strerror(errno), ret_code))); } } PG_CATCH(); @@ -642,12 +642,13 @@ start_workers_from_dblist(void) PushActiveSnapshot(GetTransactionSnapshot()); ret = SPI_connect(); if (ret != SPI_OK_CONNECT) - ereport(ERROR, (errmsg("[diskquota launcher] SPI connect error, errno: %d, return code: %d.", errno, ret))); + ereport(ERROR, + (errmsg("[diskquota launcher] SPI connect error, reason: %s, return code: %d.", strerror(errno), ret))); ret = SPI_execute("select dbid from diskquota_namespace.database_list;", true, 0); if (ret != SPI_OK_SELECT) ereport(ERROR, - (errmsg("[diskquota launcher] 'select diskquota_namespace.database_list', errno: %d, return code: %d", - errno, ret))); + (errmsg("[diskquota launcher] 'select diskquota_namespace.database_list', reason: %s, return code: %d.", + strerror(errno), ret))); tupdesc = SPI_tuptable->tupdesc; if (tupdesc->natts != 1 || tupdesc->attrs[0]->atttypid != OIDOID) { @@ -895,8 +896,8 @@ add_dbid_to_database_list(Oid dbid) if (ret != SPI_OK_SELECT) ereport(ERROR, (errmsg("[diskquota launcher] error occured while checking database_list, " - " code = %d errno = %d", - ret, errno))); + " code: %d, reason: %s.", + ret, strerror(errno)))); if (SPI_processed == 1) { @@ -911,8 +912,8 @@ add_dbid_to_database_list(Oid dbid) if (ret != SPI_OK_INSERT || SPI_processed != 1) ereport(ERROR, (errmsg("[diskquota launcher] error occured while updating database_list, " - " code = %d errno = %d", - ret, errno))); + " code: %d, reason: %s.", + ret, strerror(errno)))); return; } @@ -937,7 +938,8 @@ del_dbid_from_database_list(Oid dbid) NULL, false, 0); ereportif(ret != SPI_OK_DELETE, ERROR, - (errmsg("[diskquota launcher] del_dbid_from_database_list: errno: %d, ret_code: %d.", errno, ret))); + (errmsg("[diskquota launcher] del_dbid_from_database_list: reason: %s, ret_code: %d.", strerror(errno), + ret))); } /* diff --git a/diskquota_utility.c b/diskquota_utility.c index 6c05968302e..c1e8deb6d16 100644 --- a/diskquota_utility.c +++ b/diskquota_utility.c @@ -509,7 +509,8 @@ is_database_empty(void) " pg_namespace AS n " "WHERE c.oid > 16384 and relnamespace = n.oid and nspname != 'diskquota'", true, 0); - if (ret != SPI_OK_SELECT) elog(ERROR, "cannot select pg_class and pg_namespace table: error code %d", errno); + if (ret != SPI_OK_SELECT) + elog(ERROR, "cannot select pg_class and pg_namespace table, reason: %s.", strerror(errno)); tupdesc = SPI_tuptable->tupdesc; /* check sql return value whether database is empty */ From a8def57af710d73aed9ce764d24be814c834119a Mon Sep 17 00:00:00 2001 From: Sasasu Date: Tue, 22 Mar 2022 16:03:17 +0800 Subject: [PATCH 172/330] fix corrupted context when dirty data exists in pg_extension --- diskquota_utility.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/diskquota_utility.c b/diskquota_utility.c index c1e8deb6d16..8c49c6f2d93 100644 --- a/diskquota_utility.c +++ b/diskquota_utility.c @@ -1219,7 +1219,8 @@
worker_spi_get_extension_version(int *major, int *minor) { ereport(WARNING, (errmsg("[diskquota] when reading installed version lines %ld code = %d", SPI_processed, ret))); - return -1; + ret = -1; + goto out; } bool is_null = false; @@ -1231,7 +1232,8 @@ worker_spi_get_extension_version(int *major, int *minor) { ereport(WARNING, (errmsg("[diskquota] 'extversion' is empty in pg_class.pg_extension. catalog might be corrupted"))); - return -1; + ret = -1; + goto out; } ret = sscanf(version, "%d.%d", major, minor); @@ -1241,7 +1243,8 @@ worker_spi_get_extension_version(int *major, int *minor) ereport(WARNING, (errmsg("[diskquota] 'extversion' is '%s' in pg_class.pg_extension which is not valid format. " "catalog might be corrupted", version))); - return -1; + ret = -1; + goto out; } ret = 0; From a21964c266534774051b09f49355439d76e3b502 Mon Sep 17 00:00:00 2001 From: Chen Mulong Date: Thu, 24 Mar 2022 15:47:43 +0800 Subject: [PATCH 173/330] Fix cmake build on Mac (#191) - Mac needs extra linker options so that the symbols in the postgres executable can be found. - Mac's grep doesn't have the '-P' option. - To make an extension .so file, we use a 'module' target instead of 'shared' on purpose, since "A SHARED library may be marked with the FRAMEWORK target property to create a macOS Framework." Thus, the global link flags should be set through CMAKE_MODULE_LINKER_FLAGS rather than CMAKE_SHARED_LINKER_FLAGS. --- CMakeLists.txt | 6 ++++-- tests/data/current_binary_name | 6 +++--- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 94f4aed98cb..82ebe5bbffd 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -30,10 +30,12 @@ set(CMAKE_C_FLAGS_RELEASE "" CACHE set(CMAKE_C_FLAGS_DEBUG "-DDISKQUOTA_DEBUG" CACHE STRING "Flags for DEBUG build" FORCE) # set link flags for all sub-projects -set(CMAKE_SHARED_LINKER_FLAGS "${PG_LD_FLAGS}") +set(CMAKE_MODULE_LINKER_FLAGS "${PG_LD_FLAGS}") +if (APPLE) + set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} -bundle_loader ${PG_BIN_DIR}/postgres") +endif() # set c and ld flags for all projects set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${PG_C_FLAGS}") -set(CMAKE_MODULE_LINKER_FLAGS_INIT "${CMAKE_EXE_LINKER_FLAGS} ${PG_LD_FLAGS}") # generate version if(NOT DEFINED DISKQUOTA_VERSION) diff --git a/tests/data/current_binary_name b/tests/data/current_binary_name index 6f9daf009c6..2e2b0e7a1d8 100755 --- a/tests/data/current_binary_name +++ b/tests/data/current_binary_name @@ -1,9 +1,9 @@ #!/bin/bash -cd "$(dirname "$0")" +cd "$(dirname "$0")" || exit 1 -if [ $(grep -P '^1.0' ../../VERSION) ]; then +if grep -q -E '^1.0' ../../VERSION; then echo -n "diskquota.so" else - echo -n "diskquota-$(grep -o -P '^\d+.\d+' ../../VERSION).so" + echo -n "diskquota-$(grep -o -E '^[0-9]*.[0-9]*' ../../VERSION).so" fi From bb34e22a5bb604a590247108d38ce29618cc4ffb Mon Sep 17 00:00:00 2001 From: Chen Mulong Date: Wed, 30 Mar 2022 09:46:25 +0800 Subject: [PATCH 174/330] Bring tests back on CI (#193) Some tests were accidentally excluded in the Debug build. --- tests/CMakeLists.txt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index d193d9e65dd..80eb18c8dc3 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -2,6 +2,7 @@ include(${CMAKE_SOURCE_DIR}/cmake/Regress.cmake) # Test cases need fault injector needs to be excluded from release build.
# GPDB release build doesn't support fault injector +if (CMAKE_BUILD_TYPE STREQUAL "Release") list(APPEND exclude_regress_for_release test_fetch_table_stat) list(APPEND exclude_isolation2_for_release @@ -10,9 +11,8 @@ list(APPEND exclude_isolation2_for_release test_vacuum test_truncate test_worker_timeout) -if (NOT CMAKE_BUILD_TYPE STREQUAL "Release") - set(load_inject_fault_opts - --dbname=contrib_regression) +else() + set(load_inject_fault_opts --load-extension=gp_inject_fault) endif() RegressTarget_Add(regress From 423269b5ddff943de7f6b8aa72a0813155fc5d51 Mon Sep 17 00:00:00 2001 From: Chen Mulong Date: Wed, 30 Mar 2022 17:30:57 +0800 Subject: [PATCH 175/330] Missing EmitErrorReport before FlushErrorState (#155) FlushErrorState() will clear the error state. To get the error log into the server logs, EmitErrorReport() needs to be called before flushing. Everywhere else in the codebase, FlushErrorState() is preceded by EmitErrorReport(). For the diskquota bgworker, use `SearchSysCache1` instead: the bgworker should be error-tolerant to keep it running in the background, so we can't throw an error, and if we called `EmitErrorReport` in the bgworker, the results returned by `CdbDispatchCommand` would be incorrect. On the other hand, the diskquota launcher can throw an error if needed. Co-authored-by: Chen Mulong Co-authored-by: Xuebin Su --- diskquota.h | 1 + diskquota_utility.c | 2 +- gp_activetable.c | 2 +- quotamodel.c | 23 ++++++++++++ relation_cache.c | 35 +++++++++++++------ relation_cache.h | 2 +- .../expected/test_default_tablespace.out | 10 ++++-- tests/regress/sql/test_default_tablespace.sql | 1 + 8 files changed, 60 insertions(+), 16 deletions(-) diff --git a/diskquota.h b/diskquota.h index 5b12abaf2d2..d0798fdf318 100644 --- a/diskquota.h +++ b/diskquota.h @@ -152,6 +152,7 @@ extern void truncateStringInfo(StringInfo str, int nchars); extern List *get_rel_oid_list(void); extern int64 calculate_relation_size_all_forks(RelFileNodeBackend *rnode, char relstorage); extern Relation diskquota_relation_open(Oid relid, LOCKMODE mode); +extern bool get_rel_name_namespace(Oid relid, Oid *nsOid, char *relname); extern List *diskquota_get_index_list(Oid relid); extern void diskquota_get_appendonly_aux_oid_list(Oid reloid, Oid *segrelid, Oid *blkdirrelid, Oid *visimaprelid); extern Oid diskquota_parse_primary_table_oid(Oid namespace, char *relname); diff --git a/diskquota_utility.c b/diskquota_utility.c index 8c49c6f2d93..612850be93f 100644 --- a/diskquota_utility.c +++ b/diskquota_utility.c @@ -940,7 +940,6 @@ set_target_internal(Oid primaryoid, Oid spcoid, int64 quota_limit_mb, QuotaType }, NULL, false, 0); if (ret != SPI_OK_INSERT) elog(ERROR, "cannot insert into quota setting table, error code %d", ret); - } else if (SPI_processed > 0 && quota_limit_mb < 0) { ret = SPI_execute_with_args("delete from diskquota.target where primaryOid = $1 and tablespaceOid = $2", 2, @@ -1416,6 +1415,7 @@ diskquota_relation_open(Oid relid, LOCKMODE mode) { InterruptHoldoffCount = SavedInterruptHoldoffCount; HOLD_INTERRUPTS(); + EmitErrorReport(); FlushErrorState(); RESUME_INTERRUPTS(); } diff --git a/gp_activetable.c b/gp_activetable.c index ddca783148c..06ca5cfa815 100644 --- a/gp_activetable.c +++ b/gp_activetable.c @@ -675,7 +675,7 @@ get_active_tables_oid(void) if (relOid != InvalidOid) { - prelid = get_primary_table_oid(relOid); + prelid = get_primary_table_oid(relOid, true); active_table_entry = hash_search(local_active_table_stats_map, &prelid, HASH_ENTER, &found); if (active_table_entry && !found) { diff --git a/quotamodel.c
b/quotamodel.c index 0887cdab622..4b7c3427bef 100644 --- a/quotamodel.c +++ b/quotamodel.c @@ -1318,6 +1318,29 @@ get_rel_owner_schema_tablespace(Oid relid, Oid *ownerOid, Oid *nsOid, Oid *table return found; } +/* + * Given table oid, search for namespace and name. + * Memory relname points to should be pre-allocated at least NAMEDATALEN bytes. + */ +bool +get_rel_name_namespace(Oid relid, Oid *nsOid, char *relname) +{ + HeapTuple tp; + + tp = SearchSysCache1(RELOID, ObjectIdGetDatum(relid)); + bool found = HeapTupleIsValid(tp); + if (found) + { + Form_pg_class reltup = (Form_pg_class)GETSTRUCT(tp); + + *nsOid = reltup->relnamespace; + memcpy(relname, reltup->relname.data, NAMEDATALEN); + + ReleaseSysCache(tp); + } + return found; +} + static bool check_blackmap_by_relfilenode(RelFileNode relfilenode) { diff --git a/relation_cache.c b/relation_cache.c index 33a3284cf89..91a1f9959ba 100644 --- a/relation_cache.c +++ b/relation_cache.c @@ -173,7 +173,7 @@ update_relation_cache(Oid relid) memcpy(relid_entry, &relid_entry_data, sizeof(DiskQuotaRelidCacheEntry)); LWLockRelease(diskquota_locks.relation_cache_lock); - prelid = get_primary_table_oid(relid); + prelid = get_primary_table_oid(relid, false); if (OidIsValid(prelid) && prelid != relid) { LWLockAcquire(diskquota_locks.relation_cache_lock, LW_EXCLUSIVE); @@ -188,23 +188,36 @@ update_relation_cache(Oid relid) } static Oid -parse_primary_table_oid(Oid relid) +parse_primary_table_oid(Oid relid, bool on_bgworker) { Relation rel; Oid namespace; Oid parsed_oid; char relname[NAMEDATALEN]; - rel = diskquota_relation_open(relid, NoLock); - if (rel == NULL) + /* + * diskquota bgworker should be error tolerant to keep it running in background, + * so we can't throw an error. + * On the other hand, diskquota launcher can throw an error if needed. 
+ */ + if (on_bgworker) + { + if (!get_rel_name_namespace(relid, &namespace, relname)) + { + return InvalidOid; + } + } else { - return InvalidOid; + rel = diskquota_relation_open(relid, NoLock); + if (rel == NULL) + { + return InvalidOid; + } + namespace = rel->rd_rel->relnamespace; + memcpy(relname, rel->rd_rel->relname.data, NAMEDATALEN); + relation_close(rel, NoLock); } - namespace = rel->rd_rel->relnamespace; - memcpy(relname, rel->rd_rel->relname.data, NAMEDATALEN); - relation_close(rel, NoLock); - parsed_oid = diskquota_parse_primary_table_oid(namespace, relname); if (OidIsValid(parsed_oid)) { @@ -214,13 +227,13 @@ parse_primary_table_oid(Oid relid) } Oid -get_primary_table_oid(Oid relid) +get_primary_table_oid(Oid relid, bool on_bgworker) { DiskQuotaRelationCacheEntry *relation_entry; Oid cached_prelid = relid; Oid parsed_prelid; - parsed_prelid = parse_primary_table_oid(relid); + parsed_prelid = parse_primary_table_oid(relid, on_bgworker); if (OidIsValid(parsed_prelid)) { return parsed_prelid; diff --git a/relation_cache.h b/relation_cache.h index 62a7658285f..d9ee70db1fe 100644 --- a/relation_cache.h +++ b/relation_cache.h @@ -30,7 +30,7 @@ extern Oid get_relid_by_relfilenode(RelFileNode relfilenode); extern void remove_cache_entry(Oid relid, Oid relfilenode); extern Oid get_uncommitted_table_relid(Oid relfilenode); extern void update_relation_cache(Oid relid); -extern Oid get_primary_table_oid(Oid relid); +extern Oid get_primary_table_oid(Oid relid, bool on_bgworker); extern void remove_committed_relation_from_cache(void); extern Size calculate_table_size(Oid relid); diff --git a/tests/regress/expected/test_default_tablespace.out b/tests/regress/expected/test_default_tablespace.out index 36c380c6656..3e9e78ed9f1 100644 --- a/tests/regress/expected/test_default_tablespace.out +++ b/tests/regress/expected/test_default_tablespace.out @@ -75,7 +75,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert to fail because of hard limits INSERT INTO t SELECT generate_series(1, 50000000); -ERROR: tablespace:1663 role:3143588 diskquota exceeded (seg0 127.0.0.1:6002 pid=2298) +ERROR: tablespace:1663 role:3050113 diskquota exceeded (seg1 127.0.0.1:6003 pid=21307) DROP TABLE IF EXISTS t; SET ROLE role1; -- database in customized tablespace @@ -153,7 +153,7 @@ DROP TABLE IF EXISTS t_in_custom_tablespace; NOTICE: table "t_in_custom_tablespace" does not exist, skipping -- expect insert to fail because of hard limits CREATE TABLE t_in_custom_tablespace (i) AS SELECT generate_series(1, 50000000) DISTRIBUTED BY (i); -ERROR: tablespace:3143595 role:3143588 diskquota exceeded (seg1 127.0.0.1:6003 pid=3260) +ERROR: tablespace:3050120 role:3050113 diskquota exceeded (seg0 127.0.0.1:6002 pid=22270) -- clean up DROP TABLE IF EXISTS t_in_custom_tablespace; NOTICE: table "t_in_custom_tablespace" does not exist, skipping @@ -173,6 +173,12 @@ SELECT diskquota.wait_for_worker_new_epoch(); DROP EXTENSION IF EXISTS diskquota; \c contrib_regression; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + DROP DATABASE IF EXISTS db_with_tablespace; DROP TABLESPACE IF EXISTS custom_tablespace; RESET ROLE; diff --git a/tests/regress/sql/test_default_tablespace.sql b/tests/regress/sql/test_default_tablespace.sql index 91923a99ac8..fb6e4ec63b3 100644 --- a/tests/regress/sql/test_default_tablespace.sql +++ b/tests/regress/sql/test_default_tablespace.sql @@ -101,6 +101,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); DROP EXTENSION IF EXISTS 
diskquota; \c contrib_regression; +SELECT diskquota.wait_for_worker_new_epoch(); DROP DATABASE IF EXISTS db_with_tablespace; DROP TABLESPACE IF EXISTS custom_tablespace; From 1d30b1ec278a7120fb2957e2af1e51c5ab0a264d Mon Sep 17 00:00:00 2001 From: LXY Date: Thu, 31 Mar 2022 11:15:00 +0800 Subject: [PATCH 176/330] Insert newline before `else` (#194) Insert a newline before `else` to stay aligned with the gpdb repository. --- .clang-format | 1 + diskquota.c | 6 ++++-- diskquota_utility.c | 21 ++++++++++++++------- gp_activetable.c | 21 ++++++++++++++------- quotamodel.c | 18 ++++++++++++------ relation_cache.c | 3 ++- 6 files changed, 47 insertions(+), 23 deletions(-) diff --git a/.clang-format b/.clang-format index f4315b447bd..63a1d6358fc 100644 --- a/.clang-format +++ b/.clang-format @@ -35,5 +35,6 @@ BraceWrapping: BeforeCatch: true SplitEmptyFunction: false SplitEmptyRecord: false + BeforeElse: true SortIncludes: false diff --git a/diskquota.c b/diskquota.c index 7c5c0c9b6db..de7b9163eb4 100644 --- a/diskquota.c +++ b/diskquota.c @@ -130,7 +130,8 @@ _PG_init(void) { ereport(ERROR, (errmsg("[diskquota] booting " DISKQUOTA_VERSION ", but " DISKQUOTA_BINARY_NAME " not in shared_preload_libraries. abort."))); - } else + } + else { ereport(INFO, (errmsg("booting diskquota-" DISKQUOTA_VERSION))); } @@ -298,7 +299,8 @@ disk_quota_worker_main(Datum main_arg) snprintf(_errmsg, sizeof(_errmsg), _errfmt, times * diskquota_naptime); init_ps_display("bgworker:", "[diskquota]", dbname, _errmsg); - } else + } + else { init_ps_display("bgworker:", "[diskquota]", dbname, "v" DISKQUOTA_VERSION " is not matching with current SQL. stop working"); diff --git a/diskquota_utility.c b/diskquota_utility.c index 612850be93f..dc70c2d192c 100644 --- a/diskquota_utility.c +++ b/diskquota_utility.c @@ -379,7 +379,8 @@ dispatch_pause_or_resume_command(Oid dbid, bool pause_extension) if (dbid == InvalidOid) { appendStringInfo(&sql, "()"); - } else + } + else { appendStringInfo(&sql, "(%d)", dbid); } @@ -854,7 +855,8 @@ set_quota_config_internal(Oid targetoid, int64 quota_limit_mb, QuotaType type) }, NULL, false, 0); if (ret != SPI_OK_INSERT) elog(ERROR, "cannot insert into quota setting table, error code %d", ret); - } else if (SPI_processed > 0 && quota_limit_mb < 0) + } + else if (SPI_processed > 0 && quota_limit_mb < 0) { ret = SPI_execute_with_args("delete from diskquota.quota_config where targetoid = $1 and quotatype = $2", 2, (Oid[]){ @@ -867,7 +869,8 @@ set_quota_config_internal(Oid targetoid, int64 quota_limit_mb, QuotaType type) }, NULL, false, 0); if (ret != SPI_OK_DELETE) elog(ERROR, "cannot delete item from quota setting table, error code %d", ret); - } else if (SPI_processed > 0 && quota_limit_mb > 0) + } + else if (SPI_processed > 0 && quota_limit_mb > 0) { ret = SPI_execute_with_args( "update diskquota.quota_config set quotalimitMB = $1 where targetoid= $2 and quotatype = $3", 3, @@ -940,7 +943,8 @@ set_target_internal(Oid primaryoid, Oid spcoid, int64 quota_limit_mb, QuotaType }, NULL, false, 0); if (ret != SPI_OK_INSERT) elog(ERROR, "cannot insert into quota setting table, error code %d", ret); - } else if (SPI_processed > 0 && quota_limit_mb < 0) + } + else if (SPI_processed > 0 && quota_limit_mb < 0) { ret = SPI_execute_with_args("delete from diskquota.target where primaryOid = $1 and tablespaceOid = $2", 2, (Oid[]){ @@ -1104,7 +1108,8 @@ update_diskquota_db_list(Oid dbid, HASHACTION action) { ereport(WARNING, (errmsg("can't alloc memory on dbid cache, there ary too many databases to monitor"))); } - }
else if (action == HASH_REMOVE) + } + else if (action == HASH_REMOVE) { hash_search(monitoring_dbid_cache, &dbid, HASH_REMOVE, &found); if (!found) @@ -1360,7 +1365,8 @@ calculate_relation_size_all_forks(RelFileNodeBackend *rnode, char relstorage) totalsize += ctx.size; } return totalsize; - } else if (relstorage == RELSTORAGE_AOROWS || relstorage == RELSTORAGE_AOCOLS) + } + else if (relstorage == RELSTORAGE_AOROWS || relstorage == RELSTORAGE_AOCOLS) { RelationFileStatCtx ctx = {0}; ctx.relation_path = relpathbackend(rnode->node, rnode->backend, MAIN_FORKNUM); @@ -1373,7 +1379,8 @@ calculate_relation_size_all_forks(RelFileNodeBackend *rnode, char relstorage) relation_file_stat(0, &ctx); ao_foreach_extent_file(relation_file_stat, &ctx); return ctx.size; - } else + } + else { return 0; } diff --git a/gp_activetable.c b/gp_activetable.c index 06ca5cfa815..559f5688515 100644 --- a/gp_activetable.c +++ b/gp_activetable.c @@ -314,7 +314,8 @@ gp_fetch_active_tables(bool is_init) if (is_init) { load_table_size(local_table_stats_map); - } else + } + else { /* step 1: fetch active oids from all the segments */ local_active_table_oid_maps = pull_active_list_from_seg(); @@ -426,7 +427,8 @@ diskquota_fetch_table_stat(PG_FUNCTION_ARGS) hash_seq_init(&(cache->pos), localCacheTable); MemoryContextSwitchTo(oldcontext); - } else + } + else { isFirstCall = false; } @@ -436,7 +438,8 @@ diskquota_fetch_table_stat(PG_FUNCTION_ARGS) if (isFirstCall) { funcctx->user_fctx = (void *)cache; - } else + } + else { cache = (DiskQuotaSetOFCache *)funcctx->user_fctx; } @@ -521,7 +524,8 @@ get_active_tables_stats(ArrayType *array) if (bitmap && (*bitmap & bitmask) == 0) { continue; - } else + } + else { relOid = DatumGetObjectId(fetch_att(ptr, typbyval, typlen)); segId = GpIdentity.segindex; @@ -795,7 +799,8 @@ load_table_size(HTAB *local_table_stats_map) if (tupdesc->natts != 3) { ereport(WARNING, (errmsg("[diskquota] tupdesc->natts: %d", tupdesc->natts))); - } else + } + else { ereport(WARNING, (errmsg("[diskquota] attrs: %d, %d, %d", tupdesc->attrs[0]->atttypid, tupdesc->attrs[1]->atttypid, tupdesc->attrs[2]->atttypid))); @@ -861,7 +866,8 @@ convert_map_to_string(HTAB *local_active_table_oid_maps) if (count != nitems) { appendStringInfo(&buffer, "%d,", entry->reloid); - } else + } + else { appendStringInfo(&buffer, "%d", entry->reloid); } @@ -1016,7 +1022,8 @@ pull_active_table_size_from_seg(HTAB *local_table_stats_map, char *active_oid_ar entry->reloid = reloid; entry->tablesize = tableSize; entry->segid = -1; - } else + } + else { /* sum table size from all the segments */ entry->tablesize = entry->tablesize + tableSize; diff --git a/quotamodel.c b/quotamodel.c index 4b7c3427bef..b457e3bba7f 100644 --- a/quotamodel.c +++ b/quotamodel.c @@ -243,7 +243,8 @@ update_limit_for_quota(int64 limit, float segratio, QuotaType type, Oid *keys) if (key.segid == -1) { entry->limit = limit; - } else + } + else { entry->limit = round((limit / SEGCOUNT) * segratio); } @@ -817,7 +818,8 @@ calculate_table_disk_usage(bool is_init, HTAB *local_active_table_stat_map) { reltablespace = MyDatabaseTableSpace; } - } else + } + else { LWLockAcquire(diskquota_locks.relation_cache_lock, LW_SHARED); DiskQuotaRelationCacheEntry *relation_entry = hash_search(relation_cache, &relOid, HASH_FIND, NULL); @@ -1065,7 +1067,8 @@ flush_local_black_map(void) ereport(WARNING, (errmsg("[diskquota] Shared disk quota black map size limit reached." 
"Some out-of-limit schemas or roles will be lost" "in blacklist."))); - } else + } + else { /* new db objects which exceed quota limit */ if (!found) @@ -1080,7 +1083,8 @@ flush_local_black_map(void) blackentry->segexceeded = localblackentry->segexceeded; localblackentry->isexceeded = false; localblackentry->segexceeded = false; - } else + } + else { /* db objects are removed or under quota limit in the new loop */ (void)hash_search(disk_quota_black_map, (void *)&localblackentry->keyitem, HASH_REMOVE, NULL); @@ -1281,7 +1285,8 @@ do_load_quotas(void) quotaType, quota_info[quotaType].num_keys))); } update_limit_for_quota(quota_limit_mb * (1 << 20), segratio, quotaType, (Oid[]){targetOid}); - } else + } + else { update_limit_for_quota(quota_limit_mb * (1 << 20), segratio, quotaType, (Oid[]){targetOid, spcOid}); } @@ -1767,7 +1772,8 @@ refresh_blackmap(PG_FUNCTION_ARGS) break; } } - } else + } + else { /* * We cannot fetch the relation from syscache. It may be an uncommitted relation. diff --git a/relation_cache.c b/relation_cache.c index 91a1f9959ba..0cf96df8f4f 100644 --- a/relation_cache.c +++ b/relation_cache.c @@ -206,7 +206,8 @@ parse_primary_table_oid(Oid relid, bool on_bgworker) { return InvalidOid; } - } else + } + else { rel = diskquota_relation_open(relid, NoLock); if (rel == NULL) From 439fee6c69af9096de46c1843c937fefeb5e8398 Mon Sep 17 00:00:00 2001 From: Chen Mulong Date: Fri, 1 Apr 2022 14:47:15 +0800 Subject: [PATCH 177/330] Add the ytt based release pipeline (#198) The release pipeline will: - Automatically make a release build for every commit. - The final release step needs to be started manually. - Then the build will be pushed to the release bucket. - And a tag will be pushed to the repository. --- CMakeLists.txt | 17 +- concourse/README.md | 12 +- concourse/fly.sh | 11 + concourse/pipeline/job_def.lib.yml | 53 +++- concourse/pipeline/pipeline.yml | 250 ------------------ concourse/pipeline/release.yml | 47 ++++ concourse/pipeline/release_pipeline.yml | 325 ------------------------ concourse/pipeline/res_def.yml | 32 ++- concourse/pipeline/trigger_def.lib.yml | 40 ++- concourse/scripts/build_diskquota.sh | 2 +- concourse/scripts/test_diskquota.sh | 2 +- 11 files changed, 200 insertions(+), 591 deletions(-) delete mode 100644 concourse/pipeline/pipeline.yml create mode 100644 concourse/pipeline/release.yml delete mode 100644 concourse/pipeline/release_pipeline.yml diff --git a/CMakeLists.txt b/CMakeLists.txt index 82ebe5bbffd..cb1c80e73e9 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -129,12 +129,23 @@ if(DEFINED DISKQUOTA_LAST_RELEASE_PATH) ) endif() -set(CPACK_GENERATOR "TGZ") -set(CPACK_INCLUDE_TOPLEVEL_DIRECTORY OFF) -set(CPACK_PACKAGE_FILE_NAME +set(tgz_NAME "diskquota-${DISKQUOTA_MAJOR_VERSION}.${DISKQUOTA_MINOR_VERSION}.${DISKQUOTA_PATCH_VERSION}-${DISTRO_NAME}_x86_64" ) +set(CPACK_GENERATOR "TGZ") +set(CPACK_INCLUDE_TOPLEVEL_DIRECTORY OFF) +set(CPACK_PACKAGE_FILE_NAME ${tgz_NAME}) include(CPack) +# create_artifact target is used to tar the package with version into a version-less tarball to be +# used on concourse gcs resource. It will be uploaded to a gcs version file (no diskquota version +# string in the file name), and be retrieved in the release step. Then we don't have to firgure out +# a way to add the version string back to the release file name, just untar it. +set(artifact_NAME "diskquota.tar.gz") +add_custom_target(create_artifact + COMMAND + ${CMAKE_COMMAND} --build . 
--target package + COMMAND + ${CMAKE_COMMAND} -E tar cvf ${artifact_NAME} "${tgz_NAME}.tar.gz") # packing end # Create build-info diff --git a/concourse/README.md b/concourse/README.md index 79f9ffbaa95..1eadfde16e6 100644 --- a/concourse/README.md +++ b/concourse/README.md @@ -46,7 +46,17 @@ https://extensions.ci.gpdb.pivotal.io/teams/main/pipelines/COMMIT:diskquota:gpdb ## Fly the release pipeline -TBD +By default, the release is built from the `gpdb` branch + +``` +./fly.sh -t extension -c release +``` + +To fly a release pipeline from a specific branch: + +``` +./fly.sh -t extension -c release -b release/. +``` ## Fly the dev pipeline diff --git a/concourse/fly.sh b/concourse/fly.sh index d4c6e6c42ad..4bb143cbd18 100755 --- a/concourse/fly.sh +++ b/concourse/fly.sh @@ -73,6 +73,17 @@ case ${pipeline_config} in pipeline_name="DEV:${pipeline_name}" config_file="dev.yml" ;; + release) + # Default branch is 'gpdb' as it is our main branch + if [ -z "${branch}" ]; then + branch="gpdb" + fi + if [ -z "${pipeline_name}" ]; then + pipeline_name="RELEASE:diskquota:${branch}" + fi + config_file="release.yml" + hook_res="diskquota_commit" + ;; *) usage "" ;; diff --git a/concourse/pipeline/job_def.lib.yml b/concourse/pipeline/job_def.lib.yml index f32d400f8bd..76798f09125 100644 --- a/concourse/pipeline/job_def.lib.yml +++ b/concourse/pipeline/job_def.lib.yml @@ -9,6 +9,7 @@ res_test_image: centos6-gpdb6-image-test res_gpdb_bin: #@ "bin_gpdb6_centos6" + ("" if release_build else "_debug") res_diskquota_bin: bin_diskquota_gpdb6_rhel6 res_intermediates_bin: bin_diskquota_gpdb6_rhel6_intermediates +release_bin: bin_diskquota_gpdb6_rhel6_release os: rhel6 build_type: #@ "Release" if release_build else "Debug" #@ end @@ -20,6 +21,7 @@ res_test_image: centos7-gpdb6-image-test res_gpdb_bin: #@ "bin_gpdb6_centos7" + ("" if release_build else "_debug") res_diskquota_bin: bin_diskquota_gpdb6_rhel7 res_intermediates_bin: bin_diskquota_gpdb6_rhel7_intermediates +release_bin: bin_diskquota_gpdb6_rhel7_release os: rhel7 build_type: #@ "Release" if release_build else "Debug" #@ end @@ -31,6 +33,7 @@ res_test_image: rhel8-gpdb6-image-test res_gpdb_bin: #@ "bin_gpdb6_rhel8" + ("" if release_build else "_debug") res_diskquota_bin: bin_diskquota_gpdb6_rhel8 res_intermediates_bin: bin_diskquota_gpdb6_rhel8_intermediates +release_bin: bin_diskquota_gpdb6_rhel8_release os: rhel8 build_type: #@ "Release" if release_build else "Debug" #@ end @@ -42,6 +45,7 @@ res_test_image: ubuntu18-gpdb6-image-test res_gpdb_bin: #@ "bin_gpdb6_ubuntu18" + ("" if release_build else "_debug") res_diskquota_bin: bin_diskquota_gpdb6_ubuntu18 res_intermediates_bin: bin_diskquota_gpdb6_ubuntu18_intermediates +release_bin: bin_diskquota_gpdb6_ubuntu18_release os: ubuntu18.04 build_type: #@ "Release" if release_build else "Debug" #@ end @@ -54,10 +58,10 @@ on_failure: #@ trigger["on_failure"] on_error: #@ trigger["on_error"] plan: #@ for to_get in trigger["to_get"]: -- trigger: true +- trigger: #@ trigger["auto_trigger"] _: #@ template.replace(to_get) #@ end -#@ for to_put in trigger["to_put"]: +#@ for to_put in trigger["to_put_pre"]: - #@ to_put #@ end #@ end @@ -71,10 +75,10 @@ on_failure: #@ trigger["on_failure"] on_error: #@ trigger["on_error"] plan: #@ for to_get in trigger["to_get"]: -- trigger: true +- trigger: #@ trigger["auto_trigger"] _: #@ template.replace(to_get) #@ end -#@ for to_put in trigger["to_put"]: +#@ for to_put in trigger["to_put_pre"]: - #@ to_put #@ end - get: clang-format-image @@ -126,6 +130,43 @@ plan: #@ end #@ end 
+#! The final release job +#! 1. Push the artifacts to the release bucket +#! 2. Push the git tag +#@ def exit_release_job(param): +#@ trigger = param["trigger"] +#@ confs = param["confs"] +#@ passed_jobs = [] +#@ res_map = param["res_map"] +#@ for conf in confs: +#@ passed_jobs.append(build_test_job_name(conf)) +#@ add_res_by_name(res_map, conf["release_bin"]) +#@ end +name: exit_release +on_failure: #@ trigger["on_failure"] +on_error: #@ trigger["on_error"] +on_success: #@ trigger["on_success"] +plan: +#@ for to_get in trigger["to_get"]: +- passed: #@ passed_jobs + _: #@ template.replace(to_get) +#@ end +- in_parallel: + steps: +#@ for conf in confs: + - do: + - get: #@ conf["res_intermediates_bin"] + params: + unpack: true + - put: #@ conf["release_bin"] + params: + file: #@ conf["res_intermediates_bin"] + "/diskquota-*-*.tar.gz" +#@ end +#@ for to_put in trigger["to_put_post"]: +- #@ to_put +#@ end +#@ end + #@ def _build_task(conf): task: #@ "build_" + conf["os"] file: diskquota_src/concourse/tasks/build_diskquota.yml @@ -139,7 +180,7 @@ params: #@ def _test_task(conf): task: #@ "test_" + conf["os"] -timeout: 1h +timeout: 2h file: diskquota_src/concourse/tasks/test_diskquota.yml image: #@ conf["res_test_image"] input_mapping: @@ -183,5 +224,5 @@ plan: - #@ _test_task(conf) - put: #@ conf["res_intermediates_bin"] params: - file: diskquota_artifacts/diskquota-*_x86_64.tar.gz + file: diskquota_artifacts/diskquota.tar.gz #@ end diff --git a/concourse/pipeline/pipeline.yml b/concourse/pipeline/pipeline.yml deleted file mode 100644 index 9f1f7fbfcf2..00000000000 --- a/concourse/pipeline/pipeline.yml +++ /dev/null @@ -1,250 +0,0 @@ -## ====================================================================== -## Pipeline for GPDB PL/R GPPKG -## ====================================================================== - -groups: -- name: ALL - jobs: - - diskquota_centos6_build_test - - diskquota_centos7_build_test - - diskquota_ubuntu18_build_test - - diskquota_centos7_extension_upgrade_1.0_2.0 -- name: GPDB6 - jobs: - - diskquota_centos6_build_test - - diskquota_centos7_build_test - - diskquota_rhel8_build_test - - diskquota_ubuntu18_build_test -- name: GPDB6_UPGRADE - jobs: - - diskquota_centos7_extension_upgrade_1.0_2.0 - -resource_types: -- name: gcs - type: registry-image - source: - repository: frodenas/gcs-resource - -resources: - -# Image Resources - -- name: centos-gpdb-dev-6 - type: registry-image - source: - repository: gcr.io/data-gpdb-public-images/gpdb5-centos6-build-test - tag: latest - -- name: centos-gpdb-dev-7 - type: registry-image - source: - repository: gcr.io/data-gpdb-public-images/gpdb5-centos7-build-test - tag: latest - -- name: rhel-image-dev-8 - type: registry-image - source: - repository: gcr.io/data-gpdb-private-images/gpdb6-rhel8-test - tag: latest - username: _json_key - password: ((container-registry-readonly-service-account-key)) - -- name: rhel-image-build-8 - type: registry-image - source: - repository: gcr.io/data-gpdb-private-images/gpdb6-rhel8-build - tag: latest - username: _json_key - password: ((container-registry-readonly-service-account-key)) - -- name: ubuntu18-image-build - type: registry-image - source: - repository: gcr.io/data-gpdb-public-images/gpdb6-ubuntu18.04-build - tag: latest - -- name: ubuntu18-image-test - type: registry-image - source: - repository: gcr.io/data-gpdb-public-images/gpdb6-ubuntu18.04-test - tag: latest - -- name: bin_diskquota_centos7_1.0 - type: gcs - source: - bucket: {{gcs-bucket}} - json_key: 
{{concourse-gcs-resources-service-account-key}} - regexp: diskquota/released/gpdb6/diskquota-(1.*)-rhel7_x86_64.tar.gz - -- name: bin_diskquota_centos7 - type: gcs - source: - bucket: {{gcs-bucket-dev}} - json_key: {{concourse-gcs-resources-service-account-key}} - versioned_file: diskquota/intermediates/gpdb6/diskquota-master-rhel7_x86_64.tar.gz - -# Github Source Codes - -- name: gpdb_src - type: git - source: - branch: {{gpdb-git-branch}} - uri: {{gpdb-git-remote}} - -- name: diskquota_src - type: git - source: - branch: gpdb - uri: https://github.com/greenplum-db/diskquota.git - -# gpdb binary on gcs is located as different folder for different version -# use gcs_gpdb_binary_folder to specify them. -- name: bin_gpdb_centos6 - type: gcs - source: - bucket: {{gcs-bucket-intermediates}} - json_key: {{concourse-gcs-resources-service-account-key}} - versioned_file: ((gcs_gpdb_binary_folder))/bin_gpdb_centos6/bin_gpdb.tar.gz -- name: bin_gpdb_rhel8 - type: gcs - source: - bucket: {{gcs-bucket-intermediates}} - json_key: {{concourse-gcs-resources-service-account-key}} - versioned_file: ((gcs_gpdb_binary_folder))/bin_gpdb_rhel8/bin_gpdb.tar.gz -- name: bin_gpdb_centos7 - type: gcs - source: - bucket: {{gcs-bucket-intermediates}} - json_key: {{concourse-gcs-resources-service-account-key}} - versioned_file: ((gcs_gpdb_binary_folder))/bin_gpdb_centos7/bin_gpdb.tar.gz -- name: bin_gpdb_ubuntu18 - type: gcs - source: - bucket: {{gcs-bucket-intermediates}} - json_key: {{concourse-gcs-resources-service-account-key}} - versioned_file: ((gcs_gpdb_binary_folder))/bin_gpdb_ubuntu18.04/bin_gpdb.tar.gz - -## jobs -## ====================================================================== - -jobs: -- name: diskquota_centos7_build_test - max_in_flight: 3 - plan: - - aggregate: - - get: centos-gpdb-dev-7 - - get: diskquota_src - trigger: true - - get: bin_gpdb - resource: bin_gpdb_centos7 - - get: gpdb_src - - task: build_diskquota - file: diskquota_src/concourse/tasks/build_diskquota.yml - image: centos-gpdb-dev-7 - params: - DISKQUOTA_OS: rhel7 - - task: test_diskquota - file: diskquota_src/concourse/tasks/test_diskquota.yml - image: centos-gpdb-dev-7 - input_mapping: - bin_diskquota: diskquota_artifacts - params: - DISKQUOTA_OS: rhel7 - - aggregate: - - put: bin_diskquota_centos7 - params: - file: diskquota_artifacts/diskquota*.tar.gz - -- name: diskquota_rhel8_build_test - max_in_flight: 3 - plan: - - aggregate: - - get: rhel-image-dev-8 - - get: rhel-image-build-8 - - get: diskquota_src - trigger: true - - get: bin_gpdb - resource: bin_gpdb_rhel8 - - get: gpdb_src - - task: build_diskquota - file: diskquota_src/concourse/tasks/build_diskquota.yml - image: rhel-image-build-8 - params: - DISKQUOTA_OS: rhel8 - - task: test_diskquota - file: diskquota_src/concourse/tasks/test_diskquota.yml - image: rhel-image-dev-8 - input_mapping: - bin_diskquota: diskquota_artifacts - params: - DISKQUOTA_OS: rhel8 - -- name: diskquota_centos6_build_test - max_in_flight: 3 - plan: - - aggregate: - - get: centos-gpdb-dev-6 - - get: diskquota_src - trigger: true - - get: bin_gpdb - resource: bin_gpdb_centos6 - - get: gpdb_src - - task: build_diskquota - file: diskquota_src/concourse/tasks/build_diskquota.yml - image: centos-gpdb-dev-6 - params: - DISKQUOTA_OS: rhel6 - - task: test_diskquota - file: diskquota_src/concourse/tasks/test_diskquota.yml - image: centos-gpdb-dev-6 - input_mapping: - bin_diskquota: diskquota_artifacts - params: - DISKQUOTA_OS: rhel6 - -- name: diskquota_ubuntu18_build_test - max_in_flight: 3 - plan: - - 
aggregate: - - get: ubuntu18-image-build - - get: ubuntu18-image-test - - get: diskquota_src - trigger: true - - get: bin_gpdb - resource: bin_gpdb_ubuntu18 - - get: gpdb_src - - task: build_diskquota - file: diskquota_src/concourse/tasks/build_diskquota.yml - image: ubuntu18-image-build - params: - DISKQUOTA_OS: ubuntu18.04 - - task: test_diskquota - file: diskquota_src/concourse/tasks/test_diskquota.yml - image: ubuntu18-image-test - input_mapping: - bin_diskquota: diskquota_artifacts - params: - DISKQUOTA_OS: ubuntu18.04 - -- name: diskquota_centos7_extension_upgrade_1.0_2.0 - max_in_flight: 3 - plan: - - aggregate: - - get: centos-gpdb-dev-7 - - get: bin_diskquota_old - resource: bin_diskquota_centos7_1.0 - - get: bin_diskquota_centos7 - trigger: true - - get: bin_gpdb - resource: bin_gpdb_centos7 - - get: gpdb_src - - get: diskquota_src - - task: upgrade_extension - file: diskquota_src/concourse/tasks/upgrade_extension.yml - input_mapping: - bin_diskquota_new: bin_diskquota_centos7 - image: centos-gpdb-dev-7 - params: - DISKQUOTA_OS: rhel7 - OLD_VERSION: "1.0" - NEW_VERSION: "2.0" diff --git a/concourse/pipeline/release.yml b/concourse/pipeline/release.yml new file mode 100644 index 00000000000..4ba0394203b --- /dev/null +++ b/concourse/pipeline/release.yml @@ -0,0 +1,47 @@ +#@ load("job_def.lib.yml", +#@ "entrance_job", +#@ "build_test_job", +#@ "exit_release_job", +#@ "centos6_gpdb6_conf", +#@ "centos7_gpdb6_conf", +#@ "rhel8_gpdb6_conf", +#@ "ubuntu18_gpdb6_conf") +#@ load("trigger_def.lib.yml", +#@ "release_trigger", +#@ ) +#@ +#@ load("base.lib.yml", "declare_res", "declare_res_type") +#@ res_map = {} +#@ res_type_map = {} +#@ trigger = release_trigger(res_map) +#@ confs = [ +#@ centos6_gpdb6_conf(release_build=True), +#@ centos7_gpdb6_conf(release_build=True), +#@ rhel8_gpdb6_conf(release_build=True), +#@ ubuntu18_gpdb6_conf(release_build=True) +#@ ] +jobs: +#@ param = { +#@ "res_map": res_map, +#@ "trigger": trigger, +#@ } +- #@ entrance_job(param) +#@ for conf in confs: +#@ param = { +#@ "res_map": res_map, +#@ "trigger": trigger, +#@ "gpdb_src": "gpdb6_src", +#@ "conf": conf +#@ } +- #@ build_test_job(param) +#@ end +#@ param = { +#@ "res_map": res_map, +#@ "trigger": trigger, +#@ "confs": confs +#@ } +- #@ exit_release_job(param) + +resources: #@ declare_res(res_type_map, res_map) + +resource_types: #@ declare_res_type(res_type_map) diff --git a/concourse/pipeline/release_pipeline.yml b/concourse/pipeline/release_pipeline.yml deleted file mode 100644 index 3c2665b4a0a..00000000000 --- a/concourse/pipeline/release_pipeline.yml +++ /dev/null @@ -1,325 +0,0 @@ -## ====================================================================== -## Pipeline for diskquota -## ====================================================================== - -groups: -- name: GPDB6 - jobs: - - release_centos6 - - release_centos7 - - release_ubuntu18 - - release_rhel-8 - - diskquota_centos6_test_release - - diskquota_centos7_test_release - - diskquota_ubuntu18_test_release - - diskquota_rhel8_test_release - -resource_types: -- name: gcs - type: docker-image - source: - repository: frodenas/gcs-resource - -resources: - -# Image Resources - -- name: centos-gpdb-dev-6 - type: registry-image - source: - repository: gcr.io/data-gpdb-public-images/gpdb5-centos6-build-test - tag: latest - -- name: centos-gpdb-dev-7 - type: registry-image - source: - repository: gcr.io/data-gpdb-public-images/gpdb5-centos7-build-test - tag: latest - -- name: rhel-image-dev-8 - type: registry-image - source: - repository: 
gcr.io/data-gpdb-private-images/gpdb6-rhel8-test - tag: latest - username: _json_key - password: ((container-registry-readonly-service-account-key)) - -- name: rhel-image-build-8 - type: registry-image - source: - repository: gcr.io/data-gpdb-private-images/gpdb6-rhel8-build - tag: latest - username: _json_key - password: ((container-registry-readonly-service-account-key)) - -- name: ubuntu18-image-build - type: registry-image - source: - repository: gcr.io/data-gpdb-public-images/gpdb6-ubuntu18.04-build - tag: latest - -- name: ubuntu18-image-test - type: registry-image - source: - repository: gcr.io/data-gpdb-public-images/gpdb6-ubuntu18.04-test - tag: latest - - -# Github Source Codes - -- name: gpdb_src - type: git - source: - branch: {{gpdb-git-branch}} - uri: {{gpdb-git-remote}} - -- name: diskquota_src - type: git - source: - branch: release/1.X - uri: https://github.com/greenplum-db/diskquota.git - tag_filter: 1.* - -# gpdb binary on gcs is located as different folder for different version -# use gcs_gpdb_binary_folder to specify them. -- name: bin_gpdb_centos6 - type: gcs - source: - bucket: {{gcs-bucket-intermediates}} - json_key: {{concourse-gcs-resources-service-account-key}} - versioned_file: ((gcs_gpdb_binary_folder))/bin_gpdb_centos6/bin_gpdb.tar.gz - -- name: bin_gpdb_centos7 - type: gcs - source: - bucket: {{gcs-bucket-intermediates}} - json_key: {{concourse-gcs-resources-service-account-key}} - versioned_file: ((gcs_gpdb_binary_folder))/bin_gpdb_centos7/bin_gpdb.tar.gz - -- name: bin_gpdb_rhel8 - type: gcs - source: - bucket: {{gcs-bucket-intermediates}} - json_key: {{concourse-gcs-resources-service-account-key}} - versioned_file: ((gcs_gpdb_binary_folder))/bin_gpdb_rhel8/bin_gpdb.tar.gz - -- name: bin_gpdb_ubuntu18 - type: gcs - source: - bucket: ((gcs-bucket-intermediates)) - json_key: ((concourse-gcs-resources-service-account-key)) - versioned_file: ((gcs_gpdb_binary_folder))/bin_gpdb_ubuntu18.04/bin_gpdb.tar.gz - -- name: bin_diskquota_centos7 - type: gcs - source: - bucket: {{gcs-bucket}} - json_key: {{concourse-gcs-resources-service-account-key}} - regexp: diskquota/released/gpdb6/diskquota-(.*).tar.gz - -- name: bin_diskquota_centos6 - type: gcs - source: - bucket: {{gcs-bucket}} - json_key: {{concourse-gcs-resources-service-account-key}} - regexp: diskquota/released/gpdb6/diskquota-(.*).tar.gz - -- name: bin_diskquota_rhel8 - type: gcs - source: - bucket: {{gcs-bucket}} - json_key: {{concourse-gcs-resources-service-account-key}} - regexp: diskquota/released/gpdb6/diskquota-(.*).tar.gz - -- name: bin_diskquota_ubuntu18 - type: gcs - source: - bucket: {{gcs-bucket}} - json_key: {{concourse-gcs-resources-service-account-key}} - regexp: diskquota/released/gpdb6/diskquota-(.*).tar.gz - -## jobs -## ====================================================================== - -jobs: -- name: release_rhel-8 - max_in_flight: 3 - plan: - - aggregate: - - get: rhel-image-dev-8 - - get: rhel-image-build-8 - - get: diskquota_src - trigger: true - - get: bin_gpdb - resource: bin_gpdb_rhel8 - - get: gpdb_src - - aggregate: - - task: build_diskquota - file: diskquota_src/concourse/tasks/build_diskquota.yml - image: rhel-image-build-8 - params: - DISKQUOTA_OS: rhel8 - - aggregate: - - task: test_diskquota - file: diskquota_src/concourse/tasks/test_diskquota.yml - image: rhel-image-dev-8 - input_mapping: - bin_diskquota: diskquota_artifacts - params: - DISKQUOTA_OS: rhel8 - - aggregate: - - put: bin_diskquota_rhel8 - params: - file: diskquota_artifacts/diskquota*.tar.gz - -- name: 
release_centos7 - max_in_flight: 3 - plan: - - aggregate: - - get: centos-gpdb-dev-7 - - get: diskquota_src - trigger: true - - get: bin_gpdb - resource: bin_gpdb_centos7 - - get: gpdb_src - - aggregate: - - task: build_diskquota - file: diskquota_src/concourse/tasks/build_diskquota.yml - image: centos-gpdb-dev-7 - params: - DISKQUOTA_OS: rhel7 - - aggregate: - - task: test_diskquota - file: diskquota_src/concourse/tasks/test_diskquota.yml - image: centos-gpdb-dev-7 - input_mapping: - bin_diskquota: diskquota_artifacts - params: - DISKQUOTA_OS: rhel7 - - aggregate: - - put: bin_diskquota_centos7 - params: - file: diskquota_artifacts/diskquota*.tar.gz - -- name: release_centos6 - max_in_flight: 3 - plan: - - aggregate: - - get: centos-gpdb-dev-6 - - get: diskquota_src - trigger: true - - get: bin_gpdb - resource: bin_gpdb_centos6 - - get: gpdb_src - - aggregate: - - task: build_diskquota - file: diskquota_src/concourse/tasks/build_diskquota.yml - image: centos-gpdb-dev-6 - params: - DISKQUOTA_OS: rhel6 - - aggregate: - - task: test_diskquota - file: diskquota_src/concourse/tasks/test_diskquota.yml - image: centos-gpdb-dev-6 - input_mapping: - bin_diskquota: diskquota_artifacts - params: - DISKQUOTA_OS: rhel6 - - aggregate: - - put: bin_diskquota_centos6 - params: - file: diskquota_artifacts/diskquota*.tar.gz - -- name: release_ubuntu18 - max_in_flight: 3 - plan: - - aggregate: - - get: ubuntu18-image-build - - get: ubuntu18-image-test - - get: diskquota_src - trigger: true - - get: bin_gpdb - resource: bin_gpdb_ubuntu18 - - get: gpdb_src - - aggregate: - - task: build_diskquota - file: diskquota_src/concourse/tasks/build_diskquota.yml - image: ubuntu18-image-build - params: - DISKQUOTA_OS: ubuntu18.04 - - aggregate: - - task: test_diskquota - file: diskquota_src/concourse/tasks/test_diskquota.yml - image: ubuntu18-image-test - input_mapping: - bin_diskquota: diskquota_artifacts - params: - DISKQUOTA_OS: ubuntu18.04 - - aggregate: - - put: bin_diskquota_ubuntu18 - params: - file: diskquota_artifacts/diskquota*.tar.gz - -- name: diskquota_centos6_test_release - max_in_flight: 3 - plan: - - aggregate: - - get: centos-gpdb-dev-6 - - get: diskquota_src - - get: bin_diskquota - resource: bin_diskquota_centos6 - - get: bin_gpdb - resource: bin_gpdb_centos6 - trigger: true - - get: gpdb_src - - task: test_diskquota - file: diskquota_src/concourse/tasks/test_diskquota.yml - image: centos-gpdb-dev-6 - -- name: diskquota_centos7_test_release - max_in_flight: 3 - plan: - - aggregate: - - get: centos-gpdb-dev-7 - - get: diskquota_src - - get: bin_diskquota - resource: bin_diskquota_centos7 - - get: bin_gpdb - resource: bin_gpdb_centos7 - trigger: true - - get: gpdb_src - - task: test_diskquota - file: diskquota_src/concourse/tasks/test_diskquota.yml - image: centos-gpdb-dev-7 - -- name: diskquota_rhel8_test_release - max_in_flight: 3 - plan: - - aggregate: - - get: rhel-image-dev-8 - - get: diskquota_src - - get: bin_diskquota - resource: bin_diskquota_rhel8 - - get: bin_gpdb - resource: bin_gpdb_rhel8 - trigger: true - - get: gpdb_src - - task: test_diskquota - file: diskquota_src/concourse/tasks/test_diskquota.yml - image: rhel-image-dev-8 - -- name: diskquota_ubuntu18_test_release - max_in_flight: 3 - plan: - - aggregate: - - get: ubuntu18-image-test - - get: diskquota_src - - get: bin_diskquota - resource: bin_diskquota_ubuntu18 - - get: bin_gpdb - resource: bin_gpdb_ubuntu18 - trigger: true - - get: gpdb_src - - task: test_diskquota - file: diskquota_src/concourse/tasks/test_diskquota.yml - image: 
ubuntu18-image-test diff --git a/concourse/pipeline/res_def.yml b/concourse/pipeline/res_def.yml index 5dcecb74ac7..ef15f9ed4fd 100644 --- a/concourse/pipeline/res_def.yml +++ b/concourse/pipeline/res_def.yml @@ -33,7 +33,7 @@ resources: type: git # We should rely on the webhook. See README if webhook doesn't work webhook_token: ((diskquota-webhook-token)) - check_every: 24h + check_every: 1h source: branch: ((diskquota-branch)) uri: https://github.com/greenplum-db/diskquota.git @@ -227,6 +227,35 @@ resources: json_key: ((extensions-gcs-service-account-key)) versioned_file: intermediates/diskquota/diskquota_ubuntu18_gpdb6.tar.gz +# For uploading to the release bucket +- name: bin_diskquota_gpdb6_rhel6_release + type: gcs + source: + bucket: ((gcs-bucket)) + json_key: ((concourse-gcs-resources-service-account-key)) + regexp: diskquota/released/gpdb6/diskquota-(.*).tar.gz + +- name: bin_diskquota_gpdb6_rhel7_release + type: gcs + source: + bucket: ((gcs-bucket)) + json_key: ((concourse-gcs-resources-service-account-key)) + regexp: diskquota/released/gpdb6/diskquota-(.*).tar.gz + +- name: bin_diskquota_gpdb6_rhel8_release + type: gcs + source: + bucket: ((gcs-bucket)) + json_key: ((concourse-gcs-resources-service-account-key)) + regexp: diskquota/released/gpdb6/diskquota-(.*).tar.gz + +- name: bin_diskquota_gpdb6_ubuntu18_release + type: gcs + source: + bucket: ((gcs-bucket)) + json_key: ((concourse-gcs-resources-service-account-key)) + regexp: diskquota/released/gpdb6/diskquota-(.*).tar.gz + # Other dependencies - name: bin_cmake type: gcs @@ -234,6 +263,7 @@ resources: bucket: gpdb-extensions-concourse-resources json_key: ((extensions-gcs-service-account-key)) regexp: dependencies/cmake-(.*)-linux-x86_64.sh + - name: slack_notify type: slack-alert source: diff --git a/concourse/pipeline/trigger_def.lib.yml b/concourse/pipeline/trigger_def.lib.yml index fa8f51bfbd0..c2854d02712 100644 --- a/concourse/pipeline/trigger_def.lib.yml +++ b/concourse/pipeline/trigger_def.lib.yml @@ -3,16 +3,18 @@ #! PR trigger. For pull request pipelines #@ def pr_trigger(res_map): #@ add_res_by_name(res_map, "diskquota_pr") +auto_trigger: true to_get: - get: diskquota_src resource: diskquota_pr params: fetch_tags: true -to_put: +to_put_pre: - put: diskquota_pr params: path: diskquota_src status: pending +to_put_post: #@ [] on_failure: put: diskquota_pr params: @@ -34,10 +36,12 @@ on_success: #@ def commit_trigger(res_map): #@ add_res_by_name(res_map, "diskquota_commit") #@ add_res_by_name(res_map, "slack_notify") +auto_trigger: true to_get: - get: diskquota_src resource: diskquota_commit -to_put: #@ [] +to_put_pre: #@ [] +to_put_post: #@ [] #! To set the github commit status, https://github.com/Pix4D/cogito is a good choice. #! Unfortunately it doesn't work with Concourse 5. on_success: @@ -54,10 +58,12 @@ on_error: #! Commit trigger. For dev pipelines. No webhook #@ def commit_dev_trigger(res_map): #@ add_res_by_name(res_map, "diskquota_commit_dev") +auto_trigger: true to_get: - get: diskquota_src resource: diskquota_commit_dev -to_put: #@ [] +to_put_pre: #@ [] +to_put_post: #@ [] #! To set the github commit status, https://github.com/Pix4D/cogito is a good choice. #! Unfortunately it doesn't work with Concourse 5. on_success: @@ -65,3 +71,31 @@ on_failure: on_error: #@ end +#! Commit trigger. 
For release pipelines +#@ def release_trigger(res_map): +#@ add_res_by_name(res_map, "diskquota_commit") +#@ add_res_by_name(res_map, "slack_notify") +auto_trigger: true +to_get: +- get: diskquota_src + resource: diskquota_commit +to_put_pre: #@ [] +to_put_post: +- put: diskquota_commit + params: + repository: diskquota_src + tag: diskquota_src/VERSION +on_success: + put: slack_notify + params: + alert_type: success + text: A new diskquota release has been pushed! +on_failure: + put: slack_notify + params: + alert_type: failed +on_error: + put: slack_notify + params: + alert_type: errored +#@ end diff --git a/concourse/scripts/build_diskquota.sh b/concourse/scripts/build_diskquota.sh index a725037c584..312e49896bd 100755 --- a/concourse/scripts/build_diskquota.sh +++ b/concourse/scripts/build_diskquota.sh @@ -16,7 +16,7 @@ function pkg() { cmake /home/gpadmin/diskquota_src \ -DDISKQUOTA_LAST_RELEASE_PATH="${last_release_path}" \ -DCMAKE_BUILD_TYPE="${BUILD_TYPE}" - cmake --build . --target package + cmake --build . --target create_artifact popd } diff --git a/concourse/scripts/test_diskquota.sh b/concourse/scripts/test_diskquota.sh index c34fa656507..07033b39996 100755 --- a/concourse/scripts/test_diskquota.sh +++ b/concourse/scripts/test_diskquota.sh @@ -10,7 +10,7 @@ function activate_standby() { } function _main() { - tar -xzf /home/gpadmin/bin_diskquota/*.tar.gz -C /usr/local/greenplum-db-devel + tar -xzf /home/gpadmin/bin_diskquota/diskquota-*-*.tar.gz -C /usr/local/greenplum-db-devel source /home/gpadmin/gpdb_src/gpAux/gpdemo/gpdemo-env.sh pushd /home/gpadmin/diskquota_artifacts From 5a0f4f81784978b4b6697473cabbb3c43e6409f6 Mon Sep 17 00:00:00 2001 From: Sasasu Date: Fri, 1 Apr 2022 15:34:43 +0800 Subject: [PATCH 178/330] Use on/off in diskquota.status() (#200) PostgreSQL uses on/off for all boolean variables. soft limits: on (soft limit is enforced) / off (soft limit is disabled); hard limits: on (hard limit is enforced) / off (hard limit is disabled) --- diskquota.c | 4 +- tests/regress/expected/test_show_status.out | 44 ++++++++++----------- 2 files changed, 24 insertions(+), 24 deletions(-) diff --git a/diskquota.c b/diskquota.c index de7b9163eb4..1f22cc537ed 100644 --- a/diskquota.c +++ b/diskquota.c @@ -1184,7 +1184,7 @@ diskquota_status_check_soft_limit() if (!found) return "paused"; // if worker booted, check 'worker_map->is_paused' - return paused ? "paused" : "enabled"; + return paused ? "paused" : "on"; } static const char * @@ -1210,7 +1210,7 @@ diskquota_status_check_hard_limit() // hard limits should also paused if (found && paused && hardlimit) return "paused"; - return hardlimit ? "enabled" : "disabled"; + return hardlimit ? "on" : "off"; } static const char * diff --git a/tests/regress/expected/test_show_status.out b/tests/regress/expected/test_show_status.out index 0ec41dc5f3a..14c3e7de9fd 100644 --- a/tests/regress/expected/test_show_status.out +++ b/tests/regress/expected/test_show_status.out @@ -1,26 +1,26 @@ select * from diskquota.status() where name not like '%version'; - name | status --------------+---------- - soft limits | enabled - hard limits | disabled + name | status +-------------+-------- + soft limits | on + hard limits | off (2 rows) \! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null \! gpstop -u > /dev/null select * from diskquota.status() where name not like '%version'; - name | status --------------+--------- - soft limits | enabled - hard limits | enabled + name | status +-------------+-------- + soft limits | on + hard limits | on (2 rows) \!
gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null \! gpstop -u > /dev/null select * from diskquota.status() where name not like '%version'; - name | status --------------+---------- - soft limits | enabled - hard limits | disabled + name | status +-------------+-------- + soft limits | on + hard limits | off (2 rows) select from diskquota.pause(); @@ -28,10 +28,10 @@ select from diskquota.pause(); (1 row) select * from diskquota.status() where name not like '%version'; - name | status --------------+---------- + name | status +-------------+-------- soft limits | paused - hard limits | disabled + hard limits | off (2 rows) \! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null @@ -46,10 +46,10 @@ select * from diskquota.status() where name not like '%version'; \! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null \! gpstop -u > /dev/null select * from diskquota.status() where name not like '%version'; - name | status --------------+---------- + name | status +-------------+-------- soft limits | paused - hard limits | disabled + hard limits | off (2 rows) select from diskquota.resume(); @@ -59,9 +59,9 @@ select from diskquota.resume(); \! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null \! gpstop -u > /dev/null select * from diskquota.status() where name not like '%version'; - name | status --------------+---------- - soft limits | enabled - hard limits | disabled + name | status +-------------+-------- + soft limits | on + hard limits | off (2 rows) From d4fa85ba8e61ef05e2c12cce9a5ca07890570c4a Mon Sep 17 00:00:00 2001 From: Chen Mulong Date: Wed, 6 Apr 2022 17:05:07 +0800 Subject: [PATCH 179/330] Update licenses (#195) Pivotal became a part of VMware in 2020. --- LICENSE | 3 ++- diskquota.c | 3 ++- diskquota.h | 12 ++++++++++++ diskquota_utility.c | 3 ++- enforcement.c | 3 ++- gp_activetable.c | 3 ++- gp_activetable.h | 12 ++++++++++++ quotamodel.c | 3 ++- relation_cache.c | 11 +++++++++++ relation_cache.h | 11 +++++++++++ 10 files changed, 58 insertions(+), 6 deletions(-) diff --git a/LICENSE b/LICENSE index b23675b99ba..6e94d88cbc9 100644 --- a/LICENSE +++ b/LICENSE @@ -1,4 +1,5 @@ -Copyright (c) 2004-2018 Pivotal Software, Inc. +Copyright (c) 2004-2020 Pivotal Software, Inc. +Copyright (c) 2020-Present VMware, Inc. or its affiliates diskquota is licensed under the PostgreSQL license, the same license as PostgreSQL. It contains parts of PostgreSQL source code. A copy of diff --git a/diskquota.c b/diskquota.c index 1f22cc537ed..1ed3b8d88d2 100644 --- a/diskquota.c +++ b/diskquota.c @@ -7,7 +7,8 @@ * launcher process which is responsible for starting/refreshing the diskquota * worker processes which monitor given databases. * - * Copyright (c) 2018-Present Pivotal Software, Inc. + * Copyright (c) 2018-2020 Pivotal Software, Inc. + * Copyright (c) 2020-Present VMware, Inc. or its affiliates * * IDENTIFICATION * diskquota/diskquota.c diff --git a/diskquota.h b/diskquota.h index d0798fdf318..1c105528fec 100644 --- a/diskquota.h +++ b/diskquota.h @@ -1,3 +1,15 @@ +/* ------------------------------------------------------------------------- + * + * diskquota.h + * + * Copyright (c) 2018-2020 Pivotal Software, Inc. + * Copyright (c) 2020-Present VMware, Inc. 
or its affiliates + * + * IDENTIFICATION + * diskquota/diskquota.h + * + * ------------------------------------------------------------------------- + */ #ifndef DISK_QUOTA_H #define DISK_QUOTA_H diff --git a/diskquota_utility.c b/diskquota_utility.c index dc70c2d192c..867ea53af00 100644 --- a/diskquota_utility.c +++ b/diskquota_utility.c @@ -8,7 +8,8 @@ * diskquota_start_worker is used by the 'create extension' DDL. It will start * the corresponding worker process immediately. * - * Copyright (c) 2018-Present Pivotal Software, Inc. + * Copyright (c) 2018-2020 Pivotal Software, Inc. + * Copyright (c) 2020-Present VMware, Inc. or its affiliates * * IDENTIFICATION * diskquota/diskquota_utility.c diff --git a/enforcement.c b/enforcement.c index 3ede06b5808..4568db3934c 100644 --- a/enforcement.c +++ b/enforcement.c @@ -5,7 +5,8 @@ * This code registers enforcement hooks to cancel the query which exceeds * the quota limit. * - * Copyright (c) 2018-Present Pivotal Software, Inc. + * Copyright (c) 2018-2020 Pivotal Software, Inc. + * Copyright (c) 2020-Present VMware, Inc. or its affiliates * * IDENTIFICATION * diskquota/enforcement.c diff --git a/gp_activetable.c b/gp_activetable.c index 559f5688515..e7166f30ff0 100644 --- a/gp_activetable.c +++ b/gp_activetable.c @@ -6,7 +6,8 @@ * quotamodel will call gp_fetch_active_tables() to fetch the active tables * and their size information in each loop. * - * Copyright (c) 2018-Present Pivotal Software, Inc. + * Copyright (c) 2018-2020 Pivotal Software, Inc. + * Copyright (c) 2020-Present VMware, Inc. or its affiliates * * IDENTIFICATION * diskquota/gp_activetable.c diff --git a/gp_activetable.h b/gp_activetable.h index 317d703d125..49aa7a5fb4d 100644 --- a/gp_activetable.h +++ b/gp_activetable.h @@ -1,3 +1,15 @@ +/* ------------------------------------------------------------------------- + * + * gp_activetable.h + * + * Copyright (c) 2018-2020 Pivotal Software, Inc. + * Copyright (c) 2020-Present VMware, Inc. or its affiliates + * + * IDENTIFICATION + * diskquota/gp_activetable.h + * + * ------------------------------------------------------------------------- + */ #ifndef ACTIVE_TABLE_H #define ACTIVE_TABLE_H diff --git a/quotamodel.c b/quotamodel.c index b457e3bba7f..5b2346da69a 100644 --- a/quotamodel.c +++ b/quotamodel.c @@ -6,7 +6,8 @@ * model. Disk quota related Shared memory initialization is also implemented * in this file. * - * Copyright (c) 2018-Present Pivotal Software, Inc. + * Copyright (c) 2018-2020 Pivotal Software, Inc. + * Copyright (c) 2020-Present VMware, Inc. or its affiliates * * IDENTIFICATION * diskquota/quotamodel.c diff --git a/relation_cache.c b/relation_cache.c index 0cf96df8f4f..5ab2693d442 100644 --- a/relation_cache.c +++ b/relation_cache.c @@ -1,3 +1,14 @@ +/* ------------------------------------------------------------------------- + * + * relation_cache.c + * + * Copyright (c) 2020-Present VMware, Inc. or its affiliates + * + * IDENTIFICATION + * diskquota/relation_cache.c + * + * ------------------------------------------------------------------------- + */ #include "postgres.h" #include "catalog/indexing.h" diff --git a/relation_cache.h b/relation_cache.h index d9ee70db1fe..c9f662617b9 100644 --- a/relation_cache.h +++ b/relation_cache.h @@ -1,3 +1,14 @@ +/* ------------------------------------------------------------------------- + * + * relation_cache.h + * + * Copyright (c) 2020-Present VMware, Inc.
or its affiliates + * + * IDENTIFICATION + * diskquota/relation_cache.h + * + * ------------------------------------------------------------------------- + */ #ifndef RELATION_CACHE_H #define RELATION_CACHE_H From 28667dd632aefa64898b48dadfb824162cd71c4c Mon Sep 17 00:00:00 2001 From: Chen Mulong Date: Fri, 8 Apr 2022 13:50:59 +0800 Subject: [PATCH 180/330] Use released gpdb instead of release candidate (#201) By the gcs resource regex rule, 6.99.99 will be matched as the most recent gpdb release candidate, which is not what we want. It is a testing version from the release team. And due to the Go regex implementation, negative lookahead (?!) is not supported to filter out an exact string. So the release pipeline is changed to use the most recent published gpdb binary instead. Compared with the previous gpdb_bin, the published one has '--enable-debug-extensions' as a configure parameter. That also enables the isolation2 tests for us. So there is no need to disable those tests in the release build anymore. --- concourse/README.md | 11 ++++++++--- concourse/fly.sh | 3 ++- concourse/pipeline/res_def.yml | 8 ++++---- tests/CMakeLists.txt | 27 ++++----------------------- 4 files changed, 18 insertions(+), 31 deletions(-) diff --git a/concourse/README.md b/concourse/README.md index 1eadfde16e6..4106ce396ad 100644 --- a/concourse/README.md +++ b/concourse/README.md @@ -46,16 +46,21 @@ https://extensions.ci.gpdb.pivotal.io/teams/main/pipelines/COMMIT:diskquota:gpdb ## Fly the release pipeline -By default, the release is built from the `gpdb` branch +By default, the release is built from the `gpdb` branch. + +The release pipeline should be located in https://prod.ci.gpdb.pivotal.io ``` -./fly.sh -t extension -c release +# Login to prod +fly -t prod login -c https://prod.ci.gpdb.pivotal.io +# Fly the release pipeline +./fly.sh -t prod -c release ``` To fly a release pipeline from a specific branch: ``` -./fly.sh -t extension -c release -b release/. +./fly.sh -t -c release -b release/. ``` ## Fly the dev pipeline diff --git a/concourse/fly.sh b/concourse/fly.sh index 4bb143cbd18..d63cb58e853 100755 --- a/concourse/fly.sh +++ b/concourse/fly.sh @@ -118,9 +118,10 @@ if [ "${pipeline_config}" == "dev" ]; then exit 0 fi +concourse_url=$(fly targets | awk "{if (\$1 == \"${target}\") {print \$2}}") echo "" echo "================================================================================" echo "Remember to set the webhook URL on GitHub:" -echo "https://extensions.ci.gpdb.pivotal.io/api/v1/teams/main/pipelines/${pipeline_name}/resources/${hook_res}/check/webhook?webhook_token=" +echo "${concourse_url}/api/v1/teams/main/pipelines/${pipeline_name}/resources/${hook_res}/check/webhook?webhook_token=" echo "You may need to change the base URL if a different concourse server is used."
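# A sketch of the line this prints, with hypothetical values rather than output from a real run: assuming a fly target "prod" whose URL resolves to https://prod.ci.gpdb.pivotal.io, a pipeline named "diskquota:release", and ${hook_res} resolving to "diskquota_commit", the echoed webhook URL would look like https://prod.ci.gpdb.pivotal.io/api/v1/teams/main/pipelines/diskquota:release/resources/diskquota_commit/check/webhook?webhook_token=<token>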
echo "================================================================================" diff --git a/concourse/pipeline/res_def.yml b/concourse/pipeline/res_def.yml index ef15f9ed4fd..55411888b3c 100644 --- a/concourse/pipeline/res_def.yml +++ b/concourse/pipeline/res_def.yml @@ -149,25 +149,25 @@ resources: source: bucket: ((gcs-bucket)) json_key: ((concourse-gcs-resources-service-account-key)) - regexp: server/release-candidates/gpdb6/greenplum-db-server-(.*)-centos6.tar.gz + regexp: server/published/gpdb6/server-rc-(.*)-rhel6_x86_64.tar.gz - name: bin_gpdb6_centos7 type: gcs source: bucket: ((gcs-bucket)) json_key: ((concourse-gcs-resources-service-account-key)) - regexp: server/release-candidates/gpdb6/greenplum-db-server-(.*)-centos7.tar.gz + regexp: server/published/gpdb6/server-rc-(.*)-rhel7_x86_64.tar.gz - name: bin_gpdb6_rhel8 type: gcs source: bucket: ((gcs-bucket)) json_key: ((concourse-gcs-resources-service-account-key)) - regexp: server/release-candidates/gpdb6/greenplum-db-server-(.*)-rhel8.tar.gz + regexp: server/published/gpdb6/server-rc-(.*)-rhel8_x86_64.tar.gz - name: bin_gpdb6_ubuntu18 type: gcs source: bucket: ((gcs-bucket)) json_key: ((concourse-gcs-resources-service-account-key)) - regexp: server/release-candidates/gpdb6/greenplum-db-server-(.*)-ubuntu18.04.tar.gz + regexp: server/published/gpdb6/server-rc-(.*)-ubuntu18.04_x86_64.tar.gz # Diskquota releases - name: bin_diskquota_gpdb6_rhel6 diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 80eb18c8dc3..099eda3c15a 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -1,20 +1,5 @@ include(${CMAKE_SOURCE_DIR}/cmake/Regress.cmake) -# Test cases need fault injector needs to be excluded from release build. -# GPDB release build doesn't support fault injector -if (CMAKE_BUILD_TYPE STREQUAL "Release") -list(APPEND exclude_regress_for_release - test_fetch_table_stat) -list(APPEND exclude_isolation2_for_release - test_relation_size - test_blackmap - test_vacuum - test_truncate - test_worker_timeout) -else() - set(load_inject_fault_opts --load-extension=gp_inject_fault) -endif() - RegressTarget_Add(regress INIT_FILE ${CMAKE_CURRENT_SOURCE_DIR}/init_file @@ -23,10 +8,8 @@ RegressTarget_Add(regress RESULTS_DIR ${CMAKE_CURRENT_SOURCE_DIR}/regress/results DATA_DIR ${CMAKE_CURRENT_SOURCE_DIR}/data SCHEDULE_FILE ${CMAKE_CURRENT_SOURCE_DIR}/regress/diskquota_schedule - EXCLUDE - ${exclude_regress_for_release} REGRESS_OPTS - ${load_inject_fault_opts} + --load-extension=gp_inject_fault --dbname=contrib_regression) RegressTarget_Add(isolation2 @@ -39,10 +22,8 @@ RegressTarget_Add(isolation2 RESULTS_DIR ${CMAKE_CURRENT_SOURCE_DIR}/isolation2/results DATA_DIR ${CMAKE_CURRENT_SOURCE_DIR}/data SCHEDULE_FILE ${CMAKE_CURRENT_SOURCE_DIR}/isolation2/isolation2_schedule - EXCLUDE - ${exclude_isolation2_for_release} REGRESS_OPTS - ${load_inject_fault_opts} + --load-extension=gp_inject_fault --dbname=isolation2test) add_custom_target(installcheck) @@ -59,7 +40,7 @@ add_dependencies(installcheck isolation2 regress) # REGRESS # config test_create_extension # REGRESS_OPTS -# ${load_inject_fault_opts} +# --load-extension=gp_inject_fault # --dbname=contrib_regression) # RegressTarget_Add(regress_truncate_loop # INIT_FILE @@ -72,7 +53,7 @@ add_dependencies(installcheck isolation2 regress) # test_truncate # RUN_TIMES -1 # REGRESS_OPTS -# ${load_inject_fault_opts} +# --load-extension=gp_inject_fault # --dbname=contrib_regression # --use-existing) # add_dependencies(regress_truncate_loop regress_config) From 
6e245b1e7d93a96023fc65a30a358fd5f5bfae1f Mon Sep 17 00:00:00 2001 From: Xiaoran Wang Date: Fri, 8 Apr 2022 17:41:21 +0800 Subject: [PATCH 181/330] Fix tablespace per segment ratio (#199) If a tablespace segratio is configured, this commit makes sure that either the already existing (schema/role)_tablespace_quota configs or new configs set afterwards will have the same segratio. 1) Add a new quota type: TABLESPACE_QUOTA, used to store the tablespace segratio value in the quota_config table. When set_per_segment_quota('xx', 1.0) is called, a new config will be added like: quota_type = 4 (TABLESPACE_QUOTA) quota = 0 (invalid quota) segratio = 1 2) Modify set_quota_config_internal to support insert/delete/update of the new config type 3) Add function get_per_segment_ratio; it is called when inserting a new (schema/role)_tablespace_quota. "for share" is used to query the segratio; it can only keep the segratio consistent at the "read committed" isolation level. Co-authored-by: Sasasu --- diskquota--1.0--2.0.sql | 2 +- diskquota--2.0.sql | 2 +- diskquota.h | 12 +- diskquota_utility.c | 296 +++++++++++++----- quotamodel.c | 6 +- .../expected/test_per_segment_config.out | 269 ++++++++++++++++ tests/isolation2/isolation2_schedule | 1 + .../sql/test_per_segment_config.sql | 114 +++++++ .../test_tablespace_schema_perseg.out | 62 +++- .../sql/test_tablespace_schema_perseg.sql | 12 + 10 files changed, 683 insertions(+), 93 deletions(-) create mode 100644 tests/isolation2/expected/test_per_segment_config.out create mode 100644 tests/isolation2/sql/test_per_segment_config.sql diff --git a/diskquota--1.0--2.0.sql b/diskquota--1.0--2.0.sql index 40fd5f81060..dc277c382fe 100644 --- a/diskquota--1.0--2.0.sql +++ b/diskquota--1.0--2.0.sql @@ -1,7 +1,7 @@ -- TODO check if worker should not refresh, current lib should be diskquota-2.0.so -- table part -ALTER TABLE diskquota.quota_config ADD COLUMN segratio float4 DEFAULT -1; +ALTER TABLE diskquota.quota_config ADD COLUMN segratio float4 DEFAULT 0; CREATE TABLE diskquota.target ( quotatype int, -- REFERENCES disquota.quota_config.quotatype, primaryOid oid, tablespaceOid oid, -- REFERENCES pg_tablespace.oid, diff --git a/diskquota--2.0.sql b/diskquota--2.0.sql index 6b11d24f1a2..d0c76e171cc 100644 --- a/diskquota--2.0.sql +++ b/diskquota--2.0.sql @@ -7,7 +7,7 @@ CREATE TABLE diskquota.quota_config( targetOid oid, quotatype int, quotalimitMB int8, - segratio float4 DEFAULT -1, + segratio float4 DEFAULT 0, PRIMARY KEY(targetOid, quotatype) ) DISTRIBUTED BY (targetOid, quotatype); diff --git a/diskquota.h b/diskquota.h index 1c105528fec..89690b49a45 100644 --- a/diskquota.h +++ b/diskquota.h @@ -30,13 +30,23 @@ /* max number of monitored database with diskquota enabled */ #define MAX_NUM_MONITORED_DB 10 - typedef enum { NAMESPACE_QUOTA = 0, ROLE_QUOTA, NAMESPACE_TABLESPACE_QUOTA, ROLE_TABLESPACE_QUOTA, + /* + * TABLESPACE_QUOTA + * used in the `quota_config` table; + * when set_per_segment_quota('xx', 1.0) is called + * to set the per segment quota to 1.0, the config + * will be: + * quotatype = 4 (TABLESPACE_QUOTA) + * quotalimitMB = 0 (invalid quota) + * segratio = 1.0 + */ + TABLESPACE_QUOTA, NUM_QUOTA_TYPES } QuotaType; typedef enum diff --git a/diskquota_utility.c b/diskquota_utility.c index 867ea53af00..fe5a3e3f6e7 100644 --- a/diskquota_utility.c +++ b/diskquota_utility.c @@ -66,14 +66,35 @@ PG_FUNCTION_INFO_V1(pull_all_table_size); /* timeout count to wait response from launcher process, in 1/10 sec */ #define WAIT_TIME_COUNT 1200 +/* + * three types of values for the "quota" column in the "quota_config" table: + * 1) more than 0: valid value + * 2) 0: meaningless value, rejected by
diskquota UDF + * 3) less than 0: to delete the quota config in the table + * + * the values for the segratio column follow the same convention as the quota column + * + * In the quota_config table, + * 1) when the quota type is "TABLESPACE_QUOTA", + * the quota column value is always INVALID_QUOTA + * 2) when the quota type is "NAMESPACE_TABLESPACE_QUOTA" or "ROLE_TABLESPACE_QUOTA" + * and no segratio is configured for the tablespace, the segratio value is + * INVALID_SEGRATIO. + * 3) when the quota type is "NAMESPACE_QUOTA" or "ROLE_QUOTA", the segratio is + * always INVALID_SEGRATIO. + */ +#define INVALID_SEGRATIO 0.0 +#define INVALID_QUOTA 0 static object_access_hook_type next_object_access_hook; static bool is_database_empty(void); static void dq_object_access_hook(ObjectAccessType access, Oid classId, Oid objectId, int subId, void *arg); static const char *ddl_err_code_to_err_message(MessageResult code); static int64 get_size_in_mb(char *str); -static void set_quota_config_internal(Oid targetoid, int64 quota_limit_mb, QuotaType type); -static void set_target_internal(Oid primaryoid, Oid spcoid, int64 quota_limit_mb, QuotaType type); +static void set_quota_config_internal(Oid targetoid, int64 quota_limit_mb, QuotaType type, float4 segratio, Oid spcoid); +static void set_target_internal(Oid primaryoid, Oid spcoid, int64 quota_limit_mb, QuotaType type); +static float4 get_per_segment_ratio(Oid spcoid); +static bool to_delete_quota(QuotaType type, int64 quota_limit_mb, float4 segratio); List *get_rel_oid_list(void); @@ -695,7 +716,9 @@ set_role_quota(PG_FUNCTION_ARGS) { ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("disk quota can not be set to 0 MB"))); } - set_quota_config_internal(roleoid, quota_limit_mb, ROLE_QUOTA); + SPI_connect(); + set_quota_config_internal(roleoid, quota_limit_mb, ROLE_QUOTA, INVALID_SEGRATIO, InvalidOid); + SPI_finish(); PG_RETURN_VOID(); } @@ -727,7 +750,9 @@ set_schema_quota(PG_FUNCTION_ARGS) { ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("disk quota can not be set to 0 MB"))); } - set_quota_config_internal(namespaceoid, quota_limit_mb, NAMESPACE_QUOTA); + SPI_connect(); + set_quota_config_internal(namespaceoid, quota_limit_mb, NAMESPACE_QUOTA, INVALID_SEGRATIO, InvalidOid); + SPI_finish(); PG_RETURN_VOID(); } @@ -769,8 +794,10 @@ set_role_tablespace_quota(PG_FUNCTION_ARGS) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("disk quota can not be set to 0 MB"))); } + SPI_connect(); set_target_internal(roleoid, spcoid, quota_limit_mb, ROLE_TABLESPACE_QUOTA); - set_quota_config_internal(roleoid, quota_limit_mb, ROLE_TABLESPACE_QUOTA); + set_quota_config_internal(roleoid, quota_limit_mb, ROLE_TABLESPACE_QUOTA, INVALID_SEGRATIO, spcoid); + SPI_finish(); PG_RETURN_VOID(); } @@ -812,13 +839,23 @@ set_schema_tablespace_quota(PG_FUNCTION_ARGS) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("disk quota can not be set to 0 MB"))); } + SPI_connect(); set_target_internal(namespaceoid, spcoid, quota_limit_mb, NAMESPACE_TABLESPACE_QUOTA); - set_quota_config_internal(namespaceoid, quota_limit_mb, NAMESPACE_TABLESPACE_QUOTA); + set_quota_config_internal(namespaceoid, quota_limit_mb, NAMESPACE_TABLESPACE_QUOTA, INVALID_SEGRATIO, spcoid); + SPI_finish(); PG_RETURN_VOID(); } +/* + * set_quota_config_internal - insert/update/delete quota_config table + * + * If the segratio is invalid and the quota type is tablespace-related, + * query the segratio from the table "quota_config" by spcoid.
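+ * + * A sketch of the intended row shapes in quota_config, written as + * (targetoid, quotatype, quotalimitMB, segratio) with hypothetical oids + * and values, not rows captured from a live cluster: + * set_schema_quota('s1', '1 GB') -> (nspoid, 0, 1024, 0) + * set_per_segment_quota('spc1', 2.0) -> (spcoid, 4, 0, 2.0) + * set_schema_tablespace_quota('s1', 'spc1', '1 GB') -> (nspoid, 2, 1024, 2.0), + * the segratio being inherited from the spc1 row via get_per_segment_ratio()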
+ * + * DELETE doesn't need the segratio + */ static void -set_quota_config_internal(Oid targetoid, int64 quota_limit_mb, QuotaType type) +set_quota_config_internal(Oid targetoid, int64 quota_limit_mb, QuotaType type, float4 segratio, Oid spcoid) { int ret; @@ -826,7 +863,6 @@ set_quota_config_internal(Oid targetoid, int64 quota_limit_mb, QuotaType type) * If error happens in set_quota_config_internal, just return error messages to * the client side. So there is no need to catch the error. */ - SPI_connect(); ret = SPI_execute_with_args("select true from diskquota.quota_config where targetoid = $1 and quotatype = $2", 2, (Oid[]){ @@ -840,59 +876,90 @@ set_quota_config_internal(Oid targetoid, int64 quota_limit_mb, QuotaType type) NULL, true, 0); if (ret != SPI_OK_SELECT) elog(ERROR, "cannot select quota setting table: error code %d", ret); - /* if the schema or role's quota has been set before */ - if (SPI_processed == 0 && quota_limit_mb > 0) + if (to_delete_quota(type, quota_limit_mb, segratio)) { - ret = SPI_execute_with_args("insert into diskquota.quota_config values($1, $2, $3)", 3, - (Oid[]){ - OIDOID, - INT4OID, - INT8OID, - }, - (Datum[]){ - ObjectIdGetDatum(targetoid), - Int32GetDatum(type), - Int64GetDatum(quota_limit_mb), - }, - NULL, false, 0); - if (ret != SPI_OK_INSERT) elog(ERROR, "cannot insert into quota setting table, error code %d", ret); - } - else if (SPI_processed > 0 && quota_limit_mb < 0) - { - ret = SPI_execute_with_args("delete from diskquota.quota_config where targetoid = $1 and quotatype = $2", 2, - (Oid[]){ - OIDOID, - INT4OID, - }, - (Datum[]){ - ObjectIdGetDatum(targetoid), - Int32GetDatum(type), - }, - NULL, false, 0); - if (ret != SPI_OK_DELETE) elog(ERROR, "cannot delete item from quota setting table, error code %d", ret); + if (SPI_processed > 0) + { + ret = SPI_execute_with_args("delete from diskquota.quota_config where targetoid = $1 and quotatype = $2", 2, + (Oid[]){ + OIDOID, + INT4OID, + }, + (Datum[]){ + ObjectIdGetDatum(targetoid), + Int32GetDatum(type), + }, + NULL, false, 0); + if (ret != SPI_OK_DELETE) elog(ERROR, "cannot delete item from quota setting table, error code %d", ret); + } + // else do nothing } - else if (SPI_processed > 0 && quota_limit_mb > 0) + // to upsert quota_config + else { - ret = SPI_execute_with_args( - "update diskquota.quota_config set quotalimitMB = $1 where targetoid= $2 and quotatype = $3", 3, - (Oid[]){ - INT8OID, - OIDOID, - INT4OID, - }, - (Datum[]){ - Int64GetDatum(quota_limit_mb), - ObjectIdGetDatum(targetoid), - Int32GetDatum(type), - }, - NULL, false, 0); - if (ret != SPI_OK_UPDATE) elog(ERROR, "cannot update quota setting table, error code %d", ret); + if (SPI_processed == 0) + { + if (segratio == INVALID_SEGRATIO && !(type == ROLE_QUOTA || type == NAMESPACE_QUOTA)) + segratio = get_per_segment_ratio(spcoid); + ret = SPI_execute_with_args("insert into diskquota.quota_config values($1, $2, $3, $4)", 4, + (Oid[]){ + OIDOID, + INT4OID, + INT8OID, + FLOAT4OID, + }, + (Datum[]){ + ObjectIdGetDatum(targetoid), + Int32GetDatum(type), + Int64GetDatum(quota_limit_mb), + Float4GetDatum(segratio), + }, + NULL, false, 0); + if (ret != SPI_OK_INSERT) elog(ERROR, "cannot insert into quota setting table, error code %d", ret); + } + else + { + // no need to update segratio + if (segratio == INVALID_SEGRATIO) + { + ret = SPI_execute_with_args( + "update diskquota.quota_config set quotalimitMB = $1 where targetoid= $2 and quotatype = $3", 3, + (Oid[]){ + INT8OID, + OIDOID, + INT4OID, + }, + (Datum[]){ + 
Int64GetDatum(quota_limit_mb), + ObjectIdGetDatum(targetoid), + Int32GetDatum(type), + }, + NULL, false, 0); + } + else + { + ret = SPI_execute_with_args( + "update diskquota.quota_config set quotalimitMb = $1, segratio = $2 where targetoid= $3 and " + "quotatype = $4", + 4, + (Oid[]){ + INT8OID, + FLOAT4OID, + OIDOID, + INT4OID, + }, + (Datum[]){ + Int64GetDatum(quota_limit_mb), + Float4GetDatum(segratio), + ObjectIdGetDatum(targetoid), + Int32GetDatum(type), + }, + NULL, false, 0); + } + if (ret != SPI_OK_UPDATE) elog(ERROR, "cannot update quota setting table, error code %d", ret); + } } - /* - * And finish our transaction. - */ - SPI_finish(); return; } @@ -902,10 +969,9 @@ set_target_internal(Oid primaryoid, Oid spcoid, int64 quota_limit_mb, QuotaType int ret; /* - * If error happens in set_quota_config_internal, just return error messages to + * If error happens in set_target_internal, just return error messages to * the client side. So there is no need to catch the error. */ - SPI_connect(); ret = SPI_execute_with_args( "select true from diskquota.quota_config as q, diskquota.target as t" @@ -928,7 +994,7 @@ set_target_internal(Oid primaryoid, Oid spcoid, int64 quota_limit_mb, QuotaType NULL, true, 0); if (ret != SPI_OK_SELECT) elog(ERROR, "cannot select target setting table: error code %d", ret); - /* if the schema or role's quota has been set before */ + /* if the schema or role's quota has not been set before */ if (SPI_processed == 0 && quota_limit_mb > 0) { ret = SPI_execute_with_args("insert into diskquota.target values($1, $2, $3)", 3, @@ -959,11 +1025,8 @@ set_target_internal(Oid primaryoid, Oid spcoid, int64 quota_limit_mb, QuotaType NULL, false, 0); if (ret != SPI_OK_DELETE) elog(ERROR, "cannot delete item from target setting table, error code %d", ret); } + /* No need to update the target table */ - /* - * And finish our transaction. - */ - SPI_finish(); return; } @@ -1148,35 +1211,35 @@ set_per_segment_quota(PG_FUNCTION_ARGS) { ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), errmsg("unable to connect to execute internal query"))); } + /* + * lock the quota_config table in exclusive mode + * + * Firstly insert the segratio with the TABLESPACE_QUOTA + * row into the table (ROW SHARE lock), then update the + * segratio for the NAMESPACE_TABLESPACE_QUOTA/ROLE_TABLESPACE_QUOTA rows + * (EXCLUSIVE lock); if we don't lock the table in + * exclusive mode first, deadlock will happen.
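+ * + * A sketch of the interleaving this guards against, with two + * hypothetical sessions: A calls set_per_segment_quota() while B calls + * set_schema_tablespace_quota(); B's get_per_segment_ratio() takes a + * row-level lock on the TABLESPACE_QUOTA row via SELECT ... FOR SHARE + * while A needs to update that row and the per-quota rows, so without + * the up-front table-level EXCLUSIVE lock the two sessions can end up + * waiting on each other.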
+ */ + ret = SPI_execute("LOCK TABLE diskquota.quota_config IN EXCLUSIVE MODE", false, 0); + if (ret != SPI_OK_UTILITY) elog(ERROR, "cannot lock quota_config table, error code %d", ret); - /* Get all targetOid which are related to this tablespace, and saved into rowIds */ - ret = SPI_execute_with_args( - "SELECT true FROM diskquota.target AS t, diskquota.quota_config AS q WHERE tablespaceOid = $1 AND " - "(t.quotaType = $2 OR t.quotaType = $3) AND t.primaryOid = q.targetOid AND t.quotaType = q.quotaType", - 3, - (Oid[]){ - OIDOID, - INT4OID, - INT4OID, - }, - (Datum[]){ - ObjectIdGetDatum(spcoid), - Int32GetDatum(NAMESPACE_TABLESPACE_QUOTA), - Int32GetDatum(ROLE_TABLESPACE_QUOTA), - }, - NULL, true, 0); - if (ret != SPI_OK_SELECT) elog(ERROR, "cannot select target and quota setting table: error code %d", ret); - - if (SPI_processed <= 0) - { - ereport(ERROR, (errmsg("there are no roles or schema quota configed for this tablespace: %s, can't config per " - "segment ratio for it", - spcname))); - } + /* + * insert/update/delete the tablespace ratio config in the quota_config table. + * For TABLESPACE_QUOTA, it doesn't store any quota info; it is just used to + * store the ratio for the tablespace. + */ + set_quota_config_internal(spcoid, INVALID_QUOTA, TABLESPACE_QUOTA, ratio, InvalidOid); /* * UPDATE the NAMESPACE_TABLESPACE_PERSEG_QUOTA AND ROLE_TABLESPACE_PERSEG_QUOTA config for this tablespace */ + + /* set to invalid ratio value if the tablespace per segment quota is deleted */ + if (ratio < 0) + { + ratio = INVALID_SEGRATIO; + } + ret = SPI_execute_with_args( "UPDATE diskquota.quota_config AS q set segratio = $1 FROM diskquota.target AS t WHERE " "q.targetOid = t.primaryOid AND (t.quotaType = $2 OR t.quotaType = $3) AND t.quotaType = " "q.quotaType And t.tablespaceOid = $4", 4, (Oid[]){ @@ -1535,3 +1598,62 @@ diskquota_parse_primary_table_oid(Oid namespace, char *relname) } return InvalidOid; } + +static float4 +get_per_segment_ratio(Oid spcoid) +{ + int ret; + float4 segratio = INVALID_SEGRATIO; + + if (!OidIsValid(spcoid)) return segratio; + + /* + * using a row share lock to lock the TABLESPACE_QUOTA + * row to avoid concurrently updating the segratio + */ + ret = SPI_execute_with_args( + "select segratio from diskquota.quota_config where targetoid = $1 and quotatype = $2 for share", 2, + (Oid[]){ + OIDOID, + INT4OID, + }, + (Datum[]){ + ObjectIdGetDatum(spcoid), + Int32GetDatum(TABLESPACE_QUOTA), + }, + NULL, false, 0); + if (ret != SPI_OK_SELECT) + { + elog(ERROR, "cannot get per segment ratio for the tablespace: error code %d", ret); + } + + if (SPI_processed == 1) + { + TupleDesc tupdesc = SPI_tuptable->tupdesc; + HeapTuple tup = SPI_tuptable->vals[0]; + Datum dat; + bool isnull; + + dat = SPI_getbinval(tup, tupdesc, 1, &isnull); + if (!isnull) + { + segratio = DatumGetFloat4(dat); + } + } + return segratio; +} + +/* + * For quota type TABLESPACE_QUOTA, it only stores the + * segratio, not quota info. So when the segratio is + * negative, we can just delete it.
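+ * + * Restated as a truth table over the arguments (this mirrors the + * branches in the function body below): + * quota_limit_mb < 0 -> delete + * segratio < 0 && type == TABLESPACE_QUOTA -> delete + * anything else -> keep (insert/update)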
+ */ +static bool +to_delete_quota(QuotaType type, int64 quota_limit_mb, float4 segratio) +{ + if (quota_limit_mb < 0) + return true; + else if (segratio < 0 && type == TABLESPACE_QUOTA) + return true; + return false; +} diff --git a/quotamodel.c b/quotamodel.c index 5b2346da69a..cae8a4b4951 100644 --- a/quotamodel.c +++ b/quotamodel.c @@ -112,7 +112,9 @@ struct QuotaInfo quota_info[NUM_QUOTA_TYPES] = { [ROLE_TABLESPACE_QUOTA] = {.map_name = "Tablespace-role map", .num_keys = 2, .sys_cache = (Oid[]){AUTHOID, TABLESPACEOID}, - .map = NULL}}; + .map = NULL}, + [TABLESPACE_QUOTA] = { + .map_name = "Tablespace map", .num_keys = 1, .sys_cache = (Oid[]){TABLESPACEOID}, .map = NULL}}; /* global blacklist for which exceed their quota limit */ struct BlackMapEntry @@ -1389,6 +1391,8 @@ prepare_blackmap_search_key(BlackMapEntry *keyitem, QuotaType type, Oid relowner keyitem->targetoid = relowner; else if (type == NAMESPACE_QUOTA || type == NAMESPACE_TABLESPACE_QUOTA) keyitem->targetoid = relnamespace; + else if (type == TABLESPACE_QUOTA) + keyitem->targetoid = reltablespace; else ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), errmsg("[diskquota] unknown quota type: %d", type))); diff --git a/tests/isolation2/expected/test_per_segment_config.out b/tests/isolation2/expected/test_per_segment_config.out new file mode 100644 index 00000000000..92edda43b16 --- /dev/null +++ b/tests/isolation2/expected/test_per_segment_config.out @@ -0,0 +1,269 @@ +-- Test one session read tablespace segratio, +-- and at the same time, another session +-- update or insert the segratio + +-- start_ignore +!\retcode mkdir -p /tmp/spc101; +-- start_ignore + +-- end_ignore +(exited with code 0) +-- end_ignore +CREATE SCHEMA s101; +CREATE +DROP TABLESPACE IF EXISTS spc101; +DROP +CREATE TABLESPACE spc101 LOCATION '/tmp/spc101'; +CREATE + +-- +-- There is no tablesapce per segment quota configed yet +-- + +-- Read commited, first set_per_segment_quota, then set_schema_tablespace_quota +1: BEGIN; +BEGIN +1: SELECT diskquota.set_per_segment_quota('spc101', 1); + set_per_segment_quota +----------------------- + +(1 row) +2: BEGIN; +BEGIN +2&: SELECT diskquota.set_schema_tablespace_quota('s101', 'spc101','1 MB'); +1: COMMIT; +COMMIT +2<: <... completed> + set_schema_tablespace_quota +----------------------------- + +(1 row) +2: COMMIT; +COMMIT + +SELECT segratio from diskquota.quota_config, pg_namespace where targetoid = oid and nspname = 's101'; + segratio +---------- + 1.0 +(1 row) +SELECT segratio from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'spc101'; + segratio +---------- + 1.0 +(1 row) +-- cleanup +truncate table diskquota.quota_config; +TRUNCATE +truncate table diskquota.target; +TRUNCATE + +-- Read commited, first set_schema_tablespace_quota, then set_per_segment_quota, +1: BEGIN; +BEGIN +1: SELECT diskquota.set_schema_tablespace_quota('s101', 'spc101','1 MB'); + set_schema_tablespace_quota +----------------------------- + +(1 row) +2: BEGIN; +BEGIN +2&: SELECT diskquota.set_per_segment_quota('spc101', 1); +1: COMMIT; +COMMIT +2<: <... 
completed> + set_per_segment_quota +----------------------- + +(1 row) +2: COMMIT; +COMMIT + +SELECT segratio from diskquota.quota_config, pg_namespace where targetoid = oid and nspname = 's101'; + segratio +---------- + 1.0 +(1 row) +SELECT segratio from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'spc101'; + segratio +---------- + 1.0 +(1 row) +-- cleanup +truncate table diskquota.quota_config; +TRUNCATE +truncate table diskquota.target; +TRUNCATE + +-- +-- There is already a tablesapce per segment quota configed +-- + +-- Read commited, first set_per_segment_quota, then set_schema_tablespace_quota +SELECT diskquota.set_per_segment_quota('spc101', 2); + set_per_segment_quota +----------------------- + +(1 row) +1: BEGIN; +BEGIN +1: SELECT diskquota.set_per_segment_quota('spc101', 1); + set_per_segment_quota +----------------------- + +(1 row) +2: BEGIN; +BEGIN +2&: SELECT diskquota.set_schema_tablespace_quota('s101', 'spc101','1 MB'); +1: COMMIT; +COMMIT +2<: <... completed> + set_schema_tablespace_quota +----------------------------- + +(1 row) +2: COMMIT; +COMMIT + +SELECT segratio from diskquota.quota_config, pg_namespace where targetoid = oid and nspname = 's101'; + segratio +---------- + 1.0 +(1 row) +SELECT segratio from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'spc101'; + segratio +---------- + 1.0 +(1 row) +-- cleanup +truncate table diskquota.quota_config; +TRUNCATE +truncate table diskquota.target; +TRUNCATE + +-- Read commited, first set_schema_tablespace_quota, then set_per_segment_quota, +SELECT diskquota.set_per_segment_quota('spc101', 2); + set_per_segment_quota +----------------------- + +(1 row) +1: BEGIN; +BEGIN +1: SELECT diskquota.set_schema_tablespace_quota('s101', 'spc101','1 MB'); + set_schema_tablespace_quota +----------------------------- + +(1 row) +2: BEGIN; +BEGIN +2&: SELECT diskquota.set_per_segment_quota('spc101', 1); +1: COMMIT; +COMMIT +2<: <... completed> + set_per_segment_quota +----------------------- + +(1 row) +2: COMMIT; +COMMIT + +SELECT segratio from diskquota.quota_config, pg_namespace where targetoid = oid and nspname = 's101'; + segratio +---------- + 1.0 +(1 row) +SELECT segratio from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'spc101'; + segratio +---------- + 1.0 +(1 row) +-- cleanup +truncate table diskquota.quota_config; +TRUNCATE +truncate table diskquota.target; +TRUNCATE + +-- Read commited, first delete per_segment_quota, then set_schema_tablespace_quota +SELECT diskquota.set_per_segment_quota('spc101', 2); + set_per_segment_quota +----------------------- + +(1 row) +1: BEGIN; +BEGIN +1: SELECT diskquota.set_per_segment_quota('spc101', -1); + set_per_segment_quota +----------------------- + +(1 row) +2: BEGIN; +BEGIN +2&: SELECT diskquota.set_schema_tablespace_quota('s101', 'spc101','1 MB'); +1: COMMIT; +COMMIT +2<: <... 
completed> + set_schema_tablespace_quota +----------------------------- + +(1 row) +2: COMMIT; +COMMIT + +SELECT segratio from diskquota.quota_config, pg_namespace where targetoid = oid and nspname = 's101'; + segratio +---------- + 0.0 +(1 row) +SELECT segratio from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'spc101'; + segratio +---------- +(0 rows) +-- cleanup +truncate table diskquota.quota_config; +TRUNCATE +truncate table diskquota.target; +TRUNCATE + +-- Read commited, first set_schema_tablespace_quota, then delete tablespace per segment ratio +SELECT diskquota.set_per_segment_quota('spc101', 2); + set_per_segment_quota +----------------------- + +(1 row) +1: BEGIN; +BEGIN +1: SELECT diskquota.set_schema_tablespace_quota('s101', 'spc101','1 MB'); + set_schema_tablespace_quota +----------------------------- + +(1 row) +2: BEGIN; +BEGIN +2&: SELECT diskquota.set_per_segment_quota('spc101', -1); +1: COMMIT; +COMMIT +2<: <... completed> + set_per_segment_quota +----------------------- + +(1 row) +2: COMMIT; +COMMIT + +SELECT segratio from diskquota.quota_config, pg_namespace where targetoid = oid and nspname = 's101'; + segratio +---------- + 0.0 +(1 row) +SELECT segratio from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'spc101'; + segratio +---------- +(0 rows) +-- cleanup +truncate table diskquota.quota_config; +TRUNCATE +truncate table diskquota.target; +TRUNCATE +DROP SCHEMA s101; +DROP +DROP TABLESPACE spc101; +DROP diff --git a/tests/isolation2/isolation2_schedule b/tests/isolation2/isolation2_schedule index 92c6490f007..0530bb21e9f 100644 --- a/tests/isolation2/isolation2_schedule +++ b/tests/isolation2/isolation2_schedule @@ -6,5 +6,6 @@ test: test_vacuum test: test_truncate test: test_postmaster_restart test: test_worker_timeout +test: test_per_segment_config test: test_drop_extension test: reset_config diff --git a/tests/isolation2/sql/test_per_segment_config.sql b/tests/isolation2/sql/test_per_segment_config.sql new file mode 100644 index 00000000000..c3d0d35534c --- /dev/null +++ b/tests/isolation2/sql/test_per_segment_config.sql @@ -0,0 +1,114 @@ +-- Test one session read tablespace segratio, +-- and at the same time, another session +-- update or insert the segratio + +-- start_ignore +!\retcode mkdir -p /tmp/spc101; +-- end_ignore +CREATE SCHEMA s101; +DROP TABLESPACE IF EXISTS spc101; +CREATE TABLESPACE spc101 LOCATION '/tmp/spc101'; + +-- +-- There is no tablesapce per segment quota configed yet +-- + +-- Read commited, first set_per_segment_quota, then set_schema_tablespace_quota +1: BEGIN; +1: SELECT diskquota.set_per_segment_quota('spc101', 1); +2: BEGIN; +2&: SELECT diskquota.set_schema_tablespace_quota('s101', 'spc101','1 MB'); +1: COMMIT; +2<: +2: COMMIT; + +SELECT segratio from diskquota.quota_config, pg_namespace where targetoid = oid and nspname = 's101'; +SELECT segratio from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'spc101'; +-- cleanup +truncate table diskquota.quota_config; +truncate table diskquota.target; + +-- Read commited, first set_schema_tablespace_quota, then set_per_segment_quota, +1: BEGIN; +1: SELECT diskquota.set_schema_tablespace_quota('s101', 'spc101','1 MB'); +2: BEGIN; +2&: SELECT diskquota.set_per_segment_quota('spc101', 1); +1: COMMIT; +2<: +2: COMMIT; + +SELECT segratio from diskquota.quota_config, pg_namespace where targetoid = oid and nspname = 's101'; +SELECT segratio from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname 
= 'spc101'; +-- cleanup +truncate table diskquota.quota_config; +truncate table diskquota.target; + +-- +-- There is already a tablesapce per segment quota configed +-- + +-- Read commited, first set_per_segment_quota, then set_schema_tablespace_quota +SELECT diskquota.set_per_segment_quota('spc101', 2); +1: BEGIN; +1: SELECT diskquota.set_per_segment_quota('spc101', 1); +2: BEGIN; +2&: SELECT diskquota.set_schema_tablespace_quota('s101', 'spc101','1 MB'); +1: COMMIT; +2<: +2: COMMIT; + +SELECT segratio from diskquota.quota_config, pg_namespace where targetoid = oid and nspname = 's101'; +SELECT segratio from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'spc101'; +-- cleanup +truncate table diskquota.quota_config; +truncate table diskquota.target; + +-- Read commited, first set_schema_tablespace_quota, then set_per_segment_quota, +SELECT diskquota.set_per_segment_quota('spc101', 2); +1: BEGIN; +1: SELECT diskquota.set_schema_tablespace_quota('s101', 'spc101','1 MB'); +2: BEGIN; +2&: SELECT diskquota.set_per_segment_quota('spc101', 1); +1: COMMIT; +2<: +2: COMMIT; + +SELECT segratio from diskquota.quota_config, pg_namespace where targetoid = oid and nspname = 's101'; +SELECT segratio from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'spc101'; +-- cleanup +truncate table diskquota.quota_config; +truncate table diskquota.target; + +-- Read commited, first delete per_segment_quota, then set_schema_tablespace_quota +SELECT diskquota.set_per_segment_quota('spc101', 2); +1: BEGIN; +1: SELECT diskquota.set_per_segment_quota('spc101', -1); +2: BEGIN; +2&: SELECT diskquota.set_schema_tablespace_quota('s101', 'spc101','1 MB'); +1: COMMIT; +2<: +2: COMMIT; + +SELECT segratio from diskquota.quota_config, pg_namespace where targetoid = oid and nspname = 's101'; +SELECT segratio from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'spc101'; +-- cleanup +truncate table diskquota.quota_config; +truncate table diskquota.target; + +-- Read commited, first set_schema_tablespace_quota, then delete tablespace per segment ratio +SELECT diskquota.set_per_segment_quota('spc101', 2); +1: BEGIN; +1: SELECT diskquota.set_schema_tablespace_quota('s101', 'spc101','1 MB'); +2: BEGIN; +2&: SELECT diskquota.set_per_segment_quota('spc101', -1); +1: COMMIT; +2<: +2: COMMIT; + +SELECT segratio from diskquota.quota_config, pg_namespace where targetoid = oid and nspname = 's101'; +SELECT segratio from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'spc101'; +-- cleanup +truncate table diskquota.quota_config; +truncate table diskquota.target; +DROP SCHEMA s101; +DROP TABLESPACE spc101; diff --git a/tests/regress/expected/test_tablespace_schema_perseg.out b/tests/regress/expected/test_tablespace_schema_perseg.out index f2e3f3d508b..b8d08021599 100644 --- a/tests/regress/expected/test_tablespace_schema_perseg.out +++ b/tests/regress/expected/test_tablespace_schema_perseg.out @@ -15,8 +15,6 @@ SELECT diskquota.set_schema_tablespace_quota('spcs1_perseg', 'schemaspc_perseg', SET search_path TO spcs1_perseg; CREATE TABLE a(i int) TABLESPACE schemaspc_perseg DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
INSERT INTO a SELECT generate_series(1,100); -- expect insert success INSERT INTO a SELECT generate_series(1,100000); @@ -204,6 +202,66 @@ SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FR -------------+-----------------+-------------+----------------------------- (0 rows) +-- test config per segment quota +SELECT diskquota.set_per_segment_quota('schemaspc_perseg2','1'); + set_per_segment_quota +----------------------- + +(1 row) + +SELECT distinct(segratio) from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'schemaspc_perseg2'; + segratio +---------- + 1 +(1 row) + +SELECT diskquota.set_schema_tablespace_quota('spcs2_perseg', 'schemaspc_perseg2','1 MB'); + set_schema_tablespace_quota +----------------------------- + +(1 row) + +SELECT distinct(segratio) from diskquota.quota_config, pg_namespace where targetoid = oid and nspname = 'spcs2_perseg'; + segratio +---------- + 1 +(1 row) + +SELECT diskquota.set_per_segment_quota('schemaspc_perseg2','-2'); + set_per_segment_quota +----------------------- + +(1 row) + +SELECT distinct(segratio) from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'schemaspc_perseg2'; + segratio +---------- +(0 rows) + +SELECT distinct(segratio) from diskquota.quota_config, pg_namespace where targetoid = oid and nspname = 'spcs2_perseg'; + segratio +---------- + 0 +(1 row) + +SELECT diskquota.set_per_segment_quota('schemaspc_perseg2','3'); + set_per_segment_quota +----------------------- + +(1 row) + +SELECT distinct(segratio) from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'schemaspc_perseg2'; + segratio +---------- + 3 +(1 row) + +SELECT distinct(segratio) from diskquota.quota_config, pg_namespace where targetoid = oid and nspname = 'spcs2_perseg'; + segratio +---------- + 3 +(1 row) + RESET search_path; DROP TABLE spcs1_perseg.a; DROP SCHEMA spcs1_perseg; diff --git a/tests/regress/sql/test_tablespace_schema_perseg.sql b/tests/regress/sql/test_tablespace_schema_perseg.sql index a5fb13eb71f..ae6dc51ee3a 100644 --- a/tests/regress/sql/test_tablespace_schema_perseg.sql +++ b/tests/regress/sql/test_tablespace_schema_perseg.sql @@ -83,6 +83,18 @@ SELECT diskquota.wait_for_worker_new_epoch(); INSERT INTO a SELECT generate_series(1,100); SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name = 'spcs1_perseg' and tablespace_name ='schemaspc_perseg'; +-- test config per segment quota +SELECT diskquota.set_per_segment_quota('schemaspc_perseg2','1'); +SELECT distinct(segratio) from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'schemaspc_perseg2'; +SELECT diskquota.set_schema_tablespace_quota('spcs2_perseg', 'schemaspc_perseg2','1 MB'); +SELECT distinct(segratio) from diskquota.quota_config, pg_namespace where targetoid = oid and nspname = 'spcs2_perseg'; +SELECT diskquota.set_per_segment_quota('schemaspc_perseg2','-2'); +SELECT distinct(segratio) from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'schemaspc_perseg2'; +SELECT distinct(segratio) from diskquota.quota_config, pg_namespace where targetoid = oid and nspname = 'spcs2_perseg'; +SELECT diskquota.set_per_segment_quota('schemaspc_perseg2','3'); +SELECT distinct(segratio) from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'schemaspc_perseg2'; +SELECT distinct(segratio) from diskquota.quota_config, pg_namespace where targetoid = 
oid and nspname = 'spcs2_perseg'; + RESET search_path; DROP TABLE spcs1_perseg.a; DROP SCHEMA spcs1_perseg; From 2533e46aaf3b7e6622489058b5c237b87c93df2a Mon Sep 17 00:00:00 2001 From: LXY Date: Wed, 13 Apr 2022 10:43:18 +0800 Subject: [PATCH 182/330] Fix unexpected removal when two tablespace quotas have the same role or schema (#202) When there are two namespace_tablespace_quota configurations with the same schema but different tablespaces, removing one quota will remove the other one automatically. The same bug happens for the role_tablespace_quota case. The root cause of this bug is that we can't distinguish these two quotas using only the quotaType and targetOid of diskquota.quota_config. This PR fixes this by changing the meaning of the targetOid column of the diskquota.quota_config table. When quotaType is role_quota/schema_quota, targetOid refers to role_oid/schema_oid. When quotaType is role_tablespace_quota/namespace_tablespace_quota, targetOid refers to the rowId column of diskquota.target. This makes it possible to distinguish two namespace_tablespace_quota configs with the same schema and different tablespaces, because their rowIds will be different. Co-authored-by: Xuebin Su Co-authored-by: Xiaoran Wang --- .gitignore | 2 +- diskquota--1.0--2.0.sql | 17 ++-- diskquota--2.0--1.0.sql | 4 +- diskquota--2.0.sql | 19 ++-- diskquota.h | 2 +- diskquota_utility.c | 98 ++++++++++++------- quotamodel.c | 25 ++++- .../expected/test_per_segment_config.out | 12 +-- .../sql/test_per_segment_config.sql | 26 +++-- tests/regress/diskquota_schedule | 1 + .../expected/test_tablespace_diff_schema.out | 80 +++++++++++++++ .../test_tablespace_schema_perseg.out | 14 ++- tests/regress/sql/test_relation_size.sql | 3 + .../sql/test_tablespace_diff_schema.sql | 43 ++++++++ .../sql/test_tablespace_schema_perseg.sql | 22 ++++- upgrade_test/expected/2.0_catalog.out | 28 +++--- 16 files changed, 299 insertions(+), 97 deletions(-) create mode 100644 tests/regress/expected/test_tablespace_diff_schema.out create mode 100644 tests/regress/sql/test_tablespace_diff_schema.sql diff --git a/.gitignore b/.gitignore index 375d7244005..bb04034d8a6 100644 --- a/.gitignore +++ b/.gitignore @@ -2,7 +2,7 @@ build*/ # The tests results -/results/ +results/ # For IDE/Editors .vscode diff --git a/diskquota--1.0--2.0.sql b/diskquota--1.0--2.0.sql index dc277c382fe..5f8de2b836f 100644 --- a/diskquota--1.0--2.0.sql +++ b/diskquota--1.0--2.0.sql @@ -4,6 +4,7 @@ ALTER TABLE diskquota.quota_config ADD COLUMN segratio float4 DEFAULT 0; CREATE TABLE diskquota.target ( + rowId serial, quotatype int, -- REFERENCES disquota.quota_config.quotatype, primaryOid oid, tablespaceOid oid, -- REFERENCES pg_tablespace.oid, @@ -193,27 +194,27 @@ WITH ), full_quota_config AS ( SELECT - targetOid, + primaryOid, tablespaceoid, quotalimitMB FROM diskquota.quota_config AS config, diskquota.target AS target WHERE - config.targetOid = target.primaryOid AND + config.targetOid = target.rowId AND config.quotaType = target.quotaType AND config.quotaType = 2 -- NAMESPACE_TABLESPACE_QUOTA ) SELECT nspname AS schema_name, - targetoid AS schema_oid, + primaryoid AS schema_oid, spcname AS tablespace_name, tablespaceoid AS tablespace_oid, quotalimitMB AS quota_in_mb, COALESCE(total_size, 0) AS nspsize_tablespace_in_bytes FROM full_quota_config JOIN - pg_namespace ON targetoid = pg_namespace.oid JOIN + pg_namespace ON primaryoid = pg_namespace.oid JOIN pg_tablespace ON tablespaceoid = pg_tablespace.oid LEFT OUTER JOIN quota_usage ON pg_namespace.oid = relnamespace AND
pg_tablespace.oid = reltablespace; @@ -245,27 +246,27 @@ WITH ), full_quota_config AS ( SELECT - targetOid, + primaryOid, tablespaceoid, quotalimitMB FROM diskquota.quota_config AS config, diskquota.target AS target WHERE - config.targetOid = target.primaryOid AND + config.targetOid = target.rowId AND config.quotaType = target.quotaType AND config.quotaType = 3 -- ROLE_TABLESPACE_QUOTA ) SELECT rolname AS role_name, - targetoid AS role_oid, + primaryoid AS role_oid, spcname AS tablespace_name, tablespaceoid AS tablespace_oid, quotalimitMB AS quota_in_mb, COALESCE(total_size, 0) AS rolsize_tablespace_in_bytes FROM full_quota_config JOIN - pg_roles ON targetoid = pg_roles.oid JOIN + pg_roles ON primaryoid = pg_roles.oid JOIN pg_tablespace ON tablespaceoid = pg_tablespace.oid LEFT OUTER JOIN quota_usage ON pg_roles.oid = relowner AND pg_tablespace.oid = reltablespace; -- views end diff --git a/diskquota--2.0--1.0.sql b/diskquota--2.0--1.0.sql index cba56dcaf26..2f55153aeb5 100644 --- a/diskquota--2.0--1.0.sql +++ b/diskquota--2.0--1.0.sql @@ -63,8 +63,8 @@ DROP FUNCTION diskquota.show_relation_cache_all_seg(); -- UDF end -- table part --- clean up schema_tablespace quota AND rolsize_tablespace quota -DELETE FROM diskquota.quota_config WHERE quotatype = 2 or quotatype = 3; +-- clean up NAMESPACE_TABLESPACE_QUOTA(2), ROLE_TABLESPACE_QUOTA(3) and TABLESPACE_QUOTA(4) +DELETE FROM diskquota.quota_config WHERE quotatype in (2, 3, 4); DROP TABLE diskquota.target; diff --git a/diskquota--2.0.sql b/diskquota--2.0.sql index d0c76e171cc..1842a0a5340 100644 --- a/diskquota--2.0.sql +++ b/diskquota--2.0.sql @@ -3,6 +3,8 @@ CREATE SCHEMA diskquota; +-- when (quotatype == NAMESPACE_QUOTA/ROLE_QUOTA) then targetOid = role_oid/schema_oid; +-- when (quotatype == NAMESPACE_TABLESPACE_QUOTA/ROLE_TABLESPACE_QUOTA) then targetOid = diskquota.target.rowId; CREATE TABLE diskquota.quota_config( targetOid oid, quotatype int, @@ -12,6 +14,7 @@ CREATE TABLE diskquota.quota_config( ) DISTRIBUTED BY (targetOid, quotatype); CREATE TABLE diskquota.target ( + rowId serial, quotatype int, --REFERENCES disquota.quota_config.quotatype, primaryOid oid, tablespaceOid oid, --REFERENCES pg_tablespace.oid, @@ -208,27 +211,27 @@ WITH ), full_quota_config AS ( SELECT - targetOid, + primaryOid, tablespaceoid, quotalimitMB FROM diskquota.quota_config AS config, diskquota.target AS target WHERE - config.targetOid = target.primaryOid AND + config.targetOid = target.rowId AND config.quotaType = target.quotaType AND config.quotaType = 2 -- NAMESPACE_TABLESPACE_QUOTA ) SELECT nspname AS schema_name, - targetoid AS schema_oid, + primaryoid AS schema_oid, spcname AS tablespace_name, tablespaceoid AS tablespace_oid, quotalimitMB AS quota_in_mb, COALESCE(total_size, 0) AS nspsize_tablespace_in_bytes FROM full_quota_config JOIN - pg_namespace ON targetoid = pg_namespace.oid JOIN + pg_namespace ON primaryOid = pg_namespace.oid JOIN pg_tablespace ON tablespaceoid = pg_tablespace.oid LEFT OUTER JOIN quota_usage ON pg_namespace.oid = relnamespace AND pg_tablespace.oid = reltablespace; @@ -260,27 +263,27 @@ WITH ), full_quota_config AS ( SELECT - targetOid, + primaryOid, tablespaceoid, quotalimitMB FROM diskquota.quota_config AS config, diskquota.target AS target WHERE - config.targetOid = target.primaryOid AND + config.targetOid = target.rowId AND config.quotaType = target.quotaType AND config.quotaType = 3 -- ROLE_TABLESPACE_QUOTA ) SELECT rolname AS role_name, - targetoid AS role_oid, + primaryoid AS role_oid, spcname AS tablespace_name, 
tablespaceoid AS tablespace_oid, quotalimitMB AS quota_in_mb, COALESCE(total_size, 0) AS rolsize_tablespace_in_bytes FROM full_quota_config JOIN - pg_roles ON targetoid = pg_roles.oid JOIN + pg_roles ON primaryoid = pg_roles.oid JOIN pg_tablespace ON tablespaceoid = pg_tablespace.oid LEFT OUTER JOIN quota_usage ON pg_roles.oid = relowner AND pg_tablespace.oid = reltablespace; -- view end diff --git a/diskquota.h b/diskquota.h index 89690b49a45..69fb339e6fe 100644 --- a/diskquota.h +++ b/diskquota.h @@ -48,7 +48,7 @@ typedef enum */ TABLESPACE_QUOTA, - NUM_QUOTA_TYPES + NUM_QUOTA_TYPES, } QuotaType; typedef enum diff --git a/diskquota_utility.c b/diskquota_utility.c index fe5a3e3f6e7..cb5065baa42 100644 --- a/diskquota_utility.c +++ b/diskquota_utility.c @@ -92,7 +92,7 @@ static void dq_object_access_hook(ObjectAccessType access, Oid classId, O static const char *ddl_err_code_to_err_message(MessageResult code); static int64 get_size_in_mb(char *str); static void set_quota_config_internal(Oid targetoid, int64 quota_limit_mb, QuotaType type, float4 segratio, Oid spcoid); -static void set_target_internal(Oid primaryoid, Oid spcoid, int64 quota_limit_mb, QuotaType type); +static int set_target_internal(Oid primaryoid, Oid spcoid, int64 quota_limit_mb, QuotaType type); static float4 get_per_segment_ratio(Oid spcoid); static bool to_delete_quota(QuotaType type, int64 quota_limit_mb, float4 segratio); @@ -772,6 +772,7 @@ set_role_tablespace_quota(PG_FUNCTION_ARGS) char *rolname; char *sizestr; int64 quota_limit_mb; + int row_id; if (!superuser()) { @@ -795,8 +796,8 @@ set_role_tablespace_quota(PG_FUNCTION_ARGS) } SPI_connect(); - set_target_internal(roleoid, spcoid, quota_limit_mb, ROLE_TABLESPACE_QUOTA); - set_quota_config_internal(roleoid, quota_limit_mb, ROLE_TABLESPACE_QUOTA, INVALID_SEGRATIO, spcoid); + row_id = set_target_internal(roleoid, spcoid, quota_limit_mb, ROLE_TABLESPACE_QUOTA); + set_quota_config_internal(row_id, quota_limit_mb, ROLE_TABLESPACE_QUOTA, INVALID_SEGRATIO, spcoid); SPI_finish(); PG_RETURN_VOID(); } @@ -817,6 +818,7 @@ set_schema_tablespace_quota(PG_FUNCTION_ARGS) char *nspname; char *sizestr; int64 quota_limit_mb; + int row_id; if (!superuser()) { @@ -840,8 +842,8 @@ set_schema_tablespace_quota(PG_FUNCTION_ARGS) } SPI_connect(); - set_target_internal(namespaceoid, spcoid, quota_limit_mb, NAMESPACE_TABLESPACE_QUOTA); - set_quota_config_internal(namespaceoid, quota_limit_mb, NAMESPACE_TABLESPACE_QUOTA, INVALID_SEGRATIO, spcoid); + row_id = set_target_internal(namespaceoid, spcoid, quota_limit_mb, NAMESPACE_TABLESPACE_QUOTA); + set_quota_config_internal(row_id, quota_limit_mb, NAMESPACE_TABLESPACE_QUOTA, INVALID_SEGRATIO, spcoid); SPI_finish(); PG_RETURN_VOID(); } @@ -963,10 +965,13 @@ set_quota_config_internal(Oid targetoid, int64 quota_limit_mb, QuotaType type, f return; } -static void +static int set_target_internal(Oid primaryoid, Oid spcoid, int64 quota_limit_mb, QuotaType type) { - int ret; + int ret; + int row_id = -1; + bool is_null = false; + Datum v; /* * If error happens in set_target_internal, just return error messages to @@ -974,12 +979,12 @@ set_target_internal(Oid primaryoid, Oid spcoid, int64 quota_limit_mb, QuotaType */ ret = SPI_execute_with_args( - "select true from diskquota.quota_config as q, diskquota.target as t" + "select t.rowId from diskquota.quota_config as q, diskquota.target as t" " where t.primaryOid = $1" " and t.tablespaceOid = $2" " and t.quotaType = $3" " and t.quotaType = q.quotaType" - " and t.primaryOid = q.targetOid", + " and t.rowId 
= q.targetOid", 3, (Oid[]){ OIDOID, @@ -994,40 +999,63 @@ set_target_internal(Oid primaryoid, Oid spcoid, int64 quota_limit_mb, QuotaType NULL, true, 0); if (ret != SPI_OK_SELECT) elog(ERROR, "cannot select target setting table: error code %d", ret); + if (SPI_processed > 0) + { + is_null = false; + v = SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 1, &is_null); + Assert(is_null == false); + row_id = DatumGetInt32(v); + } + /* if the schema or role's quota has not been set before */ if (SPI_processed == 0 && quota_limit_mb > 0) { - ret = SPI_execute_with_args("insert into diskquota.target values($1, $2, $3)", 3, - (Oid[]){ - INT4OID, - OIDOID, - OIDOID, - }, - (Datum[]){ - Int32GetDatum(type), - ObjectIdGetDatum(primaryoid), - ObjectIdGetDatum(spcoid), - }, - NULL, false, 0); - if (ret != SPI_OK_INSERT) elog(ERROR, "cannot insert into quota setting table, error code %d", ret); + ret = SPI_execute_with_args( + "insert into diskquota.target (quotatype, primaryOid, tablespaceOid) values($1, $2, $3) returning " + "rowId", + 3, + (Oid[]){ + INT4OID, + OIDOID, + OIDOID, + }, + (Datum[]){ + Int32GetDatum(type), + ObjectIdGetDatum(primaryoid), + ObjectIdGetDatum(spcoid), + }, + NULL, false, 0); + if (ret != SPI_OK_INSERT_RETURNING) elog(ERROR, "cannot insert into quota setting table, error code %d", ret); + + is_null = false; + v = SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 1, &is_null); + Assert(is_null == false); + row_id = DatumGetInt32(v); } else if (SPI_processed > 0 && quota_limit_mb < 0) { - ret = SPI_execute_with_args("delete from diskquota.target where primaryOid = $1 and tablespaceOid = $2", 2, - (Oid[]){ - OIDOID, - OIDOID, - }, - (Datum[]){ - ObjectIdGetDatum(primaryoid), - ObjectIdGetDatum(spcoid), - }, - NULL, false, 0); - if (ret != SPI_OK_DELETE) elog(ERROR, "cannot delete item from target setting table, error code %d", ret); + ret = SPI_execute_with_args( + "delete from diskquota.target where primaryOid = $1 and tablespaceOid = $2 returning rowId", 2, + (Oid[]){ + OIDOID, + OIDOID, + }, + (Datum[]){ + ObjectIdGetDatum(primaryoid), + ObjectIdGetDatum(spcoid), + }, + NULL, false, 0); + if (ret != SPI_OK_DELETE_RETURNING) + elog(ERROR, "cannot delete item from target setting table, error code %d", ret); + + is_null = false; + v = SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 1, &is_null); + Assert(is_null == false); + row_id = DatumGetInt32(v); } /* No need to update the target table */ - return; + return row_id; } /* @@ -1242,7 +1270,7 @@ set_per_segment_quota(PG_FUNCTION_ARGS) ret = SPI_execute_with_args( "UPDATE diskquota.quota_config AS q set segratio = $1 FROM diskquota.target AS t WHERE " - "q.targetOid = t.primaryOid AND (t.quotaType = $2 OR t.quotaType = $3) AND t.quotaType = " + "q.targetOid = t.rowId AND (t.quotaType = $2 OR t.quotaType = $3) AND t.quotaType = " "q.quotaType And t.tablespaceOid = $4", 4, (Oid[]){ diff --git a/quotamodel.c b/quotamodel.c index cae8a4b4951..a6eaaa432dc 100644 --- a/quotamodel.c +++ b/quotamodel.c @@ -49,7 +49,7 @@ #define MAX_LOCAL_DISK_QUOTA_BLACK_ENTRIES 8192 #define MAX_NUM_KEYS_QUOTA_MAP 8 /* Number of attributes in quota configuration records. 
*/ -#define NUM_QUOTA_CONFIG_ATTRS 5 +#define NUM_QUOTA_CONFIG_ATTRS 6 typedef struct TableSizeEntry TableSizeEntry; typedef struct NamespaceSizeEntry NamespaceSizeEntry; @@ -1237,12 +1237,21 @@ do_load_quotas(void) /* * read quotas from diskquota.quota_config and target table */ - ret = SPI_execute( + ret = SPI_execute_with_args( "SELECT c.targetOid, c.quotaType, c.quotalimitMB, COALESCE(c.segratio, 0) AS segratio, " - "COALESCE(t.tablespaceoid, 0) AS tablespaceoid " + "COALESCE(t.tablespaceoid, 0) AS tablespaceoid, COALESCE(t.primaryOid, 0) AS primaryoid " "FROM diskquota.quota_config AS c LEFT OUTER JOIN diskquota.target AS t " - "ON c.targetOid = t.primaryOid and c.quotaType = t.quotaType", - true, 0); + "ON c.targetOid = t.rowId AND c.quotaType IN ($1, $2) AND c.quotaType = t.quotaType", + 2, + (Oid[]){ + INT4OID, + INT4OID, + }, + (Datum[]){ + Int32GetDatum(NAMESPACE_TABLESPACE_QUOTA), + Int32GetDatum(ROLE_TABLESPACE_QUOTA), + }, + NULL, true, 0); if (ret != SPI_OK_SELECT) ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), errmsg("[diskquota] load_quotas SPI_execute failed: error code %d", ret))); @@ -1278,6 +1287,12 @@ do_load_quotas(void) int64 quota_limit_mb = DatumGetInt64(vals[2]); float segratio = DatumGetFloat4(vals[3]); Oid spcOid = DatumGetObjectId(vals[4]); + Oid primaryOid = DatumGetObjectId(vals[5]); + + if (quotaType == NAMESPACE_TABLESPACE_QUOTA || quotaType == ROLE_TABLESPACE_QUOTA) + { + targetOid = primaryOid; + } if (spcOid == InvalidOid) { diff --git a/tests/isolation2/expected/test_per_segment_config.out b/tests/isolation2/expected/test_per_segment_config.out index 92edda43b16..0d4aa43b426 100644 --- a/tests/isolation2/expected/test_per_segment_config.out +++ b/tests/isolation2/expected/test_per_segment_config.out @@ -41,7 +41,7 @@ COMMIT 2: COMMIT; COMMIT -SELECT segratio from diskquota.quota_config, pg_namespace where targetoid = oid and nspname = 's101'; +SELECT segratio FROM diskquota.quota_config, pg_namespace, diskquota.target WHERE targetoid = diskquota.target.rowId AND diskquota.target.primaryOid = oid AND nspname = 's101'; segratio ---------- 1.0 @@ -78,7 +78,7 @@ COMMIT 2: COMMIT; COMMIT -SELECT segratio from diskquota.quota_config, pg_namespace where targetoid = oid and nspname = 's101'; +SELECT segratio FROM diskquota.quota_config, pg_namespace, diskquota.target WHERE targetoid = diskquota.target.rowId AND diskquota.target.primaryOid = oid AND nspname = 's101'; segratio ---------- 1.0 @@ -124,7 +124,7 @@ COMMIT 2: COMMIT; COMMIT -SELECT segratio from diskquota.quota_config, pg_namespace where targetoid = oid and nspname = 's101'; +SELECT segratio FROM diskquota.quota_config, pg_namespace, diskquota.target WHERE targetoid = diskquota.target.rowId AND diskquota.target.primaryOid = oid AND nspname = 's101'; segratio ---------- 1.0 @@ -166,7 +166,7 @@ COMMIT 2: COMMIT; COMMIT -SELECT segratio from diskquota.quota_config, pg_namespace where targetoid = oid and nspname = 's101'; +SELECT segratio FROM diskquota.quota_config, pg_namespace, diskquota.target WHERE targetoid = diskquota.target.rowId AND diskquota.target.primaryOid = oid AND nspname = 's101'; segratio ---------- 1.0 @@ -208,7 +208,7 @@ COMMIT 2: COMMIT; COMMIT -SELECT segratio from diskquota.quota_config, pg_namespace where targetoid = oid and nspname = 's101'; +SELECT segratio FROM diskquota.quota_config, pg_namespace, diskquota.target WHERE targetoid = diskquota.target.rowId AND diskquota.target.primaryOid = oid AND nspname = 's101'; segratio ---------- 0.0 @@ -249,7 +249,7 @@ COMMIT 2: COMMIT; 
COMMIT -SELECT segratio from diskquota.quota_config, pg_namespace where targetoid = oid and nspname = 's101'; +SELECT segratio FROM diskquota.quota_config, pg_namespace, diskquota.target WHERE targetoid = diskquota.target.rowId AND diskquota.target.primaryOid = oid AND nspname = 's101'; segratio ---------- 0.0 diff --git a/tests/isolation2/sql/test_per_segment_config.sql b/tests/isolation2/sql/test_per_segment_config.sql index c3d0d35534c..7592ffc00a2 100644 --- a/tests/isolation2/sql/test_per_segment_config.sql +++ b/tests/isolation2/sql/test_per_segment_config.sql @@ -9,7 +9,7 @@ CREATE SCHEMA s101; DROP TABLESPACE IF EXISTS spc101; CREATE TABLESPACE spc101 LOCATION '/tmp/spc101'; --- +-- -- There is no tablesapce per segment quota configed yet -- @@ -22,13 +22,14 @@ CREATE TABLESPACE spc101 LOCATION '/tmp/spc101'; 2<: 2: COMMIT; -SELECT segratio from diskquota.quota_config, pg_namespace where targetoid = oid and nspname = 's101'; +SELECT segratio FROM diskquota.quota_config, pg_namespace, diskquota.target + WHERE targetoid = diskquota.target.rowId AND diskquota.target.primaryOid = oid AND nspname = 's101'; SELECT segratio from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'spc101'; -- cleanup truncate table diskquota.quota_config; truncate table diskquota.target; --- Read commited, first set_schema_tablespace_quota, then set_per_segment_quota, +-- Read commited, first set_schema_tablespace_quota, then set_per_segment_quota, 1: BEGIN; 1: SELECT diskquota.set_schema_tablespace_quota('s101', 'spc101','1 MB'); 2: BEGIN; @@ -37,13 +38,14 @@ truncate table diskquota.target; 2<: 2: COMMIT; -SELECT segratio from diskquota.quota_config, pg_namespace where targetoid = oid and nspname = 's101'; +SELECT segratio FROM diskquota.quota_config, pg_namespace, diskquota.target + WHERE targetoid = diskquota.target.rowId AND diskquota.target.primaryOid = oid AND nspname = 's101'; SELECT segratio from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'spc101'; -- cleanup truncate table diskquota.quota_config; truncate table diskquota.target; --- +-- -- There is already a tablesapce per segment quota configed -- @@ -57,13 +59,14 @@ SELECT diskquota.set_per_segment_quota('spc101', 2); 2<: 2: COMMIT; -SELECT segratio from diskquota.quota_config, pg_namespace where targetoid = oid and nspname = 's101'; +SELECT segratio FROM diskquota.quota_config, pg_namespace, diskquota.target + WHERE targetoid = diskquota.target.rowId AND diskquota.target.primaryOid = oid AND nspname = 's101'; SELECT segratio from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'spc101'; -- cleanup truncate table diskquota.quota_config; truncate table diskquota.target; --- Read commited, first set_schema_tablespace_quota, then set_per_segment_quota, +-- Read commited, first set_schema_tablespace_quota, then set_per_segment_quota, SELECT diskquota.set_per_segment_quota('spc101', 2); 1: BEGIN; 1: SELECT diskquota.set_schema_tablespace_quota('s101', 'spc101','1 MB'); @@ -73,7 +76,8 @@ SELECT diskquota.set_per_segment_quota('spc101', 2); 2<: 2: COMMIT; -SELECT segratio from diskquota.quota_config, pg_namespace where targetoid = oid and nspname = 's101'; +SELECT segratio FROM diskquota.quota_config, pg_namespace, diskquota.target + WHERE targetoid = diskquota.target.rowId AND diskquota.target.primaryOid = oid AND nspname = 's101'; SELECT segratio from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'spc101'; -- cleanup truncate table 
diskquota.quota_config; @@ -89,7 +93,8 @@ SELECT diskquota.set_per_segment_quota('spc101', 2); 2<: 2: COMMIT; -SELECT segratio from diskquota.quota_config, pg_namespace where targetoid = oid and nspname = 's101'; +SELECT segratio FROM diskquota.quota_config, pg_namespace, diskquota.target + WHERE targetoid = diskquota.target.rowId AND diskquota.target.primaryOid = oid AND nspname = 's101'; SELECT segratio from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'spc101'; -- cleanup truncate table diskquota.quota_config; @@ -105,7 +110,8 @@ SELECT diskquota.set_per_segment_quota('spc101', 2); 2<: 2: COMMIT; -SELECT segratio from diskquota.quota_config, pg_namespace where targetoid = oid and nspname = 's101'; +SELECT segratio FROM diskquota.quota_config, pg_namespace, diskquota.target + WHERE targetoid = diskquota.target.rowId AND diskquota.target.primaryOid = oid AND nspname = 's101'; SELECT segratio from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'spc101'; -- cleanup truncate table diskquota.quota_config; diff --git a/tests/regress/diskquota_schedule b/tests/regress/diskquota_schedule index e1350e694db..6c62469bf77 100644 --- a/tests/regress/diskquota_schedule +++ b/tests/regress/diskquota_schedule @@ -34,5 +34,6 @@ test: test_ctas_schema test: test_ctas_tablespace_role test: test_ctas_tablespace_schema test: test_default_tablespace +test: test_tablespace_diff_schema test: test_drop_extension test: reset_config diff --git a/tests/regress/expected/test_tablespace_diff_schema.out b/tests/regress/expected/test_tablespace_diff_schema.out new file mode 100644 index 00000000000..ae6a1d356a7 --- /dev/null +++ b/tests/regress/expected/test_tablespace_diff_schema.out @@ -0,0 +1,80 @@ +-- allow set quota for different schema in the same tablespace +-- delete quota for one schema will not drop other quotas with different schema in the same tablespace +-- start_ignore +\! 
mkdir -p /tmp/spc_diff_schema +-- end_ignore +CREATE TABLESPACE spc_diff_schema LOCATION '/tmp/spc_diff_schema'; +CREATE SCHEMA schema_in_tablespc; +SET search_path TO schema_in_tablespc; +CREATE TABLE a(i int) TABLESPACE spc_diff_schema DISTRIBUTED BY (i); +INSERT INTO a SELECT generate_series(1,100); +SELECT diskquota.set_schema_tablespace_quota('schema_in_tablespc', 'spc_diff_schema','1 MB'); + set_schema_tablespace_quota +----------------------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- with hardlimits off, expect to success +INSERT INTO a SELECT generate_series(1,1000000); +-- expect to fail +INSERT INTO a SELECT generate_series(1,1000000); +ERROR: tablespace:spc_diff_schema schema:schema_in_tablespc diskquota exceeded +SELECT schema_name, tablespace_name FROM diskquota.show_fast_schema_tablespace_quota_view; + schema_name | tablespace_name +--------------------+----------------- + schema_in_tablespc | spc_diff_schema +(1 row) + +SELECT diskquota.set_schema_tablespace_quota('schema_in_tablespc', 'pg_default','1 MB'); + set_schema_tablespace_quota +----------------------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT schema_name, tablespace_name FROM diskquota.show_fast_schema_tablespace_quota_view; + schema_name | tablespace_name +--------------------+----------------- + schema_in_tablespc | spc_diff_schema + schema_in_tablespc | pg_default +(2 rows) + +SELECT diskquota.set_schema_tablespace_quota('schema_in_tablespc', 'pg_default','-1'); + set_schema_tablespace_quota +----------------------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT schema_name, tablespace_name FROM diskquota.show_fast_schema_tablespace_quota_view; + schema_name | tablespace_name +--------------------+----------------- + schema_in_tablespc | spc_diff_schema +(1 row) + +-- expect to fail +INSERT INTO a SELECT generate_series(1,1000000); +ERROR: tablespace:spc_diff_schema schema:schema_in_tablespc diskquota exceeded +reset search_path; +DROP TABLE IF EXISTS schema_in_tablespc.a; +DROP tablespace IF EXISTS spc_diff_schema; +DROP SCHEMA IF EXISTS schema_in_tablespc; +-- start_ignore +\! 
rmdir /tmp/spc_diff_schema + -- end_ignore diff --git a/tests/regress/expected/test_tablespace_schema_perseg.out b/tests/regress/expected/test_tablespace_schema_perseg.out index b8d08021599..f97055674a3 100644 --- a/tests/regress/expected/test_tablespace_schema_perseg.out +++ b/tests/regress/expected/test_tablespace_schema_perseg.out @@ -202,7 +202,7 @@ SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FR -------------+-----------------+-------------+----------------------------- (0 rows) --- test config per segment quota +-- test config per segment quota SELECT diskquota.set_per_segment_quota('schemaspc_perseg2','1'); set_per_segment_quota ----------------------- @@ -221,7 +221,9 @@ SELECT diskquota.set_schema_tablespace_quota('spcs2_perseg', 'schemaspc_perseg2' (1 row) -SELECT distinct(segratio) from diskquota.quota_config, pg_namespace where targetoid = oid and nspname = 'spcs2_perseg'; +SELECT distinct(segratio) FROM diskquota.quota_config, pg_namespace, diskquota.target + WHERE diskquota.quota_config.targetoid = diskquota.target.rowId AND + diskquota.target.primaryOid = pg_namespace.oid AND nspname = 'spcs2_perseg'; segratio ---------- 1 @@ -238,7 +240,9 @@ SELECT distinct(segratio) from diskquota.quota_config, pg_tablespace where targe ---------- (0 rows) -SELECT distinct(segratio) from diskquota.quota_config, pg_namespace where targetoid = oid and nspname = 'spcs2_perseg'; +SELECT distinct(segratio) FROM diskquota.quota_config, pg_namespace, diskquota.target + WHERE diskquota.quota_config.targetoid = diskquota.target.rowId AND + diskquota.target.primaryOid = pg_namespace.oid AND nspname = 'spcs2_perseg'; segratio ---------- 0 @@ -256,7 +260,9 @@ SELECT distinct(segratio) from diskquota.quota_config, pg_tablespace where targe 3 (1 row) -SELECT distinct(segratio) from diskquota.quota_config, pg_namespace where targetoid = oid and nspname = 'spcs2_perseg'; +SELECT distinct(segratio) FROM diskquota.quota_config, pg_namespace, diskquota.target + WHERE diskquota.quota_config.targetoid = diskquota.target.rowId AND + diskquota.target.primaryOid = pg_namespace.oid AND nspname = 'spcs2_perseg'; segratio ---------- 3 diff --git a/tests/regress/sql/test_relation_size.sql b/tests/regress/sql/test_relation_size.sql index 0f04d9c9037..b783ec24227 100644 --- a/tests/regress/sql/test_relation_size.sql +++ b/tests/regress/sql/test_relation_size.sql @@ -26,6 +26,9 @@ SELECT pg_table_size('t2'); DROP TABLE t1, t2; DROP TABLESPACE test_spc; +-- start_ignore +\! rm -rf /tmp/test_spc + -- end_ignore CREATE TABLE ao (i int) WITH (appendonly=true) DISTRIBUTED BY (i); INSERT INTO ao SELECT generate_series(1, 10000); diff --git a/tests/regress/sql/test_tablespace_diff_schema.sql b/tests/regress/sql/test_tablespace_diff_schema.sql new file mode 100644 index 00000000000..28c8447a499 --- /dev/null +++ b/tests/regress/sql/test_tablespace_diff_schema.sql @@ -0,0 +1,43 @@ +-- allow set quota for different schema in the same tablespace +-- delete quota for one schema will not drop other quotas with different schema in the same tablespace + +-- start_ignore +\! 
mkdir -p /tmp/spc_diff_schema +-- end_ignore + +CREATE TABLESPACE spc_diff_schema LOCATION '/tmp/spc_diff_schema'; +CREATE SCHEMA schema_in_tablespc; +SET search_path TO schema_in_tablespc; + +CREATE TABLE a(i int) TABLESPACE spc_diff_schema DISTRIBUTED BY (i); +INSERT INTO a SELECT generate_series(1,100); +SELECT diskquota.set_schema_tablespace_quota('schema_in_tablespc', 'spc_diff_schema','1 MB'); +SELECT diskquota.wait_for_worker_new_epoch(); + +-- with hardlimits off, expect to success +INSERT INTO a SELECT generate_series(1,1000000); + +-- expect to fail +INSERT INTO a SELECT generate_series(1,1000000); + +SELECT schema_name, tablespace_name FROM diskquota.show_fast_schema_tablespace_quota_view; + +SELECT diskquota.set_schema_tablespace_quota('schema_in_tablespc', 'pg_default','1 MB'); +SELECT diskquota.wait_for_worker_new_epoch(); +SELECT schema_name, tablespace_name FROM diskquota.show_fast_schema_tablespace_quota_view; + +SELECT diskquota.set_schema_tablespace_quota('schema_in_tablespc', 'pg_default','-1'); +SELECT diskquota.wait_for_worker_new_epoch(); +SELECT schema_name, tablespace_name FROM diskquota.show_fast_schema_tablespace_quota_view; + +-- expect to fail +INSERT INTO a SELECT generate_series(1,1000000); + +reset search_path; +DROP TABLE IF EXISTS schema_in_tablespc.a; +DROP tablespace IF EXISTS spc_diff_schema; +DROP SCHEMA IF EXISTS schema_in_tablespc; + +-- start_ignore +\! rmdir /tmp/spc_diff_schema + -- end_ignore diff --git a/tests/regress/sql/test_tablespace_schema_perseg.sql b/tests/regress/sql/test_tablespace_schema_perseg.sql index ae6dc51ee3a..bab514e1c0a 100644 --- a/tests/regress/sql/test_tablespace_schema_perseg.sql +++ b/tests/regress/sql/test_tablespace_schema_perseg.sql @@ -83,17 +83,31 @@ SELECT diskquota.wait_for_worker_new_epoch(); INSERT INTO a SELECT generate_series(1,100); SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name = 'spcs1_perseg' and tablespace_name ='schemaspc_perseg'; --- test config per segment quota +-- test config per segment quota SELECT diskquota.set_per_segment_quota('schemaspc_perseg2','1'); SELECT distinct(segratio) from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'schemaspc_perseg2'; + SELECT diskquota.set_schema_tablespace_quota('spcs2_perseg', 'schemaspc_perseg2','1 MB'); -SELECT distinct(segratio) from diskquota.quota_config, pg_namespace where targetoid = oid and nspname = 'spcs2_perseg'; + +SELECT distinct(segratio) FROM diskquota.quota_config, pg_namespace, diskquota.target + WHERE diskquota.quota_config.targetoid = diskquota.target.rowId AND + diskquota.target.primaryOid = pg_namespace.oid AND nspname = 'spcs2_perseg'; + SELECT diskquota.set_per_segment_quota('schemaspc_perseg2','-2'); + SELECT distinct(segratio) from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'schemaspc_perseg2'; -SELECT distinct(segratio) from diskquota.quota_config, pg_namespace where targetoid = oid and nspname = 'spcs2_perseg'; + +SELECT distinct(segratio) FROM diskquota.quota_config, pg_namespace, diskquota.target + WHERE diskquota.quota_config.targetoid = diskquota.target.rowId AND + diskquota.target.primaryOid = pg_namespace.oid AND nspname = 'spcs2_perseg'; + SELECT diskquota.set_per_segment_quota('schemaspc_perseg2','3'); + SELECT distinct(segratio) from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'schemaspc_perseg2'; -SELECT distinct(segratio) from 
diskquota.quota_config, pg_namespace where targetoid = oid and nspname = 'spcs2_perseg'; + +SELECT distinct(segratio) FROM diskquota.quota_config, pg_namespace, diskquota.target + WHERE diskquota.quota_config.targetoid = diskquota.target.rowId AND + diskquota.target.primaryOid = pg_namespace.oid AND nspname = 'spcs2_perseg'; RESET search_path; DROP TABLE spcs1_perseg.a; diff --git a/upgrade_test/expected/2.0_catalog.out b/upgrade_test/expected/2.0_catalog.out index 67d66fc8319..85d3e98cb0c 100644 --- a/upgrade_test/expected/2.0_catalog.out +++ b/upgrade_test/expected/2.0_catalog.out @@ -25,8 +25,8 @@ GROUP BY t1.typname ORDER BY t1.typname; - typname | typname -----------------------------------------+----------------------------------------------------- + typname | typname +----------------------------------------+---------------------------------------------------------------------------------- blackmap | {bool,int4,text,oid,oid,oid,oid,oid,oid} blackmap_entry | {bool,int4,oid,oid,oid} blackmap_entry_detail | {bool,int4,text,oid,oid,oid,oid,oid,oid} @@ -40,8 +40,9 @@ ORDER BY show_fast_schema_tablespace_quota_view | {name,name,int8,oid,oid,numeric} state | {int4,int4,oid,tid,xid,xid,cid,cid} table_size | {int8,int2,int4,oid,oid,tid,xid,xid,cid,cid} - target | {int4,int4,oid,oid,oid,tid,xid,xid,cid,cid} -(14 rows) + target | {int4,int4,int4,oid,oid,oid,tid,xid,xid,cid,cid} + target_rowid_seq | {bool,bool,name,int8,int8,int8,int8,int8,int8,int8,int4,oid,tid,xid,xid,cid,cid} +(15 rows) -- types end -- tables @@ -72,7 +73,8 @@ ORDER BY table_size_pkey | | target | {target} | target_pkey | | -(12 rows) + target_rowid_seq | {target_rowid_seq} | +(13 rows) -- tables end -- UDF @@ -191,21 +193,21 @@ ORDER by | | WHERE ((table_size.tableid = pg_class.oid) AND (table_size.segid = (-1))) + | | GROUP BY pg_class.relowner, pg_class.reltablespace, default_tablespace.dattablespace + | | ), full_quota_config AS ( + - | | SELECT config.targetoid, + + | | SELECT target.primaryoid, + | | target.tablespaceoid, + | | config.quotalimitmb + | | FROM diskquota.quota_config config, + | | diskquota.target target + - | | WHERE (((config.targetoid = target.primaryoid) AND (config.quotatype = target.quotatype)) AND (config.quotatype = 3)) + + | | WHERE (((config.targetoid = (target.rowid)::oid) AND (config.quotatype = target.quotatype)) AND (config.quotatype = 3)) + | | ) + | | SELECT pg_roles.rolname AS role_name, + - | | full_quota_config.targetoid AS role_oid, + + | | full_quota_config.primaryoid AS role_oid, + | | pg_tablespace.spcname AS tablespace_name, + | | full_quota_config.tablespaceoid AS tablespace_oid, + | | full_quota_config.quotalimitmb AS quota_in_mb, + | | COALESCE(quota_usage.total_size, (0)::numeric) AS rolsize_tablespace_in_bytes + | | FROM (((full_quota_config + - | | JOIN pg_roles ON ((full_quota_config.targetoid = pg_roles.oid))) + + | | JOIN pg_roles ON ((full_quota_config.primaryoid = pg_roles.oid))) + | | JOIN pg_tablespace ON ((full_quota_config.tablespaceoid = pg_tablespace.oid))) + | | LEFT JOIN quota_usage ON (((pg_roles.oid = quota_usage.relowner) AND (pg_tablespace.oid = quota_usage.reltablespace)))); diskquota | show_fast_schema_quota_view | WITH quota_usage AS ( + @@ -241,21 +243,21 @@ ORDER by | | WHERE ((table_size.tableid = pg_class.oid) AND (table_size.segid = (-1))) + | | GROUP BY pg_class.relnamespace, pg_class.reltablespace, default_tablespace.dattablespace + | | ), full_quota_config AS ( + - | | SELECT config.targetoid, + + | | SELECT target.primaryoid, + | | 
target.tablespaceoid, + | | config.quotalimitmb + | | FROM diskquota.quota_config config, + | | diskquota.target target + - | | WHERE (((config.targetoid = target.primaryoid) AND (config.quotatype = target.quotatype)) AND (config.quotatype = 2)) + + | | WHERE (((config.targetoid = (target.rowid)::oid) AND (config.quotatype = target.quotatype)) AND (config.quotatype = 2)) + | | ) + | | SELECT pg_namespace.nspname AS schema_name, + - | | full_quota_config.targetoid AS schema_oid, + + | | full_quota_config.primaryoid AS schema_oid, + | | pg_tablespace.spcname AS tablespace_name, + | | full_quota_config.tablespaceoid AS tablespace_oid, + | | full_quota_config.quotalimitmb AS quota_in_mb, + | | COALESCE(quota_usage.total_size, (0)::numeric) AS nspsize_tablespace_in_bytes + | | FROM (((full_quota_config + - | | JOIN pg_namespace ON ((full_quota_config.targetoid = pg_namespace.oid))) + + | | JOIN pg_namespace ON ((full_quota_config.primaryoid = pg_namespace.oid))) + | | JOIN pg_tablespace ON ((full_quota_config.tablespaceoid = pg_tablespace.oid))) + | | LEFT JOIN quota_usage ON (((pg_namespace.oid = quota_usage.relnamespace) AND (pg_tablespace.oid = quota_usage.reltablespace)))); (6 rows) From a32b331619b2523cb944b3ab91763a6466add4fe Mon Sep 17 00:00:00 2001 From: LXY Date: Wed, 13 Apr 2022 16:35:40 +0800 Subject: [PATCH 183/330] Wait next loop for bgworker to refresh before INSERT in testcase (#205) `diskquota.naptime` is 2 by default in the release version. So it's better to wait for the bgworker to refresh the blackmap after INSERT. --- tests/regress/expected/test_tablespace_diff_schema.out | 7 +++++++ tests/regress/sql/test_tablespace_diff_schema.sql | 2 ++ 2 files changed, 9 insertions(+) diff --git a/tests/regress/expected/test_tablespace_diff_schema.out b/tests/regress/expected/test_tablespace_diff_schema.out index ae6a1d356a7..0276714ddea 100644 --- a/tests/regress/expected/test_tablespace_diff_schema.out +++ b/tests/regress/expected/test_tablespace_diff_schema.out @@ -22,6 +22,13 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- with hardlimits off, expect to success INSERT INTO a SELECT generate_series(1,1000000); +-- wait for next loop for bgworker to add it to blackmap +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + -- expect to fail INSERT INTO a SELECT generate_series(1,1000000); ERROR: tablespace:spc_diff_schema schema:schema_in_tablespc diskquota exceeded diff --git a/tests/regress/sql/test_tablespace_diff_schema.sql b/tests/regress/sql/test_tablespace_diff_schema.sql index 28c8447a499..4e432e99cbb 100644 --- a/tests/regress/sql/test_tablespace_diff_schema.sql +++ b/tests/regress/sql/test_tablespace_diff_schema.sql @@ -17,6 +17,8 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- with hardlimits off, expect to success INSERT INTO a SELECT generate_series(1,1000000); +-- wait for next loop for bgworker to add it to blackmap +SELECT diskquota.wait_for_worker_new_epoch(); -- expect to fail INSERT INTO a SELECT generate_series(1,1000000); From 5912e7dbdde1258d71f4883393d696f59b82687d Mon Sep 17 00:00:00 2001 From: Xiaoran Wang Date: Wed, 13 Apr 2022 18:12:14 +0800 Subject: [PATCH 184/330] Add UDF diskquota.show_segment_ratio_quota_view (#204) --- diskquota--1.0--2.0.sql | 11 +++++++++++ diskquota--2.0--1.0.sql | 1 + diskquota--2.0.sql | 11 +++++++++++ .../expected/test_tablespace_schema_perseg.out | 7 +++++++ tests/regress/sql/test_tablespace_schema_perseg.sql | 1 + upgrade_test/expected/2.0_catalog.out | 10 
++++++++-- 6 files changed, 39 insertions(+), 2 deletions(-) diff --git a/diskquota--1.0--2.0.sql b/diskquota--1.0--2.0.sql index 5f8de2b836f..1b3378a3899 100644 --- a/diskquota--1.0--2.0.sql +++ b/diskquota--1.0--2.0.sql @@ -269,5 +269,16 @@ FROM pg_roles ON primaryoid = pg_roles.oid JOIN pg_tablespace ON tablespaceoid = pg_tablespace.oid LEFT OUTER JOIN quota_usage ON pg_roles.oid = relowner AND pg_tablespace.oid = reltablespace; + +CREATE VIEW diskquota.show_segment_ratio_quota_view AS +SELECT + spcname as tablespace_name, + pg_tablespace.oid as tablespace_oid, + segratio as per_seg_quota_ratio +FROM + diskquota.quota_config JOIN + pg_tablespace ON targetOid = pg_tablespace.oid + AND quotatype = 4; + -- views end diff --git a/diskquota--2.0--1.0.sql b/diskquota--2.0--1.0.sql index 2f55153aeb5..2675171d114 100644 --- a/diskquota--2.0--1.0.sql +++ b/diskquota--2.0--1.0.sql @@ -4,6 +4,7 @@ DROP VIEW diskquota.blackmap; DROP VIEW diskquota.show_fast_schema_tablespace_quota_view; DROP VIEW diskquota.show_fast_role_tablespace_quota_view; +DROP VIEW diskquota.show_segment_ratio_quota_view; /* ALTER */ CREATE OR REPLACE VIEW diskquota.show_fast_database_size_view AS SELECT ( diff --git a/diskquota--2.0.sql b/diskquota--2.0.sql index 1842a0a5340..67d8aa22fbc 100644 --- a/diskquota--2.0.sql +++ b/diskquota--2.0.sql @@ -286,6 +286,17 @@ FROM pg_roles ON primaryoid = pg_roles.oid JOIN pg_tablespace ON tablespaceoid = pg_tablespace.oid LEFT OUTER JOIN quota_usage ON pg_roles.oid = relowner AND pg_tablespace.oid = reltablespace; + +CREATE VIEW diskquota.show_segment_ratio_quota_view AS +SELECT + spcname as tablespace_name, + pg_tablespace.oid as tablespace_oid, + segratio as per_seg_quota_ratio +FROM + diskquota.quota_config JOIN + pg_tablespace ON targetOid = pg_tablespace.oid + AND quotatype = 4; + -- view end -- prepare to boot diff --git a/tests/regress/expected/test_tablespace_schema_perseg.out b/tests/regress/expected/test_tablespace_schema_perseg.out index f97055674a3..3af1b0f5c8b 100644 --- a/tests/regress/expected/test_tablespace_schema_perseg.out +++ b/tests/regress/expected/test_tablespace_schema_perseg.out @@ -268,6 +268,13 @@ SELECT distinct(segratio) FROM diskquota.quota_config, pg_namespace, diskquota.t 3 (1 row) +SELECT tablespace_name, per_seg_quota_ratio FROM diskquota.show_segment_ratio_quota_view where tablespace_name in ('schemaspc_perseg2', 'schemaspc_perseg'); + tablespace_name | per_seg_quota_ratio +-------------------+--------------------- + schemaspc_perseg | 2 + schemaspc_perseg2 | 3 +(2 rows) + RESET search_path; DROP TABLE spcs1_perseg.a; DROP SCHEMA spcs1_perseg; diff --git a/tests/regress/sql/test_tablespace_schema_perseg.sql b/tests/regress/sql/test_tablespace_schema_perseg.sql index bab514e1c0a..8fb1c33f3d1 100644 --- a/tests/regress/sql/test_tablespace_schema_perseg.sql +++ b/tests/regress/sql/test_tablespace_schema_perseg.sql @@ -108,6 +108,7 @@ SELECT distinct(segratio) from diskquota.quota_config, pg_tablespace where targe SELECT distinct(segratio) FROM diskquota.quota_config, pg_namespace, diskquota.target WHERE diskquota.quota_config.targetoid = diskquota.target.rowId AND diskquota.target.primaryOid = pg_namespace.oid AND nspname = 'spcs2_perseg'; +SELECT tablespace_name, per_seg_quota_ratio FROM diskquota.show_segment_ratio_quota_view where tablespace_name in ('schemaspc_perseg2', 'schemaspc_perseg'); RESET search_path; DROP TABLE spcs1_perseg.a; diff --git a/upgrade_test/expected/2.0_catalog.out b/upgrade_test/expected/2.0_catalog.out index 
85d3e98cb0c..476936bc069 100644 --- a/upgrade_test/expected/2.0_catalog.out +++ b/upgrade_test/expected/2.0_catalog.out @@ -38,11 +38,12 @@ ORDER BY show_fast_role_tablespace_quota_view | {name,name,int8,oid,oid,numeric} show_fast_schema_quota_view | {name,int8,oid,numeric} show_fast_schema_tablespace_quota_view | {name,name,int8,oid,oid,numeric} + show_segment_ratio_quota_view | {name,oid,float4} state | {int4,int4,oid,tid,xid,xid,cid,cid} table_size | {int8,int2,int4,oid,oid,tid,xid,xid,cid,cid} target | {int4,int4,int4,oid,oid,oid,tid,xid,xid,cid,cid} target_rowid_seq | {bool,bool,name,int8,int8,int8,int8,int8,int8,int8,int4,oid,tid,xid,xid,cid,cid} -(15 rows) +(16 rows) -- types end -- tables @@ -260,7 +261,12 @@ ORDER by | | JOIN pg_namespace ON ((full_quota_config.primaryoid = pg_namespace.oid))) + | | JOIN pg_tablespace ON ((full_quota_config.tablespaceoid = pg_tablespace.oid))) + | | LEFT JOIN quota_usage ON (((pg_namespace.oid = quota_usage.relnamespace) AND (pg_tablespace.oid = quota_usage.reltablespace)))); -(6 rows) + diskquota | show_segment_ratio_quota_view | SELECT pg_tablespace.spcname AS tablespace_name, + + | | pg_tablespace.oid AS tablespace_oid, + + | | quota_config.segratio AS per_seg_quota_ratio + + | | FROM (diskquota.quota_config + + | | JOIN pg_tablespace ON (((quota_config.targetoid = pg_tablespace.oid) AND (quota_config.quotatype = 4)))); +(7 rows) -- views end DROP FUNCTION typeid_to_name (oid[]); From 8fef0af239f393fb689d3f8e94ae4583bfd150a0 Mon Sep 17 00:00:00 2001 From: Sasasu Date: Thu, 14 Apr 2022 10:25:05 +0800 Subject: [PATCH 185/330] clean blackmap after drop diskquota (#196) Clean the blackmap after dropping diskquota. This fixes "hardlimit exceeded" errors that persist after dropping diskquota, i.e. this scenario: - create a hard limit - the blackmap on a segment is updated - drop diskquota - the blackmap on the segment is not cleaned up Use the ObjectAccess hook on segments: when diskquota is dropped, clean up the in-memory blackmap. --- gp_activetable.c | 11 +++++++ tests/regress/diskquota_schedule | 1 + .../test_clean_blackmap_after_drop.out | 30 +++++++++++++++++++ .../sql/test_clean_blackmap_after_drop.sql | 26 ++++++++++++++++ 4 files changed, 68 insertions(+) create mode 100644 tests/regress/expected/test_clean_blackmap_after_drop.out create mode 100644 tests/regress/sql/test_clean_blackmap_after_drop.sql diff --git a/gp_activetable.c b/gp_activetable.c index e7166f30ff0..7ff2e8270aa 100644 --- a/gp_activetable.c +++ b/gp_activetable.c @@ -20,10 +20,12 @@ #include "access/xact.h" #include "catalog/catalog.h" #include "catalog/objectaccess.h" +#include "catalog/pg_extension.h" #include "cdb/cdbdisp_query.h" #include "cdb/cdbdispatchresult.h" #include "cdb/cdbvars.h" #include "commands/dbcommands.h" +#include "commands/extension.h" #include "executor/spi.h" #include "funcapi.h" #include "libpq-fe.h" @@ -170,6 +172,15 @@ object_access_hook_QuotaStmt(ObjectAccessType access, Oid classId, Oid objectId, { if (prev_object_access_hook) (*prev_object_access_hook)(access, classId, objectId, subId, arg); + // if this is 'drop extension diskquota' + if (classId == ExtensionRelationId) + { + if (get_extension_oid("diskquota", true) == objectId) + { + invalidate_database_blackmap(MyDatabaseId); + } + } + /* TODO: do we need to use "&&" instead of "||"? 
*/ if (classId != RelationRelationId || subId != 0) { diff --git a/tests/regress/diskquota_schedule b/tests/regress/diskquota_schedule index 6c62469bf77..09ef44d27ae 100644 --- a/tests/regress/diskquota_schedule +++ b/tests/regress/diskquota_schedule @@ -28,6 +28,7 @@ test: test_many_active_tables test: test_fetch_table_stat test: test_appendonly test: test_blackmap +test: test_clean_blackmap_after_drop test: test_ctas_pause test: test_ctas_role test: test_ctas_schema diff --git a/tests/regress/expected/test_clean_blackmap_after_drop.out b/tests/regress/expected/test_clean_blackmap_after_drop.out new file mode 100644 index 00000000000..20e1be68e0d --- /dev/null +++ b/tests/regress/expected/test_clean_blackmap_after_drop.out @@ -0,0 +1,30 @@ +CREATE DATABASE test_clean_blackmap_after_drop; +\c test_clean_blackmap_after_drop +CREATE EXTENSION diskquota; +\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null +\! gpstop -u > /dev/null +CREATE ROLE r; +NOTICE: resource queue required -- using default resource queue "pg_default" +SELECT diskquota.set_role_quota('r', '1MB'); + set_role_quota +---------------- + +(1 row) + +CREATE TABLE b (t TEXT) DISTRIBUTED BY (t); +ALTER TABLE b OWNER TO r; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +INSERT INTO b SELECT generate_series(1, 100000); -- fail +ERROR: role's disk space quota exceeded with name:40716 (seg2 127.0.0.1:6004 pid=1245042) +DROP EXTENSION diskquota; +INSERT INTO b SELECT generate_series(1, 100); -- ok +\c contrib_regression +DROP DATABASE test_clean_blackmap_after_drop; +DROP ROLE r; +\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\! gpstop -u > /dev/null diff --git a/tests/regress/sql/test_clean_blackmap_after_drop.sql b/tests/regress/sql/test_clean_blackmap_after_drop.sql new file mode 100644 index 00000000000..3ead23b4192 --- /dev/null +++ b/tests/regress/sql/test_clean_blackmap_after_drop.sql @@ -0,0 +1,26 @@ +CREATE DATABASE test_clean_blackmap_after_drop; + +\c test_clean_blackmap_after_drop +CREATE EXTENSION diskquota; + +\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null +\! gpstop -u > /dev/null + +CREATE ROLE r; +SELECT diskquota.set_role_quota('r', '1MB'); +CREATE TABLE b (t TEXT) DISTRIBUTED BY (t); +ALTER TABLE b OWNER TO r; +SELECT diskquota.wait_for_worker_new_epoch(); + +INSERT INTO b SELECT generate_series(1, 100000); -- fail + +DROP EXTENSION diskquota; + +INSERT INTO b SELECT generate_series(1, 100); -- ok + +\c contrib_regression +DROP DATABASE test_clean_blackmap_after_drop; +DROP ROLE r; + +\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\! gpstop -u > /dev/null From 218ec93ee14db3e6e63491e7908631f236674d9e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Xuebin=20Su=20=28=E8=8B=8F=E5=AD=A6=E6=96=8C=29?= <12034000+xuebinsu@users.noreply.github.com> Date: Thu, 14 Apr 2022 10:32:21 +0800 Subject: [PATCH 186/330] Reset memory account to fix memory leak (#203) Diskquota workers dispatch queries periodically. For each dispatch, one or more memory accounts will be created to keep track of the memory usage. These accounts should be reset after the query finishes to avoid a memory leak. 
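For context, here is a minimal sketch (not part of the patch) of the worker-loop shape this fix assumes. The loop condition `got_sigterm` is a hypothetical stand-in; the calls inside it are the ones visible in the diff below:

```
while (!got_sigterm)   /* hypothetical loop condition */
{
	/* each refresh dispatches SPI queries, and each dispatch creates memory accounts */
	if (!diskquota_is_paused()) refresh_disk_quota_model(false);
	/* reset the accounting tree so the per-dispatch accounts are released */
	MemoryAccounting_Reset();
	worker_increase_epoch(MyDatabaseId);
}
```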
Part of the stack trace of account creation looks like this: ``` fun:CreateMemoryAccountImpl fun:MemoryAccounting_CreateAccount fun:serializeNode fun:cdbdisp_buildPlanQueryParms fun:cdbdisp_dispatchX fun:CdbDispatchPlan fun:standard_ExecutorStart fun:_SPI_pquery.constprop.11 fun:_SPI_execute_plan fun:SPI_execute fun:do_load_quotas fun:load_quotas fun:refresh_disk_quota_model fun:disk_quota_worker_main fun:StartBackgroundWorker fun:do_start_bgworker fun:maybe_start_bgworker ``` This patch fixes the memory leak by calling the reset function at the end of the worker loop. --- diskquota.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/diskquota.c b/diskquota.c index 1ed3b8d88d2..a0801ec0a04 100644 --- a/diskquota.c +++ b/diskquota.c @@ -425,6 +425,8 @@ disk_quota_worker_main(Datum main_arg) /* Do the work */ if (!diskquota_is_paused()) refresh_disk_quota_model(false); + /* Reset memory account to fix memory leak */ + MemoryAccounting_Reset(); worker_increase_epoch(MyDatabaseId); } From 0facdf5cafa653f2f31c1df8c742cb814552e805 Mon Sep 17 00:00:00 2001 From: Sasasu Date: Thu, 14 Apr 2022 10:58:15 +0800 Subject: [PATCH 187/330] fix test case for test_manytable (#197) Rename the manytable test to activetable_limit. activetable_limit tests whether the active-table limit works: if the number of currently active tables exceeds max_active_tables, the bgworker will print some warning messages. GPDB should not crash, but diskquota's basic functionality is not guaranteed. --- tests/regress/diskquota_schedule | 2 +- .../expected/test_activetable_limit.out | 38 ++++++++++++++++ tests/regress/expected/test_manytable.out | 24 ----------- tests/regress/sql/test_activetable_limit.sql | 43 +++++++++++++++++++ tests/regress/sql/test_manytable.sql | 30 ------------- 5 files changed, 82 insertions(+), 55 deletions(-) create mode 100644 tests/regress/expected/test_activetable_limit.out delete mode 100644 tests/regress/expected/test_manytable.out create mode 100644 tests/regress/sql/test_activetable_limit.sql delete mode 100644 tests/regress/sql/test_manytable.sql diff --git a/tests/regress/diskquota_schedule b/tests/regress/diskquota_schedule index 09ef44d27ae..748090889f3 100644 --- a/tests/regress/diskquota_schedule +++ b/tests/regress/diskquota_schedule @@ -23,7 +23,7 @@ test: test_partition test: test_vacuum test: test_primary_failure test: test_extension -test: test_manytable +test: test_activetable_limit test: test_many_active_tables test: test_fetch_table_stat test: test_appendonly diff --git a/tests/regress/expected/test_activetable_limit.out b/tests/regress/expected/test_activetable_limit.out new file mode 100644 index 00000000000..868ab0a2ec4 --- /dev/null +++ b/tests/regress/expected/test_activetable_limit.out @@ -0,0 +1,38 @@ +-- table in 'diskquota not enabled database' should not be activetable +\! gpconfig -c diskquota.max_active_tables -v 2 > /dev/null +\! 
gpstop -arf > /dev/null +\c +CREATE DATABASE test_tablenum_limit_01; +CREATE DATABASE test_tablenum_limit_02; +\c test_tablenum_limit_01 +CREATE TABLE a01(i int) DISTRIBUTED BY (i); +CREATE TABLE a02(i int) DISTRIBUTED BY (i); +CREATE TABLE a03(i int) DISTRIBUTED BY (i); +INSERT INTO a01 values(generate_series(0, 500)); +INSERT INTO a02 values(generate_series(0, 500)); +INSERT INTO a03 values(generate_series(0, 500)); +\c test_tablenum_limit_02 +CREATE EXTENSION diskquota; +CREATE SCHEMA s; +SELECT diskquota.set_schema_quota('s', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +CREATE TABLE s.t1(i int) DISTRIBUTED BY (i); +INSERT INTO s.t1 SELECT generate_series(1,100000); -- expect failed. diskquota should works. activetable = 1 +CREATE TABLE s.t2(i int) DISTRIBUTED BY (i); +INSERT INTO s.t2 SELECT generate_series(1,100000); +ERROR: schema's disk space quota exceeded with name:s +CREATE TABLE s.t3(i int) DISTRIBUTED BY (i); +INSERT INTO s.t3 SELECT generate_series(1,100000); -- should not crash. activetable = 3 +ERROR: schema's disk space quota exceeded with name:s +DROP EXTENSION diskquota; +-- wait worker exit +\! sleep 1 +\c contrib_regression +DROP DATABASE test_tablenum_limit_01; +DROP DATABASE test_tablenum_limit_02; +\! gpconfig -r diskquota.max_active_tables > /dev/null +\! gpstop -arf > /dev/null diff --git a/tests/regress/expected/test_manytable.out b/tests/regress/expected/test_manytable.out deleted file mode 100644 index 5302de48ec8..00000000000 --- a/tests/regress/expected/test_manytable.out +++ /dev/null @@ -1,24 +0,0 @@ --- start_ignore --- test case manytable change cluster level config, can not run in parallel. -\! gpconfig -c diskquota.max_active_tables -v 2 > /dev/null --- end_ignore -\! echo $? -0 -CREATE DATABASE test_manytable01; -CREATE DATABASE test_manytable02; -\c test_manytable01 -CREATE TABLE a01(i int) DISTRIBUTED BY (i); -CREATE TABLE a02(i int) DISTRIBUTED BY (i); -CREATE TABLE a03(i int) DISTRIBUTED BY (i); -INSERT INTO a01 values(generate_series(0, 500)); -INSERT INTO a02 values(generate_series(0, 500)); -INSERT INTO a03 values(generate_series(0, 500)); -\c test_manytable02 -CREATE TABLE b01(i int) DISTRIBUTED BY (i); -INSERT INTO b01 values(generate_series(0, 500)); -\c postgres -DROP DATABASE test_manytable01; -DROP DATABASE test_manytable02; --- start_ignore -\! gpconfig -c diskquota.max_active_tables -v 1024 > /dev/null --- end_ignore diff --git a/tests/regress/sql/test_activetable_limit.sql b/tests/regress/sql/test_activetable_limit.sql new file mode 100644 index 00000000000..47a439bf89e --- /dev/null +++ b/tests/regress/sql/test_activetable_limit.sql @@ -0,0 +1,43 @@ +-- table in 'diskquota not enabled database' should not be activetable +\! gpconfig -c diskquota.max_active_tables -v 2 > /dev/null +\! gpstop -arf > /dev/null + +\c + +CREATE DATABASE test_tablenum_limit_01; +CREATE DATABASE test_tablenum_limit_02; + +\c test_tablenum_limit_01 + +CREATE TABLE a01(i int) DISTRIBUTED BY (i); +CREATE TABLE a02(i int) DISTRIBUTED BY (i); +CREATE TABLE a03(i int) DISTRIBUTED BY (i); + +INSERT INTO a01 values(generate_series(0, 500)); +INSERT INTO a02 values(generate_series(0, 500)); +INSERT INTO a03 values(generate_series(0, 500)); + +\c test_tablenum_limit_02 +CREATE EXTENSION diskquota; +CREATE SCHEMA s; +SELECT diskquota.set_schema_quota('s', '1 MB'); + +CREATE TABLE s.t1(i int) DISTRIBUTED BY (i); +INSERT INTO s.t1 SELECT generate_series(1,100000); -- expect failed. diskquota should works. 
activetable = 1 +CREATE TABLE s.t2(i int) DISTRIBUTED BY (i); +INSERT INTO s.t2 SELECT generate_series(1,100000); + +CREATE TABLE s.t3(i int) DISTRIBUTED BY (i); +INSERT INTO s.t3 SELECT generate_series(1,100000); -- should not crash. activetable = 3 + +DROP EXTENSION diskquota; + +-- wait worker exit +\! sleep 1 + +\c contrib_regression +DROP DATABASE test_tablenum_limit_01; +DROP DATABASE test_tablenum_limit_02; + +\! gpconfig -r diskquota.max_active_tables > /dev/null +\! gpstop -arf > /dev/null diff --git a/tests/regress/sql/test_manytable.sql b/tests/regress/sql/test_manytable.sql deleted file mode 100644 index a5f24cb7cee..00000000000 --- a/tests/regress/sql/test_manytable.sql +++ /dev/null @@ -1,30 +0,0 @@ --- start_ignore -\! gpconfig -c diskquota.max_active_tables -v 2 > /dev/null --- end_ignore -\! echo $? - -CREATE DATABASE test_manytable01; -CREATE DATABASE test_manytable02; - -\c test_manytable01 - -CREATE TABLE a01(i int) DISTRIBUTED BY (i); -CREATE TABLE a02(i int) DISTRIBUTED BY (i); -CREATE TABLE a03(i int) DISTRIBUTED BY (i); - -INSERT INTO a01 values(generate_series(0, 500)); -INSERT INTO a02 values(generate_series(0, 500)); -INSERT INTO a03 values(generate_series(0, 500)); - -\c test_manytable02 -CREATE TABLE b01(i int) DISTRIBUTED BY (i); -INSERT INTO b01 values(generate_series(0, 500)); - -\c postgres -DROP DATABASE test_manytable01; -DROP DATABASE test_manytable02; - --- start_ignore -\! gpconfig -r diskquota.max_active_tables -\! gpstop -far --- end_ignore From d115c25d62f4c06d04daafea4cb1fbb80a9144d6 Mon Sep 17 00:00:00 2001 From: Sasasu Date: Sat, 16 Apr 2022 17:12:13 +0800 Subject: [PATCH 188/330] some fix for release CI (#206) Clean the blackmap after dropping diskquota (follow-up to #196). Also fix the activetable_limit test case, which failed when naptime > 0. --- quotamodel.c | 3 +- .../expected/test_activetable_limit.out | 32 +++++++++++++++---- .../test_clean_blackmap_after_drop.out | 4 +-- tests/regress/sql/test_activetable_limit.sql | 27 ++++++++++++---- .../sql/test_clean_blackmap_after_drop.sql | 2 +- 5 files changed, 51 insertions(+), 17 deletions(-) diff --git a/quotamodel.c b/quotamodel.c index a6eaaa432dc..4ad9a45bfba 100644 --- a/quotamodel.c +++ b/quotamodel.c @@ -124,6 +124,7 @@ struct BlackMapEntry Oid tablespaceoid; uint32 targettype; /* + * TODO refactor this data structure * QD index the blackmap by (targetoid, databaseoid, tablespaceoid, targettype). * QE index the blackmap by (relfilenode). */ @@ -1499,7 +1500,7 @@ invalidate_database_blackmap(Oid dbid) hash_seq_init(&iter, disk_quota_black_map); while ((entry = hash_seq_search(&iter)) != NULL) { - if (entry->databaseoid == dbid) + if (entry->databaseoid == dbid || entry->relfilenode.dbNode == dbid) { hash_search(disk_quota_black_map, entry, HASH_REMOVE, NULL); } diff --git a/tests/regress/expected/test_activetable_limit.out b/tests/regress/expected/test_activetable_limit.out index 868ab0a2ec4..0232f7e64cd 100644 --- a/tests/regress/expected/test_activetable_limit.out +++ b/tests/regress/expected/test_activetable_limit.out @@ -20,14 +20,34 @@ SELECT diskquota.set_schema_quota('s', '1 MB'); (1 row) -CREATE TABLE s.t1(i int) DISTRIBUTED BY (i); -INSERT INTO s.t1 SELECT generate_series(1,100000); -- expect failed. diskquota should works. 
activetable = 1 -CREATE TABLE s.t2(i int) DISTRIBUTED BY (i); -INSERT INTO s.t2 SELECT generate_series(1,100000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +CREATE TABLE s.t1(i int) DISTRIBUTED BY (i); -- activetable = 1 +INSERT INTO s.t1 SELECT generate_series(1, 100000); -- ok. diskquota soft limit does not check when first write +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +CREATE TABLE s.t2(i int) DISTRIBUTED BY (i); -- activetable = 2 +INSERT INTO s.t2 SELECT generate_series(1, 10); -- expect failed ERROR: schema's disk space quota exceeded with name:s -CREATE TABLE s.t3(i int) DISTRIBUTED BY (i); -INSERT INTO s.t3 SELECT generate_series(1,100000); -- should not crash. activetable = 3 +CREATE TABLE s.t3(i int) DISTRIBUTED BY (i); -- activetable = 3 should not crash. +INSERT INTO s.t3 SELECT generate_series(1, 10); -- expect failed ERROR: schema's disk space quota exceeded with name:s +-- Q: why diskquota still works when activetable = 3? +-- A: the activetable limit by shmem size, calculate by hash_estimate_size() +-- the result will bigger than sizeof(DiskQuotaActiveTableEntry) * max_active_tables +-- the real capacity of this data structure based on the hash conflict probability. +-- so we can not predict when the data structure will be fill in fully. +-- +-- this test case is useless, remove this if anyone dislike it. +-- but the hash capacity is smaller than 6, so the test case works for issue 51 DROP EXTENSION diskquota; -- wait worker exit \! sleep 1 diff --git a/tests/regress/expected/test_clean_blackmap_after_drop.out b/tests/regress/expected/test_clean_blackmap_after_drop.out index 20e1be68e0d..e17ff20e6c0 100644 --- a/tests/regress/expected/test_clean_blackmap_after_drop.out +++ b/tests/regress/expected/test_clean_blackmap_after_drop.out @@ -19,8 +19,8 @@ SELECT diskquota.wait_for_worker_new_epoch(); t (1 row) -INSERT INTO b SELECT generate_series(1, 100000); -- fail -ERROR: role's disk space quota exceeded with name:40716 (seg2 127.0.0.1:6004 pid=1245042) +INSERT INTO b SELECT generate_series(1, 100000000); -- fail +ERROR: role's disk space quota exceeded with name:16574 (seg0 127.0.0.1:6002 pid=356116) DROP EXTENSION diskquota; INSERT INTO b SELECT generate_series(1, 100); -- ok \c contrib_regression diff --git a/tests/regress/sql/test_activetable_limit.sql b/tests/regress/sql/test_activetable_limit.sql index 47a439bf89e..7c7bf9ded5e 100644 --- a/tests/regress/sql/test_activetable_limit.sql +++ b/tests/regress/sql/test_activetable_limit.sql @@ -22,13 +22,26 @@ CREATE EXTENSION diskquota; CREATE SCHEMA s; SELECT diskquota.set_schema_quota('s', '1 MB'); -CREATE TABLE s.t1(i int) DISTRIBUTED BY (i); -INSERT INTO s.t1 SELECT generate_series(1,100000); -- expect failed. diskquota should works. activetable = 1 -CREATE TABLE s.t2(i int) DISTRIBUTED BY (i); -INSERT INTO s.t2 SELECT generate_series(1,100000); - -CREATE TABLE s.t3(i int) DISTRIBUTED BY (i); -INSERT INTO s.t3 SELECT generate_series(1,100000); -- should not crash. activetable = 3 +SELECT diskquota.wait_for_worker_new_epoch(); + +CREATE TABLE s.t1(i int) DISTRIBUTED BY (i); -- activetable = 1 +INSERT INTO s.t1 SELECT generate_series(1, 100000); -- ok. 
diskquota soft limit does not check when first write + +SELECT diskquota.wait_for_worker_new_epoch(); + +CREATE TABLE s.t2(i int) DISTRIBUTED BY (i); -- activetable = 2 +INSERT INTO s.t2 SELECT generate_series(1, 10); -- expect failed +CREATE TABLE s.t3(i int) DISTRIBUTED BY (i); -- activetable = 3 should not crash. +INSERT INTO s.t3 SELECT generate_series(1, 10); -- expect failed + +-- Q: why diskquota still works when activetable = 3? +-- A: the activetable limit by shmem size, calculate by hash_estimate_size() +-- the result will bigger than sizeof(DiskQuotaActiveTableEntry) * max_active_tables +-- the real capacity of this data structure based on the hash conflict probability. +-- so we can not predict when the data structure will be fill in fully. +-- +-- this test case is useless, remove this if anyone dislike it. +-- but the hash capacity is smaller than 6, so the test case works for issue 51 DROP EXTENSION diskquota; diff --git a/tests/regress/sql/test_clean_blackmap_after_drop.sql b/tests/regress/sql/test_clean_blackmap_after_drop.sql index 3ead23b4192..debb2c5b909 100644 --- a/tests/regress/sql/test_clean_blackmap_after_drop.sql +++ b/tests/regress/sql/test_clean_blackmap_after_drop.sql @@ -12,7 +12,7 @@ CREATE TABLE b (t TEXT) DISTRIBUTED BY (t); ALTER TABLE b OWNER TO r; SELECT diskquota.wait_for_worker_new_epoch(); -INSERT INTO b SELECT generate_series(1, 100000); -- fail +INSERT INTO b SELECT generate_series(1, 100000000); -- fail DROP EXTENSION diskquota; From c6671d2dc602cfdfa5cd44d2d8aac67af27fdaf4 Mon Sep 17 00:00:00 2001 From: Chen Mulong Date: Mon, 18 Apr 2022 18:27:25 +0800 Subject: [PATCH 189/330] Fix mistaken log statement (#207) Also add checks that refresh_blackmap() must not be executed on the QD. --- quotamodel.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/quotamodel.c b/quotamodel.c index 4ad9a45bfba..0091acd879c 100644 --- a/quotamodel.c +++ b/quotamodel.c @@ -1622,8 +1622,10 @@ refresh_blackmap(PG_FUNCTION_ARGS) HASHCTL hashctl; int ret_code; - if (!superuser()) errmsg("must be superuser to update blackmap"); - + if (!superuser()) + ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("must be superuser to update blackmap"))); + if (IS_QUERY_DISPATCHER()) + ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), errmsg("\"refresh_blackmap()\" can only be executed on QE."))); if (ARR_NDIM(blackmap_array_type) > 1 || ARR_NDIM(active_oid_array_type) > 1) ereport(ERROR, (errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR), errmsg("1-dimensional array needed"))); From 6e0b32d67db6c8693eb01d8761708ab7c4d5ba3b Mon Sep 17 00:00:00 2001 From: Chen Mulong Date: Tue, 19 Apr 2022 09:29:09 +0800 Subject: [PATCH 190/330] Fix flaky ctas_pause on release build (#210) Caused by the longer naptime in the release build. 
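Presumably, with the longer naptime the smaller CTAS could finish before the bgworker refreshed the blackmap, so the expected failure did not always happen; doubling the row count makes the write outlast a refresh cycle. A sketch of the pattern being stabilized (statements taken verbatim from the test below; the 1 MB quota on schema hardlimit_s and diskquota.hard_limit are set earlier in the test):

```
CREATE TABLE t1 (i) AS SELECT generate_series(1,10000000) DISTRIBUTED BY (i); -- expect fail
SELECT diskquota.pause();
CREATE TABLE t1 (i) AS SELECT generate_series(1,10000000) DISTRIBUTED BY (i); -- expect succeed
```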
--- tests/regress/expected/test_ctas_pause.out | 4 ++-- tests/regress/sql/test_ctas_pause.sql | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/regress/expected/test_ctas_pause.out b/tests/regress/expected/test_ctas_pause.out index b3b96a8f694..9c7bfec8447 100644 --- a/tests/regress/expected/test_ctas_pause.out +++ b/tests/regress/expected/test_ctas_pause.out @@ -15,7 +15,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); (1 row) -- heap table -CREATE TABLE t1 (i) AS SELECT generate_series(1,5000000) DISTRIBUTED BY (i); -- expect fail +CREATE TABLE t1 (i) AS SELECT generate_series(1,10000000) DISTRIBUTED BY (i); -- expect fail NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. ERROR: schema's disk space quota exceeded with name:110528 (seg1 127.0.0.1:6003 pid=73892) @@ -25,7 +25,7 @@ SELECT diskquota.pause(); (1 row) -CREATE TABLE t1 (i) AS SELECT generate_series(1,5000000) DISTRIBUTED BY (i); -- expect succeed +CREATE TABLE t1 (i) AS SELECT generate_series(1,10000000) DISTRIBUTED BY (i); -- expect succeed NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. -- disable hardlimit and do some clean-ups. diff --git a/tests/regress/sql/test_ctas_pause.sql b/tests/regress/sql/test_ctas_pause.sql index 020f393177f..425344fbb77 100644 --- a/tests/regress/sql/test_ctas_pause.sql +++ b/tests/regress/sql/test_ctas_pause.sql @@ -7,11 +7,11 @@ SELECT diskquota.set_schema_quota('hardlimit_s', '1 MB'); SELECT diskquota.wait_for_worker_new_epoch(); -- heap table -CREATE TABLE t1 (i) AS SELECT generate_series(1,5000000) DISTRIBUTED BY (i); -- expect fail +CREATE TABLE t1 (i) AS SELECT generate_series(1,10000000) DISTRIBUTED BY (i); -- expect fail SELECT diskquota.pause(); -CREATE TABLE t1 (i) AS SELECT generate_series(1,5000000) DISTRIBUTED BY (i); -- expect succeed +CREATE TABLE t1 (i) AS SELECT generate_series(1,10000000) DISTRIBUTED BY (i); -- expect succeed -- disable hardlimit and do some clean-ups. \! 
gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null From 91be66ef128f226547233be404349e0918cae8a4 Mon Sep 17 00:00:00 2001 From: Chen Mulong Date: Tue, 19 Apr 2022 10:53:16 +0800 Subject: [PATCH 191/330] Reformat quota exceeded message (#209) Co-authored-by: Sasasu --- quotamodel.c | 12 ++++---- tests/init_file | 16 +++++----- tests/isolation2/expected/test_blackmap.out | 30 +++++++++---------- .../expected/test_postmaster_restart.out | 4 +-- .../expected/test_activetable_limit.out | 4 +-- tests/regress/expected/test_appendonly.out | 4 +-- .../test_clean_blackmap_after_drop.out | 2 +- tests/regress/expected/test_column.out | 6 ++-- tests/regress/expected/test_copy.out | 2 +- tests/regress/expected/test_ctas_pause.out | 2 +- .../expected/test_default_tablespace.out | 8 ++--- tests/regress/expected/test_delete_quota.out | 2 +- .../expected/test_drop_after_pause.out | 2 +- tests/regress/expected/test_drop_table.out | 2 +- tests/regress/expected/test_extension.out | 18 +++++------ tests/regress/expected/test_index.out | 4 +-- .../expected/test_insert_after_drop.out | 2 +- tests/regress/expected/test_partition.out | 4 +-- .../expected/test_pause_and_resume.out | 4 +-- .../test_pause_and_resume_multiple_db.out | 12 ++++---- .../regress/expected/test_primary_failure.out | 4 +-- tests/regress/expected/test_rename.out | 12 ++++---- tests/regress/expected/test_reschema.out | 2 +- tests/regress/expected/test_role.out | 6 ++-- tests/regress/expected/test_schema.out | 8 ++--- .../expected/test_tablespace_diff_schema.out | 4 +-- .../regress/expected/test_tablespace_role.out | 10 +++---- .../expected/test_tablespace_role_perseg.out | 12 ++++---- .../expected/test_tablespace_schema.out | 10 +++---- .../test_tablespace_schema_perseg.out | 12 ++++---- tests/regress/expected/test_temp_role.out | 2 +- tests/regress/expected/test_toast.out | 2 +- tests/regress/expected/test_truncate.out | 4 +-- tests/regress/expected/test_update.out | 2 +- tests/regress/expected/test_vacuum.out | 4 +-- .../1.0_test_in_2.0_quota_create_in_1.0.out | 4 +-- 36 files changed, 119 insertions(+), 119 deletions(-) diff --git a/quotamodel.c b/quotamodel.c index 0091acd879c..3097d08b5a7 100644 --- a/quotamodel.c +++ b/quotamodel.c @@ -1551,34 +1551,34 @@ export_exceeded_error(GlobalBlackMapEntry *entry, bool skip_name) switch (blackentry->targettype) { case NAMESPACE_QUOTA: - ereport(ERROR, (errcode(ERRCODE_DISK_FULL), errmsg("schema's disk space quota exceeded with name:%s", + ereport(ERROR, (errcode(ERRCODE_DISK_FULL), errmsg("schema's disk space quota exceeded with name: %s", GetNamespaceName(blackentry->targetoid, skip_name)))); break; case ROLE_QUOTA: - ereport(ERROR, (errcode(ERRCODE_DISK_FULL), errmsg("role's disk space quota exceeded with name:%s", + ereport(ERROR, (errcode(ERRCODE_DISK_FULL), errmsg("role's disk space quota exceeded with name: %s", GetUserName(blackentry->targetoid, skip_name)))); break; case NAMESPACE_TABLESPACE_QUOTA: if (entry->segexceeded) ereport(ERROR, (errcode(ERRCODE_DISK_FULL), - errmsg("tablespace:%s schema:%s diskquota exceeded per segment quota", + errmsg("tablespace: %s, schema: %s diskquota exceeded per segment quota", GetTablespaceName(blackentry->tablespaceoid, skip_name), GetNamespaceName(blackentry->targetoid, skip_name)))); else ereport(ERROR, - (errcode(ERRCODE_DISK_FULL), errmsg("tablespace:%s schema:%s diskquota exceeded", + (errcode(ERRCODE_DISK_FULL), errmsg("tablespace: %s, schema: %s diskquota exceeded", GetTablespaceName(blackentry->tablespaceoid, skip_name), 
GetNamespaceName(blackentry->targetoid, skip_name)))); break; case ROLE_TABLESPACE_QUOTA: if (entry->segexceeded) ereport(ERROR, (errcode(ERRCODE_DISK_FULL), - errmsg("tablespace:%s role:%s diskquota exceeded per segment quota", + errmsg("tablespace: %s, role: %s diskquota exceeded per segment quota", GetTablespaceName(blackentry->tablespaceoid, skip_name), GetUserName(blackentry->targetoid, skip_name)))); else ereport(ERROR, - (errcode(ERRCODE_DISK_FULL), errmsg("tablespace:%s role:%s diskquota exceeded", + (errcode(ERRCODE_DISK_FULL), errmsg("tablespace: %s, role: %s diskquota exceeded", GetTablespaceName(blackentry->tablespaceoid, skip_name), GetUserName(blackentry->targetoid, skip_name)))); break; diff --git a/tests/init_file b/tests/init_file index 114c58e4a24..498482f3a9a 100644 --- a/tests/init_file +++ b/tests/init_file @@ -21,15 +21,15 @@ m/(slice\d+ [0-9.]+:\d+ pid=\d+)/ s/(slice\d+ [0-9.]+:\d+ pid=\d+)// # Remove oid of schema/role/tablespace from error message. -m/ERROR: role's disk space quota exceeded with name:\d+.*/ -s/ERROR: role's disk space quota exceeded with name:\d+.*/[hardlimit] role's disk space quota exceeded/ +m/ERROR: role's disk space quota exceeded with name: \d+.*/ +s/ERROR: role's disk space quota exceeded with name: \d+.*/[hardlimit] role's disk space quota exceeded/ -m/ERROR: schema's disk space quota exceeded with name:\d+.*/ -s/ERROR: schema's disk space quota exceeded with name:\d+.*/[hardlimit] schema's disk space quota exceeded/ +m/ERROR: schema's disk space quota exceeded with name: \d+.*/ +s/ERROR: schema's disk space quota exceeded with name: \d+.*/[hardlimit] schema's disk space quota exceeded/ -m/ERROR: tablespace:\d+ role:\d+ diskquota exceeded.*/ -s/ERROR: tablespace:\d+ role:\d+ diskquota exceeded.*/[hardlimit] tablespace-role's disk space quota exceeded/ +m/ERROR: tablespace: \d+, role: \d+ diskquota exceeded.*/ +s/ERROR: tablespace: \d+, role: \d+ diskquota exceeded.*/[hardlimit] tablespace-role's disk space quota exceeded/ -m/ERROR: tablespace:\d+ schema:\d+ diskquota exceeded.*/ -s/ERROR: tablespace:\d+ schema:\d+ diskquota exceeded.*/[hardlimit] tablespace-schema's disk space quota exceeded/ +m/ERROR: tablespace: \d+, schema: \d+ diskquota exceeded.*/ +s/ERROR: tablespace: \d+, schema: \d+ diskquota exceeded.*/[hardlimit] tablespace-schema's disk space quota exceeded/ -- end_matchsubs diff --git a/tests/isolation2/expected/test_blackmap.out b/tests/isolation2/expected/test_blackmap.out index 7eacd4cfb66..7decc2f533a 100644 --- a/tests/isolation2/expected/test_blackmap.out +++ b/tests/isolation2/expected/test_blackmap.out @@ -48,7 +48,7 @@ SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) -- Session 1 will return and emit an error message saying that the quota limit is exceeded on seg0. 1<: <... completed> -ERROR: schema's disk space quota exceeded with name:2200 (seg0 127.0.0.1:6002 pid=4675) +ERROR: schema's disk space quota exceeded with name: 2200 (seg0 127.0.0.1:6002 pid=4675) -- Clean up the blackmap on seg0. SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; @@ -87,7 +87,7 @@ SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) -- Session 1 will return and emit an error message saying that the quota limit is exceeded on seg0. 1<: <... 
completed> -ERROR: schema's disk space quota exceeded with name:2200 (seg0 127.0.0.1:6002 pid=4675) +ERROR: schema's disk space quota exceeded with name: 2200 (seg0 127.0.0.1:6002 pid=4675) -- Clean up the blackmap on seg0. SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; @@ -126,7 +126,7 @@ SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) -- Session 1 will return and emit an error message saying that the quota limit is exceeded on seg0. 1<: <... completed> -ERROR: schema's disk space quota exceeded with name:2200 (seg0 127.0.0.1:6002 pid=4675) +ERROR: schema's disk space quota exceeded with name: 2200 (seg0 127.0.0.1:6002 pid=4675) -- Clean up the blackmap on seg0. SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; @@ -167,7 +167,7 @@ SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) -- Session 1 will return and emit an error message saying that the quota limit is exceeded on seg0. 1<: <... completed> -ERROR: schema's disk space quota exceeded with name:2200 (seg0 127.0.0.1:6002 pid=4675) +ERROR: schema's disk space quota exceeded with name: 2200 (seg0 127.0.0.1:6002 pid=4675) -- Clean up the blackmap on seg0. SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; @@ -199,7 +199,7 @@ SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) Success: (1 row) 1<: <... completed> -ERROR: tablespace:1663 schema:2200 diskquota exceeded per segment quota (seg0 127.0.0.1:6002 pid=4675) +ERROR: tablespace: 1663, schema: 2200 diskquota exceeded per segment quota (seg0 127.0.0.1:6002 pid=4675) -- Clean up the blackmap on seg0. SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; refresh_blackmap @@ -230,7 +230,7 @@ SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) Success: (1 row) 1<: <... completed> -ERROR: tablespace:1663 role:10 diskquota exceeded per segment quota (seg0 127.0.0.1:6002 pid=4675) +ERROR: tablespace: 1663, role: 10 diskquota exceeded per segment quota (seg0 127.0.0.1:6002 pid=4675) -- Clean up the blackmap on seg0. SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; refresh_blackmap @@ -314,7 +314,7 @@ SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) Success: (1 row) 1<: <... completed> -ERROR: schema's disk space quota exceeded with name:2200 (seg0 127.0.0.1:6002 pid=4675) +ERROR: schema's disk space quota exceeded with name: 2200 (seg0 127.0.0.1:6002 pid=4675) 1: ABORT; ABORT -- Clean up the blackmap on seg0. @@ -359,7 +359,7 @@ SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) Success: (1 row) 1<: <... completed> -ERROR: role's disk space quota exceeded with name:10 (seg0 127.0.0.1:6002 pid=4675) +ERROR: role's disk space quota exceeded with name: 10 (seg0 127.0.0.1:6002 pid=4675) 1: ABORT; ABORT -- Clean up the blackmap on seg0. @@ -404,7 +404,7 @@ SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) Success: (1 row) 1<: <... 
completed> -ERROR: tablespace:1663 schema:2200 diskquota exceeded (seg0 127.0.0.1:6002 pid=4675) +ERROR: tablespace: 1663, schema: 2200 diskquota exceeded (seg0 127.0.0.1:6002 pid=4675) 1: ABORT; ABORT -- Clean up the blackmap on seg0. @@ -449,7 +449,7 @@ SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) Success: (1 row) 1<: <... completed> -ERROR: tablespace:1663 role:10 diskquota exceeded (seg0 127.0.0.1:6002 pid=4675) +ERROR: tablespace: 1663, role: 10 diskquota exceeded (seg0 127.0.0.1:6002 pid=4675) 1: ABORT; ABORT -- Clean up the blackmap on seg0. @@ -494,7 +494,7 @@ SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) Success: (1 row) 1<: <... completed> -ERROR: tablespace:1663 schema:2200 diskquota exceeded per segment quota (seg0 127.0.0.1:6002 pid=4675) +ERROR: tablespace: 1663, schema: 2200 diskquota exceeded per segment quota (seg0 127.0.0.1:6002 pid=4675) 1: ABORT; ABORT -- Clean up the blackmap on seg0. @@ -539,7 +539,7 @@ SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) Success: (1 row) 1<: <... completed> -ERROR: tablespace:1663 role:10 diskquota exceeded per segment quota (seg0 127.0.0.1:6002 pid=4675) +ERROR: tablespace: 1663, role: 10 diskquota exceeded per segment quota (seg0 127.0.0.1:6002 pid=4675) 1: ABORT; ABORT -- Clean up the blackmap on seg0. @@ -586,7 +586,7 @@ SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) Success: (1 row) 1<: <... completed> -ERROR: schema's disk space quota exceeded with name:2200 (seg0 127.0.0.1:6002 pid=4675) +ERROR: schema's disk space quota exceeded with name: 2200 (seg0 127.0.0.1:6002 pid=4675) 1: ABORT; ABORT -- Clean up the blackmap on seg0. @@ -634,7 +634,7 @@ SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) Success: (1 row) 1<: <... completed> -ERROR: schema's disk space quota exceeded with name:2200 (seg0 127.0.0.1:6002 pid=4675) +ERROR: schema's disk space quota exceeded with name: 2200 (seg0 127.0.0.1:6002 pid=4675) 1: ABORT; ABORT -- Clean up the blackmap on seg0. @@ -682,7 +682,7 @@ SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) Success: (1 row) 1<: <... completed> -ERROR: schema's disk space quota exceeded with name:2200 (seg0 127.0.0.1:6002 pid=4675) +ERROR: schema's disk space quota exceeded with name: 2200 (seg0 127.0.0.1:6002 pid=4675) 1: ABORT; ABORT -- Clean up the blackmap on seg0. diff --git a/tests/isolation2/expected/test_postmaster_restart.out b/tests/isolation2/expected/test_postmaster_restart.out index f15ccfd4c1e..a35cfb7fce9 100644 --- a/tests/isolation2/expected/test_postmaster_restart.out +++ b/tests/isolation2/expected/test_postmaster_restart.out @@ -27,7 +27,7 @@ SET -- expect fail 1: CREATE TABLE t1 AS SELECT generate_series(1,10000000); -ERROR: schema's disk space quota exceeded with name:157893 (seg0 127.0.0.1:6002 pid=1025673) +ERROR: schema's disk space quota exceeded with name: 157893 (seg0 127.0.0.1:6002 pid=1025673) 1q: ... 
-- launcher should exist @@ -113,7 +113,7 @@ SET (1 row) -- expect fail 1: CREATE TABLE t2 AS SELECT generate_series(1,10000000); -ERROR: schema's disk space quota exceeded with name:158089 (seg0 127.0.0.1:6002 pid=1027799) +ERROR: schema's disk space quota exceeded with name: 158089 (seg0 127.0.0.1:6002 pid=1027799) -- enlarge the quota limits 1: SELECT diskquota.set_schema_quota('postmaster_restart_s', '100 MB'); set_schema_quota diff --git a/tests/regress/expected/test_activetable_limit.out b/tests/regress/expected/test_activetable_limit.out index 0232f7e64cd..b5cc0fae4f8 100644 --- a/tests/regress/expected/test_activetable_limit.out +++ b/tests/regress/expected/test_activetable_limit.out @@ -36,10 +36,10 @@ SELECT diskquota.wait_for_worker_new_epoch(); CREATE TABLE s.t2(i int) DISTRIBUTED BY (i); -- activetable = 2 INSERT INTO s.t2 SELECT generate_series(1, 10); -- expect failed -ERROR: schema's disk space quota exceeded with name:s +ERROR: schema's disk space quota exceeded with name: s CREATE TABLE s.t3(i int) DISTRIBUTED BY (i); -- activetable = 3 should not crash. INSERT INTO s.t3 SELECT generate_series(1, 10); -- expect failed -ERROR: schema's disk space quota exceeded with name:s +ERROR: schema's disk space quota exceeded with name: s -- Q: why diskquota still works when activetable = 3? -- A: the activetable limit by shmem size, calculate by hash_estimate_size() -- the result will bigger than sizeof(DiskQuotaActiveTableEntry) * max_active_tables diff --git a/tests/regress/expected/test_appendonly.out b/tests/regress/expected/test_appendonly.out index b2802aa9b5c..2d4575339db 100644 --- a/tests/regress/expected/test_appendonly.out +++ b/tests/regress/expected/test_appendonly.out @@ -67,9 +67,9 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- expect fail. 
INSERT INTO t_ao SELECT generate_series(1, 10); -ERROR: schema's disk space quota exceeded with name:s_appendonly +ERROR: schema's disk space quota exceeded with name: s_appendonly INSERT INTO t_aoco SELECT generate_series(1, 10); -ERROR: schema's disk space quota exceeded with name:s_appendonly +ERROR: schema's disk space quota exceeded with name: s_appendonly DROP TABLE t_ao; DROP TABLE t_aoco; SET search_path TO DEFAULT; diff --git a/tests/regress/expected/test_clean_blackmap_after_drop.out b/tests/regress/expected/test_clean_blackmap_after_drop.out index e17ff20e6c0..396164ec9c7 100644 --- a/tests/regress/expected/test_clean_blackmap_after_drop.out +++ b/tests/regress/expected/test_clean_blackmap_after_drop.out @@ -20,7 +20,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); (1 row) INSERT INTO b SELECT generate_series(1, 100000000); -- fail -ERROR: role's disk space quota exceeded with name:16574 (seg0 127.0.0.1:6002 pid=356116) +ERROR: role's disk space quota exceeded with name: 16574 (seg0 127.0.0.1:6002 pid=356116) DROP EXTENSION diskquota; INSERT INTO b SELECT generate_series(1, 100); -- ok \c contrib_regression diff --git a/tests/regress/expected/test_column.out b/tests/regress/expected/test_column.out index dd07de93e88..185d63b8a05 100644 --- a/tests/regress/expected/test_column.out +++ b/tests/regress/expected/test_column.out @@ -26,10 +26,10 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- expect fail INSERT INTO a2 SELECT generate_series(1,10); -ERROR: schema's disk space quota exceeded with name:scolumn +ERROR: schema's disk space quota exceeded with name: scolumn ALTER TABLE a2 ADD COLUMN j VARCHAR(50); UPDATE a2 SET j = 'add value for column j'; -ERROR: schema's disk space quota exceeded with name:scolumn +ERROR: schema's disk space quota exceeded with name: scolumn SELECT diskquota.wait_for_worker_new_epoch(); wait_for_worker_new_epoch --------------------------- @@ -38,7 +38,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert failed after add column INSERT INTO a2 SELECT generate_series(1,10); -ERROR: schema's disk space quota exceeded with name:scolumn +ERROR: schema's disk space quota exceeded with name: scolumn DROP TABLE a2; RESET search_path; DROP SCHEMA scolumn; diff --git a/tests/regress/expected/test_copy.out b/tests/regress/expected/test_copy.out index b0cde72fe9a..880f73801fd 100644 --- a/tests/regress/expected/test_copy.out +++ b/tests/regress/expected/test_copy.out @@ -22,7 +22,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- expect copy fail COPY c FROM '/tmp/csmall.txt'; -ERROR: schema's disk space quota exceeded with name:s3 +ERROR: schema's disk space quota exceeded with name: s3 DROP TABLE c; RESET search_path; DROP SCHEMA s3; diff --git a/tests/regress/expected/test_ctas_pause.out b/tests/regress/expected/test_ctas_pause.out index 9c7bfec8447..fd3971328ce 100644 --- a/tests/regress/expected/test_ctas_pause.out +++ b/tests/regress/expected/test_ctas_pause.out @@ -18,7 +18,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); CREATE TABLE t1 (i) AS SELECT generate_series(1,10000000) DISTRIBUTED BY (i); -- expect fail NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
-ERROR: schema's disk space quota exceeded with name:110528 (seg1 127.0.0.1:6003 pid=73892) +ERROR: schema's disk space quota exceeded with name: 110528 (seg1 127.0.0.1:6003 pid=73892) SELECT diskquota.pause(); pause ------- diff --git a/tests/regress/expected/test_default_tablespace.out b/tests/regress/expected/test_default_tablespace.out index 3e9e78ed9f1..3ab8b74b81d 100644 --- a/tests/regress/expected/test_default_tablespace.out +++ b/tests/regress/expected/test_default_tablespace.out @@ -39,7 +39,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert to fail INSERT INTO t SELECT generate_series(1, 1000000); -ERROR: tablespace:pg_default role:role1 diskquota exceeded +ERROR: tablespace: pg_default, role: role1 diskquota exceeded SELECT r.rolname, t.spcname, b.target_type FROM diskquota.blackmap AS b, pg_tablespace AS t, pg_roles AS r WHERE b.tablespace_oid = t.oid AND b.target_oid = r.oid AND r.rolname = 'role1' @@ -75,7 +75,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert to fail because of hard limits INSERT INTO t SELECT generate_series(1, 50000000); -ERROR: tablespace:1663 role:3050113 diskquota exceeded (seg1 127.0.0.1:6003 pid=21307) +ERROR: tablespace: 1663, role: 3050113 diskquota exceeded (seg1 127.0.0.1:6003 pid=21307) DROP TABLE IF EXISTS t; SET ROLE role1; -- database in customized tablespace @@ -110,7 +110,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert to fail INSERT INTO t_in_custom_tablespace SELECT generate_series(1, 1000000); -ERROR: tablespace:custom_tablespace role:role1 diskquota exceeded +ERROR: tablespace: custom_tablespace, role: role1 diskquota exceeded SELECT r.rolname, t.spcname, b.target_type FROM diskquota.blackmap AS b, pg_tablespace AS t, pg_roles AS r WHERE b.tablespace_oid = t.oid AND b.target_oid = r.oid AND r.rolname = 'role1' @@ -153,7 +153,7 @@ DROP TABLE IF EXISTS t_in_custom_tablespace; NOTICE: table "t_in_custom_tablespace" does not exist, skipping -- expect insert to fail because of hard limits CREATE TABLE t_in_custom_tablespace (i) AS SELECT generate_series(1, 50000000) DISTRIBUTED BY (i); -ERROR: tablespace:3050120 role:3050113 diskquota exceeded (seg0 127.0.0.1:6002 pid=22270) +ERROR: tablespace: 3050120, role: 3050113 diskquota exceeded (seg0 127.0.0.1:6002 pid=22270) -- clean up DROP TABLE IF EXISTS t_in_custom_tablespace; NOTICE: table "t_in_custom_tablespace" does not exist, skipping diff --git a/tests/regress/expected/test_delete_quota.out b/tests/regress/expected/test_delete_quota.out index 76ae43d429e..e8f9f1b952e 100644 --- a/tests/regress/expected/test_delete_quota.out +++ b/tests/regress/expected/test_delete_quota.out @@ -20,7 +20,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- expect fail INSERT INTO c SELECT generate_series(1,100); -ERROR: schema's disk space quota exceeded with name:deleteschema +ERROR: schema's disk space quota exceeded with name: deleteschema SELECT diskquota.set_schema_quota('deleteschema', '-1 MB'); set_schema_quota ------------------ diff --git a/tests/regress/expected/test_drop_after_pause.out b/tests/regress/expected/test_drop_after_pause.out index 81aac49c334..1dc7e8ad928 100644 --- a/tests/regress/expected/test_drop_after_pause.out +++ b/tests/regress/expected/test_drop_after_pause.out @@ -46,7 +46,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); (1 row) INSERT INTO SX.a SELECT generate_series(1,10000000); -- expect insert fail -ERROR: schema's disk space quota exceeded with name:16933 (seg2 127.0.0.1:6004 pid=24622) +ERROR: schema's disk space 
quota exceeded with name: 16933 (seg2 127.0.0.1:6004 pid=24622) \! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null \! gpstop -u > /dev/null SELECT diskquota.pause(); diff --git a/tests/regress/expected/test_drop_table.out b/tests/regress/expected/test_drop_table.out index a91d6d520d6..507d49bac3f 100644 --- a/tests/regress/expected/test_drop_table.out +++ b/tests/regress/expected/test_drop_table.out @@ -24,7 +24,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail INSERT INTO a2 SELECT generate_series(1,100); -ERROR: schema's disk space quota exceeded with name:sdrtbl +ERROR: schema's disk space quota exceeded with name: sdrtbl DROP TABLE a; SELECT diskquota.wait_for_worker_new_epoch(); wait_for_worker_new_epoch diff --git a/tests/regress/expected/test_extension.out b/tests/regress/expected/test_extension.out index 6afd1980fa7..9a2fd427abd 100644 --- a/tests/regress/expected/test_extension.out +++ b/tests/regress/expected/test_extension.out @@ -47,7 +47,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); (1 row) INSERT INTO SX.a values(generate_series(0, 10)); -ERROR: schema's disk space quota exceeded with name:sx +ERROR: schema's disk space quota exceeded with name: sx DROP TABLE SX.a; \c dbx1 CREATE SCHEMA SX; @@ -82,7 +82,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); \! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 4 INSERT INTO SX.a values(generate_series(0, 10)); -ERROR: schema's disk space quota exceeded with name:sx +ERROR: schema's disk space quota exceeded with name: sx DROP TABLE SX.a; \c dbx2 CREATE EXTENSION diskquota; @@ -110,7 +110,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); (1 row) INSERT INTO SX.a values(generate_series(0, 10)); -ERROR: schema's disk space quota exceeded with name:sx +ERROR: schema's disk space quota exceeded with name: sx DROP TABLE SX.a; \c dbx3 CREATE EXTENSION diskquota; @@ -138,7 +138,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); (1 row) INSERT INTO SX.a values(generate_series(0, 10)); -ERROR: schema's disk space quota exceeded with name:sx +ERROR: schema's disk space quota exceeded with name: sx DROP TABLE SX.a; \c dbx4 CREATE EXTENSION diskquota; @@ -166,7 +166,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); (1 row) INSERT INTO SX.a values(generate_series(0, 10)); -ERROR: schema's disk space quota exceeded with name:sx +ERROR: schema's disk space quota exceeded with name: sx DROP TABLE SX.a; \c dbx5 CREATE EXTENSION diskquota; @@ -194,7 +194,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); (1 row) INSERT INTO SX.a values(generate_series(0, 10)); -ERROR: schema's disk space quota exceeded with name:sx +ERROR: schema's disk space quota exceeded with name: sx DROP TABLE SX.a; \c dbx6 CREATE EXTENSION diskquota; @@ -222,7 +222,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); (1 row) INSERT INTO SX.a values(generate_series(0, 10)); -ERROR: schema's disk space quota exceeded with name:sx +ERROR: schema's disk space quota exceeded with name: sx DROP TABLE SX.a; \c dbx7 CREATE EXTENSION diskquota; @@ -250,7 +250,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); (1 row) INSERT INTO SX.a values(generate_series(0, 10)); -ERROR: schema's disk space quota exceeded with name:sx +ERROR: schema's disk space quota exceeded with name: sx DROP TABLE SX.a; \c dbx8 CREATE EXTENSION diskquota; @@ -278,7 +278,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); (1 row) INSERT INTO SX.a values(generate_series(0, 10)); -ERROR: schema's disk space quota exceeded with name:sx +ERROR: schema's 
disk space quota exceeded with name: sx DROP TABLE SX.a; \c dbx9 CREATE EXTENSION diskquota; diff --git a/tests/regress/expected/test_index.out b/tests/regress/expected/test_index.out index 2799f93f172..a35ec4f95cd 100644 --- a/tests/regress/expected/test_index.out +++ b/tests/regress/expected/test_index.out @@ -105,7 +105,7 @@ SELECT size, segid FROM diskquota.table_size , pg_class where tableid=oid and (r -- expect insert fail INSERT INTO test_index_a SELECT generate_series(1,100); -ERROR: tablespace:indexspc schema:indexschema1 diskquota exceeded +ERROR: tablespace: indexspc, schema: indexschema1 diskquota exceeded -- index tablespace quota exceeded ALTER table test_index_a SET TABLESPACE pg_default; SELECT diskquota.wait_for_worker_new_epoch(); @@ -125,7 +125,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail INSERT INTO test_index_a SELECT generate_series(1,100); -ERROR: tablespace:indexspc schema:indexschema1 diskquota exceeded +ERROR: tablespace: indexspc, schema: indexschema1 diskquota exceeded RESET search_path; DROP INDEX indexschema1.a_index; DROP TABLE indexschema1.test_index_a; diff --git a/tests/regress/expected/test_insert_after_drop.out b/tests/regress/expected/test_insert_after_drop.out index 1a1fe8f6970..a1e154a7812 100644 --- a/tests/regress/expected/test_insert_after_drop.out +++ b/tests/regress/expected/test_insert_after_drop.out @@ -23,7 +23,7 @@ SELECT pg_sleep(10); (1 row) INSERT INTO a SELECT generate_series(1,100); -ERROR: schema's disk space quota exceeded with name:sdrtbl +ERROR: schema's disk space quota exceeded with name: sdrtbl DROP EXTENSION diskquota; -- sleep 1 second in case of system slow SELECT pg_sleep(1); diff --git a/tests/regress/expected/test_partition.out b/tests/regress/expected/test_partition.out index b095ff05642..e103bbc9ee5 100644 --- a/tests/regress/expected/test_partition.out +++ b/tests/regress/expected/test_partition.out @@ -40,10 +40,10 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail INSERT INTO measurement SELECT 1, '2006-02-02' ,1,1; -ERROR: schema's disk space quota exceeded with name:s8 +ERROR: schema's disk space quota exceeded with name: s8 -- expect insert fail INSERT INTO measurement SELECT 1, '2006-03-03' ,1,1; -ERROR: schema's disk space quota exceeded with name:s8 +ERROR: schema's disk space quota exceeded with name: s8 DELETE FROM measurement WHERE logdate='2006-03-02'; SELECT diskquota.wait_for_worker_new_epoch(); wait_for_worker_new_epoch diff --git a/tests/regress/expected/test_pause_and_resume.out b/tests/regress/expected/test_pause_and_resume.out index 986f59e4c8a..e5b7820484d 100644 --- a/tests/regress/expected/test_pause_and_resume.out +++ b/tests/regress/expected/test_pause_and_resume.out @@ -20,7 +20,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail INSERT INTO a SELECT generate_series(1,100); -ERROR: schema's disk space quota exceeded with name:s1 +ERROR: schema's disk space quota exceeded with name: s1 -- pause extension SELECT diskquota.pause(); pause @@ -58,7 +58,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail INSERT INTO a SELECT generate_series(1,100); -ERROR: schema's disk space quota exceeded with name:s1 +ERROR: schema's disk space quota exceeded with name: s1 -- table size should be updated after resume SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'a'::regclass AND segid = -1; diff --git a/tests/regress/expected/test_pause_and_resume_multiple_db.out 
b/tests/regress/expected/test_pause_and_resume_multiple_db.out index f501c91181a..d3320f17aac 100644 --- a/tests/regress/expected/test_pause_and_resume_multiple_db.out +++ b/tests/regress/expected/test_pause_and_resume_multiple_db.out @@ -37,7 +37,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); (1 row) INSERT INTO s1.a SELECT generate_series(1,100); -- expect insert fail -ERROR: schema's disk space quota exceeded with name:s1 +ERROR: schema's disk space quota exceeded with name: s1 \c test_pause_and_resume SELECT diskquota.set_schema_quota('s1', '1 MB'); set_schema_quota @@ -52,7 +52,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); (1 row) INSERT INTO s1.a SELECT generate_series(1,100); -- expect insert fail -ERROR: schema's disk space quota exceeded with name:s1 +ERROR: schema's disk space quota exceeded with name: s1 \c contrib_regression SELECT diskquota.pause(); -- pause extension, onle effect current database pause @@ -81,7 +81,7 @@ SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = (1 row) INSERT INTO s1.a SELECT generate_series(1,100); -- expect insert fail -ERROR: schema's disk space quota exceeded with name:s1 +ERROR: schema's disk space quota exceeded with name: s1 SELECT diskquota.pause(); -- pause extension, onle effect current database pause ------- @@ -127,7 +127,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); (1 row) INSERT INTO s1.a SELECT generate_series(1,100000); -- expect insert fail -ERROR: schema's disk space quota exceeded with name:s1 +ERROR: schema's disk space quota exceeded with name: s1 SELECT diskquota.pause(); -- pause extension, onle effect current database pause ------- @@ -155,7 +155,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); (1 row) INSERT INTO s1.a SELECT generate_series(1,100); -- expect insert fail -ERROR: schema's disk space quota exceeded with name:s1 +ERROR: schema's disk space quota exceeded with name: s1 \c contrib_regression INSERT INTO s1.a SELECT generate_series(1,100); -- expect insert succeed SELECT diskquota.resume(); @@ -171,7 +171,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); (1 row) INSERT INTO s1.a SELECT generate_series(1,100); -- expect insert fail -ERROR: schema's disk space quota exceeded with name:s1 +ERROR: schema's disk space quota exceeded with name: s1 \c test_pause_and_resume SELECT diskquota.pause(); pause diff --git a/tests/regress/expected/test_primary_failure.out b/tests/regress/expected/test_primary_failure.out index 930148fc108..99985501666 100644 --- a/tests/regress/expected/test_primary_failure.out +++ b/tests/regress/expected/test_primary_failure.out @@ -43,7 +43,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail INSERT INTO a SELECT generate_series(1,100); -ERROR: schema's disk space quota exceeded with name:ftsr +ERROR: schema's disk space quota exceeded with name: ftsr -- now one of primary is down select pg_ctl((select datadir from gp_segment_configuration c where c.role='p' and c.content=0), 'stop'); pg_ctl @@ -69,7 +69,7 @@ select content, preferred_role, role, status, mode from gp_segment_configuration -- expect insert fail INSERT INTO a SELECT generate_series(1,100); -ERROR: schema's disk space quota exceeded with name:ftsr +ERROR: schema's disk space quota exceeded with name: ftsr -- increase quota SELECT diskquota.set_schema_quota('ftsr', '200 MB'); set_schema_quota diff --git a/tests/regress/expected/test_rename.out b/tests/regress/expected/test_rename.out index e8a2fcc6e89..ecd470ea3e8 100644 --- a/tests/regress/expected/test_rename.out 
+++ b/tests/regress/expected/test_rename.out @@ -20,17 +20,17 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail INSERT INTO a SELECT generate_series(1,10); -ERROR: schema's disk space quota exceeded with name:srs1 +ERROR: schema's disk space quota exceeded with name: srs1 ALTER SCHEMA srs1 RENAME TO srs2; SET search_path TO srs2; -- expect insert fail INSERT INTO a SELECT generate_series(1,10); -ERROR: schema's disk space quota exceeded with name:srs2 +ERROR: schema's disk space quota exceeded with name: srs2 -- test rename table ALTER TABLE a RENAME TO a2; -- expect insert fail INSERT INTO a2 SELECT generate_series(1,10); -ERROR: schema's disk space quota exceeded with name:srs2 +ERROR: schema's disk space quota exceeded with name: srs2 DROP TABLE a2; RESET search_path; DROP SCHEMA srs2; @@ -59,16 +59,16 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail INSERT INTO a SELECT generate_series(1,10); -ERROR: role's disk space quota exceeded with name:srerole +ERROR: role's disk space quota exceeded with name: srerole ALTER ROLE srerole RENAME TO srerole2; -- expect insert fail INSERT INTO a SELECT generate_series(1,10); -ERROR: role's disk space quota exceeded with name:srerole2 +ERROR: role's disk space quota exceeded with name: srerole2 -- test rename table ALTER TABLE a RENAME TO a2; -- expect insert fail INSERT INTO a2 SELECT generate_series(1,10); -ERROR: role's disk space quota exceeded with name:srerole2 +ERROR: role's disk space quota exceeded with name: srerole2 DROP TABLE a2; DROP ROLE srerole2; RESET search_path; diff --git a/tests/regress/expected/test_reschema.out b/tests/regress/expected/test_reschema.out index 7d1557715ba..1f0e4582828 100644 --- a/tests/regress/expected/test_reschema.out +++ b/tests/regress/expected/test_reschema.out @@ -20,7 +20,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail when exceed quota limit INSERT INTO a SELECT generate_series(1,1000); -ERROR: schema's disk space quota exceeded with name:sre +ERROR: schema's disk space quota exceeded with name: sre -- set schema quota larger SELECT diskquota.set_schema_quota('srE', '1 GB'); set_schema_quota diff --git a/tests/regress/expected/test_role.out b/tests/regress/expected/test_role.out index d2ed155a7f2..c15987506ac 100644 --- a/tests/regress/expected/test_role.out +++ b/tests/regress/expected/test_role.out @@ -26,10 +26,10 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail INSERT INTO b SELECT generate_series(1,100); -ERROR: role's disk space quota exceeded with name:u1 +ERROR: role's disk space quota exceeded with name: u1 -- expect insert fail INSERT INTO b2 SELECT generate_series(1,100); -ERROR: role's disk space quota exceeded with name:u1 +ERROR: role's disk space quota exceeded with name: u1 -- Delete role quota SELECT diskquota.set_role_quota('u1', '-1 MB'); set_role_quota @@ -60,7 +60,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail INSERT INTO b SELECT generate_series(1,100); -ERROR: role's disk space quota exceeded with name:u1 +ERROR: role's disk space quota exceeded with name: u1 SELECT role_name, quota_in_mb, rolsize_in_bytes FROM diskquota.show_fast_role_quota_view WHERE role_name='u1'; role_name | quota_in_mb | rolsize_in_bytes -----------+-------------+------------------ diff --git a/tests/regress/expected/test_schema.out b/tests/regress/expected/test_schema.out index a9ca3e887b4..e4116a0b448 100644 --- a/tests/regress/expected/test_schema.out +++ 
b/tests/regress/expected/test_schema.out @@ -21,13 +21,13 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail INSERT INTO a SELECT generate_series(1,100); -ERROR: schema's disk space quota exceeded with name:s1 +ERROR: schema's disk space quota exceeded with name: s1 CREATE TABLE a2(i int) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. -- expect insert fail INSERT INTO a2 SELECT generate_series(1,100); -ERROR: schema's disk space quota exceeded with name:s1 +ERROR: schema's disk space quota exceeded with name: s1 -- Test alter table set schema CREATE SCHEMA s2; ALTER TABLE s1.a SET SCHEMA s2; @@ -83,11 +83,11 @@ SELECT size, segid FROM diskquota.table_size -- expect fail INSERT INTO badquota.t1 SELECT generate_series(0, 10); -ERROR: schema's disk space quota exceeded with name:badquota +ERROR: schema's disk space quota exceeded with name: badquota ALTER TABLE s2.a SET SCHEMA badquota; -- expect failed INSERT INTO badquota.a SELECT generate_series(0, 100); -ERROR: schema's disk space quota exceeded with name:badquota +ERROR: schema's disk space quota exceeded with name: badquota SELECT diskquota.wait_for_worker_new_epoch(); wait_for_worker_new_epoch --------------------------- diff --git a/tests/regress/expected/test_tablespace_diff_schema.out b/tests/regress/expected/test_tablespace_diff_schema.out index 0276714ddea..65c0036c430 100644 --- a/tests/regress/expected/test_tablespace_diff_schema.out +++ b/tests/regress/expected/test_tablespace_diff_schema.out @@ -31,7 +31,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- expect to fail INSERT INTO a SELECT generate_series(1,1000000); -ERROR: tablespace:spc_diff_schema schema:schema_in_tablespc diskquota exceeded +ERROR: tablespace: spc_diff_schema, schema: schema_in_tablespc diskquota exceeded SELECT schema_name, tablespace_name FROM diskquota.show_fast_schema_tablespace_quota_view; schema_name | tablespace_name --------------------+----------------- @@ -77,7 +77,7 @@ SELECT schema_name, tablespace_name FROM diskquota.show_fast_schema_tablespace_q -- expect to fail INSERT INTO a SELECT generate_series(1,1000000); -ERROR: tablespace:spc_diff_schema schema:schema_in_tablespc diskquota exceeded +ERROR: tablespace: spc_diff_schema, schema: schema_in_tablespc diskquota exceeded reset search_path; DROP TABLE IF EXISTS schema_in_tablespc.a; DROP tablespace IF EXISTS spc_diff_schema; diff --git a/tests/regress/expected/test_tablespace_role.out b/tests/regress/expected/test_tablespace_role.out index 9c090ffaa34..ea8afc99d07 100644 --- a/tests/regress/expected/test_tablespace_role.out +++ b/tests/regress/expected/test_tablespace_role.out @@ -46,10 +46,10 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail INSERT INTO b SELECT generate_series(1,100); -ERROR: tablespace:rolespc role:rolespcu1 diskquota exceeded +ERROR: tablespace: rolespc, role: rolespcu1 diskquota exceeded -- expect insert fail INSERT INTO b2 SELECT generate_series(1,100); -ERROR: tablespace:rolespc role:rolespcu1 diskquota exceeded +ERROR: tablespace: rolespc, role: rolespcu1 diskquota exceeded -- Test show_fast_role_tablespace_quota_view SELECT role_name, tablespace_name, quota_in_mb, rolsize_tablespace_in_bytes FROM diskquota.show_fast_role_tablespace_quota_view WHERE role_name = 
'rolespcu1' and tablespace_name = 'rolespc'; role_name | tablespace_name | quota_in_mb | rolsize_tablespace_in_bytes @@ -78,7 +78,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail INSERT INTO b SELECT generate_series(1,100); -ERROR: tablespace:rolespc role:rolespcu1 diskquota exceeded +ERROR: tablespace: rolespc, role: rolespcu1 diskquota exceeded -- Test alter tablespace -- start_ignore \! mkdir -p /tmp/rolespc2 @@ -105,7 +105,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail INSERT INTO b SELECT generate_series(1,100); -ERROR: tablespace:rolespc role:rolespcu1 diskquota exceeded +ERROR: tablespace: rolespc, role: rolespcu1 diskquota exceeded -- Test update quota config SELECT diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '10 MB'); set_role_tablespace_quota @@ -131,7 +131,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail INSERT INTO b SELECT generate_series(1,100); -ERROR: tablespace:rolespc role:rolespcu1 diskquota exceeded +ERROR: tablespace: rolespc, role: rolespcu1 diskquota exceeded -- Test delete quota config SELECT diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '-1 MB'); set_role_tablespace_quota diff --git a/tests/regress/expected/test_tablespace_role_perseg.out b/tests/regress/expected/test_tablespace_role_perseg.out index d092a7eb3a1..c44317fdb28 100644 --- a/tests/regress/expected/test_tablespace_role_perseg.out +++ b/tests/regress/expected/test_tablespace_role_perseg.out @@ -36,7 +36,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail INSERT INTO b SELECT generate_series(1,100); -ERROR: tablespace:rolespc_perseg role:rolespc_persegu1 diskquota exceeded +ERROR: tablespace: rolespc_perseg, role: rolespc_persegu1 diskquota exceeded -- change tablespace role quota SELECT diskquota.set_role_tablespace_quota('rolespc_persegu1', 'rolespc_perseg', '10 MB'); set_role_tablespace_quota @@ -73,7 +73,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); ---- expect insert fail by tablespace schema perseg quota INSERT INTO b SELECT generate_series(1,100); -ERROR: tablespace:rolespc_perseg role:rolespc_persegu1 diskquota exceeded per segment quota +ERROR: tablespace: rolespc_perseg, role: rolespc_persegu1 diskquota exceeded per segment quota -- Test alter owner ALTER TABLE b OWNER TO rolespc_persegu2; SELECT diskquota.wait_for_worker_new_epoch(); @@ -93,7 +93,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail INSERT INTO b SELECT generate_series(1,100); -ERROR: tablespace:rolespc_perseg role:rolespc_persegu1 diskquota exceeded per segment quota +ERROR: tablespace: rolespc_perseg, role: rolespc_persegu1 diskquota exceeded per segment quota -- Test alter tablespace -- start_ignore \! 
mkdir -p /tmp/rolespc_perseg2 @@ -120,7 +120,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail INSERT INTO b SELECT generate_series(1,100); -ERROR: tablespace:rolespc_perseg role:rolespc_persegu1 diskquota exceeded per segment quota +ERROR: tablespace: rolespc_perseg, role: rolespc_persegu1 diskquota exceeded per segment quota -- Test update per segment ratio SELECT diskquota.set_per_segment_quota('rolespc_perseg', 3.1); set_per_segment_quota @@ -156,7 +156,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail INSERT INTO b SELECT generate_series(1,100); -ERROR: tablespace:rolespc_perseg role:rolespc_persegu1 diskquota exceeded per segment quota +ERROR: tablespace: rolespc_perseg, role: rolespc_persegu1 diskquota exceeded per segment quota -- Test delete per segment ratio SELECT diskquota.set_per_segment_quota('rolespc_perseg', -1); set_per_segment_quota @@ -186,7 +186,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail INSERT INTO b SELECT generate_series(1,100); -ERROR: tablespace:rolespc_perseg role:rolespc_persegu1 diskquota exceeded per segment quota +ERROR: tablespace: rolespc_perseg, role: rolespc_persegu1 diskquota exceeded per segment quota -- Test delete quota config SELECT diskquota.set_role_tablespace_quota('rolespc_persegu1', 'rolespc_perseg', '-1 MB'); set_role_tablespace_quota diff --git a/tests/regress/expected/test_tablespace_schema.out b/tests/regress/expected/test_tablespace_schema.out index 00600fa2502..2d5fb1bdf15 100644 --- a/tests/regress/expected/test_tablespace_schema.out +++ b/tests/regress/expected/test_tablespace_schema.out @@ -27,13 +27,13 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail INSERT INTO a SELECT generate_series(1,100); -ERROR: tablespace:schemaspc schema:spcs1 diskquota exceeded +ERROR: tablespace: schemaspc, schema: spcs1 diskquota exceeded CREATE TABLE a2(i int) TABLESPACE schemaspc DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
-- expect insert fail INSERT INTO a2 SELECT generate_series(1,100); -ERROR: tablespace:schemaspc schema:spcs1 diskquota exceeded +ERROR: tablespace: schemaspc, schema: spcs1 diskquota exceeded -- Test alter table set schema CREATE SCHEMA spcs2; ALTER TABLE spcs1.a SET SCHEMA spcs2; @@ -56,7 +56,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail INSERT INTO a SELECT generate_series(1,200); -ERROR: tablespace:schemaspc schema:spcs1 diskquota exceeded +ERROR: tablespace: schemaspc, schema: spcs1 diskquota exceeded SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name = 'spcs1' and tablespace_name ='schemaspc'; schema_name | tablespace_name | quota_in_mb | nspsize_tablespace_in_bytes -------------+-----------------+-------------+----------------------------- @@ -88,7 +88,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail INSERT INTO a SELECT generate_series(1,200); -ERROR: tablespace:schemaspc schema:spcs1 diskquota exceeded +ERROR: tablespace: schemaspc, schema: spcs1 diskquota exceeded -- Test update quota config SELECT diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc', '10 MB'); set_schema_tablespace_quota @@ -114,7 +114,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail INSERT INTO a SELECT generate_series(1,100); -ERROR: tablespace:schemaspc schema:spcs1 diskquota exceeded +ERROR: tablespace: schemaspc, schema: spcs1 diskquota exceeded -- Test delete quota config SELECT diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc', '-1 MB'); set_schema_tablespace_quota diff --git a/tests/regress/expected/test_tablespace_schema_perseg.out b/tests/regress/expected/test_tablespace_schema_perseg.out index 3af1b0f5c8b..ca99c82750a 100644 --- a/tests/regress/expected/test_tablespace_schema_perseg.out +++ b/tests/regress/expected/test_tablespace_schema_perseg.out @@ -26,7 +26,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail by tablespace schema diskquota INSERT INTO a SELECT generate_series(1,100); -ERROR: tablespace:schemaspc_perseg schema:spcs1_perseg diskquota exceeded +ERROR: tablespace: schemaspc_perseg, schema: spcs1_perseg diskquota exceeded -- change tablespace schema quota SELECT diskquota.set_schema_tablespace_quota('spcs1_perseg', 'schemaspc_perseg', '10 MB'); set_schema_tablespace_quota @@ -62,7 +62,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); ---- expect insert fail by tablespace schema perseg quota INSERT INTO a SELECT generate_series(1,100); -ERROR: tablespace:schemaspc_perseg schema:spcs1_perseg diskquota exceeded per segment quota +ERROR: tablespace: schemaspc_perseg, schema: spcs1_perseg diskquota exceeded per segment quota -- Test alter table set schema CREATE SCHEMA spcs2_perseg; ALTER TABLE spcs1_perseg.a SET SCHEMA spcs2_perseg; @@ -83,7 +83,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail INSERT INTO a SELECT generate_series(1,200); -ERROR: tablespace:schemaspc_perseg schema:spcs1_perseg diskquota exceeded per segment quota +ERROR: tablespace: schemaspc_perseg, schema: spcs1_perseg diskquota exceeded per segment quota SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name = 'spcs1_perseg' and tablespace_name ='schemaspc_perseg'; schema_name | tablespace_name | quota_in_mb | nspsize_tablespace_in_bytes 
--------------+------------------+-------------+----------------------------- @@ -115,7 +115,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail INSERT INTO a SELECT generate_series(1,200); -ERROR: tablespace:schemaspc_perseg schema:spcs1_perseg diskquota exceeded per segment quota +ERROR: tablespace: schemaspc_perseg, schema: spcs1_perseg diskquota exceeded per segment quota -- Test update per segment ratio SELECT diskquota.set_per_segment_quota('schemaspc_perseg', 3.1); set_per_segment_quota @@ -145,7 +145,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); ---- expect insert fail INSERT INTO a SELECT generate_series(1,100); -ERROR: tablespace:schemaspc_perseg schema:spcs1_perseg diskquota exceeded per segment quota +ERROR: tablespace: schemaspc_perseg, schema: spcs1_perseg diskquota exceeded per segment quota -- Test delete per segment ratio SELECT diskquota.set_per_segment_quota('schemaspc_perseg', -1); set_per_segment_quota @@ -175,7 +175,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); ---- expect insert fail INSERT INTO a SELECT generate_series(1,100); -ERROR: tablespace:schemaspc_perseg schema:spcs1_perseg diskquota exceeded per segment quota +ERROR: tablespace: schemaspc_perseg, schema: spcs1_perseg diskquota exceeded per segment quota -- Test delete tablespace schema quota SELECT diskquota.set_per_segment_quota('schemaspc_perseg', 2); set_per_segment_quota diff --git a/tests/regress/expected/test_temp_role.out b/tests/regress/expected/test_temp_role.out index 35d1a140378..5a2462a596b 100644 --- a/tests/regress/expected/test_temp_role.out +++ b/tests/regress/expected/test_temp_role.out @@ -27,7 +27,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- expected failed: INSERT INTO a SELECT generate_series(1,100); -ERROR: role's disk space quota exceeded with name:u3temp +ERROR: role's disk space quota exceeded with name: u3temp DROP TABLE ta; SELECT diskquota.wait_for_worker_new_epoch(); wait_for_worker_new_epoch diff --git a/tests/regress/expected/test_toast.out b/tests/regress/expected/test_toast.out index d30ce8f7bd7..92068c9785d 100644 --- a/tests/regress/expected/test_toast.out +++ b/tests/regress/expected/test_toast.out @@ -27,7 +27,7 @@ SELECT (SELECT string_agg(chr(floor(random() * 26)::int + 65), '') FROM generate_series(1,1000)) FROM generate_series(1,1000); -ERROR: schema's disk space quota exceeded with name:s5 +ERROR: schema's disk space quota exceeded with name: s5 DROP TABLE a5; RESET search_path; DROP SCHEMA s5; diff --git a/tests/regress/expected/test_truncate.out b/tests/regress/expected/test_truncate.out index 59a51e98fb6..c380b4c47ba 100644 --- a/tests/regress/expected/test_truncate.out +++ b/tests/regress/expected/test_truncate.out @@ -22,9 +22,9 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail INSERT INTO a SELECT generate_series(1,30); -ERROR: schema's disk space quota exceeded with name:s7 +ERROR: schema's disk space quota exceeded with name: s7 INSERT INTO b SELECT generate_series(1,30); -ERROR: schema's disk space quota exceeded with name:s7 +ERROR: schema's disk space quota exceeded with name: s7 TRUNCATE TABLE a; SELECT diskquota.wait_for_worker_new_epoch(); wait_for_worker_new_epoch diff --git a/tests/regress/expected/test_update.out b/tests/regress/expected/test_update.out index 58d2534d5e4..2c135cc671e 100644 --- a/tests/regress/expected/test_update.out +++ b/tests/regress/expected/test_update.out @@ -19,7 +19,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- expect update fail. 
UPDATE a SET i = 100; -ERROR: schema's disk space quota exceeded with name:s4 +ERROR: schema's disk space quota exceeded with name: s4 DROP TABLE a; RESET search_path; DROP SCHEMA s4; diff --git a/tests/regress/expected/test_vacuum.out b/tests/regress/expected/test_vacuum.out index 5099a1cada2..b35e8519d7c 100644 --- a/tests/regress/expected/test_vacuum.out +++ b/tests/regress/expected/test_vacuum.out @@ -22,10 +22,10 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail INSERT INTO a SELECT generate_series(1,10); -ERROR: schema's disk space quota exceeded with name:s6 +ERROR: schema's disk space quota exceeded with name: s6 -- expect insert fail INSERT INTO b SELECT generate_series(1,10); -ERROR: schema's disk space quota exceeded with name:s6 +ERROR: schema's disk space quota exceeded with name: s6 DELETE FROM a WHERE i > 10; SELECT diskquota.wait_for_worker_new_epoch(); wait_for_worker_new_epoch diff --git a/upgrade_test/expected/1.0_test_in_2.0_quota_create_in_1.0.out b/upgrade_test/expected/1.0_test_in_2.0_quota_create_in_1.0.out index 57da8569dc1..5bf36f408e6 100644 --- a/upgrade_test/expected/1.0_test_in_2.0_quota_create_in_1.0.out +++ b/upgrade_test/expected/1.0_test_in_2.0_quota_create_in_1.0.out @@ -2,9 +2,9 @@ -- FIXME add version check here \!sleep 5 insert into s1.a select generate_series(1, 100); -- fail -ERROR: schema's disk space quota exceeded with name:s1 +ERROR: schema's disk space quota exceeded with name: s1 insert into srole.b select generate_series(1, 100); -- fail -ERROR: role's disk space quota exceeded with name:u1 +ERROR: role's disk space quota exceeded with name: u1 drop table s1.a, srole.b; drop schema s1, srole; drop role u1; From 7a2a665015138dab6b0ac78777746ad603add248 Mon Sep 17 00:00:00 2001 From: Xiaoran Wang Date: Thu, 21 Apr 2022 10:59:02 +0800 Subject: [PATCH 192/330] Fix change tablespace test case (#213) * Fix change tablespace test case "ALTER TABLE xx SET TABLESPACE" changes the relfilenode of a table: it copies the files from the old relfilenode directory to the new one and deletes the old files when committing the transaction. Because diskquota does not acquire a lock on the relation when fetching table sizes, if diskquota is collecting the sizes of active tables while another session, inside a tablespace-changing transaction, is deleting an active table's files under the old relfilenode directory, diskquota cannot obtain that table's size on every segment, since the files on some segments have already been deleted. To fix this, we make the table active again by inserting data into it, and wait for diskquota to recalculate the table size after the tablespace change.
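The resulting wait-then-check pattern is sketched below (a minimal sketch drawn from the test changes that follow; the table and role names come from the rolespc_perseg test and are illustrative):

-- Touch the table so it re-enters the active table list after the
-- tablespace change rewrote its relfilenode.
INSERT INTO b SELECT generate_series(1,100);
-- Block until the diskquota worker completes a full refresh cycle, so the
-- recorded size reflects the files present on every segment.
SELECT diskquota.wait_for_worker_new_epoch();
-- Only now is the reported size stable enough to assert on.
SELECT role_name, tablespace_name, quota_in_mb, rolsize_tablespace_in_bytes
FROM diskquota.show_fast_role_tablespace_quota_view
WHERE role_name = 'rolespc_persegu1' AND tablespace_name = 'rolespc_perseg';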
--- tests/regress/expected/test_tablespace_role_perseg.out | 10 ++++++++-- tests/regress/sql/test_tablespace_role_perseg.sql | 5 +++-- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/tests/regress/expected/test_tablespace_role_perseg.out b/tests/regress/expected/test_tablespace_role_perseg.out index c44317fdb28..9deaf41f8ec 100644 --- a/tests/regress/expected/test_tablespace_role_perseg.out +++ b/tests/regress/expected/test_tablespace_role_perseg.out @@ -134,14 +134,20 @@ SELECT diskquota.wait_for_worker_new_epoch(); t (1 row) +-- expect insert success +INSERT INTO b SELECT generate_series(1,100); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + SELECT role_name, tablespace_name, quota_in_mb, rolsize_tablespace_in_bytes FROM diskquota.show_fast_role_tablespace_quota_view WHERE role_name = 'rolespc_persegu1' and tablespace_name = 'rolespc_perseg'; role_name | tablespace_name | quota_in_mb | rolsize_tablespace_in_bytes ------------------+-----------------+-------------+----------------------------- rolespc_persegu1 | rolespc_perseg | 10 | 4063232 (1 row) --- expect insert success -INSERT INTO b SELECT generate_series(1,100); SELECT diskquota.set_per_segment_quota('rolespc_perseg', 0.11); set_per_segment_quota ----------------------- diff --git a/tests/regress/sql/test_tablespace_role_perseg.sql b/tests/regress/sql/test_tablespace_role_perseg.sql index aa11f749e2a..4c49f7bfbb3 100644 --- a/tests/regress/sql/test_tablespace_role_perseg.sql +++ b/tests/regress/sql/test_tablespace_role_perseg.sql @@ -64,10 +64,11 @@ INSERT INTO b SELECT generate_series(1,100); -- Test update per segment ratio SELECT diskquota.set_per_segment_quota('rolespc_perseg', 3.1); SELECT diskquota.wait_for_worker_new_epoch(); -SELECT role_name, tablespace_name, quota_in_mb, rolsize_tablespace_in_bytes FROM diskquota.show_fast_role_tablespace_quota_view WHERE role_name = 'rolespc_persegu1' and tablespace_name = 'rolespc_perseg'; - -- expect insert success INSERT INTO b SELECT generate_series(1,100); +SELECT diskquota.wait_for_worker_new_epoch(); +SELECT role_name, tablespace_name, quota_in_mb, rolsize_tablespace_in_bytes FROM diskquota.show_fast_role_tablespace_quota_view WHERE role_name = 'rolespc_persegu1' and tablespace_name = 'rolespc_perseg'; + SELECT diskquota.set_per_segment_quota('rolespc_perseg', 0.11); SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert fail From f1be960db271db751d78276a80cf1f8e1df11aa7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Xuebin=20Su=20=28=E8=8B=8F=E5=AD=A6=E6=96=8C=29?= <12034000+xuebinsu@users.noreply.github.com> Date: Fri, 22 Apr 2022 09:35:09 +0800 Subject: [PATCH 193/330] Replace "black" with "reject" for clarity (#214) * Replace "black" with "reject" for clarity * Rename in sql files * Rename test files * Format .c files * Re-format using v13 * Update expected test results * Fix result of upgrade test * Re-trigger CI * Add newline at EOF * Change "list" to "map" for consistency --- README.md | 8 +- diskquota--1.0--2.0.sql | 10 +- diskquota--2.0--1.0.sql | 10 +- diskquota--2.0.sql | 10 +- diskquota.c | 12 +- diskquota.h | 4 +- gp_activetable.c | 4 +- quotamodel.c | 596 +++++++++--------- .../{test_blackmap.out => test_rejectmap.out} | 368 +++++------ tests/isolation2/isolation2_schedule | 2 +- .../{test_blackmap.sql => test_rejectmap.sql} | 318 +++++----- tests/regress/diskquota_schedule | 4 +- ...ut => test_clean_rejectmap_after_drop.out} | 6 +- .../expected/test_ctas_before_set_quota.out
| 4 +- .../expected/test_ctas_no_preload_lib.out | 6 +- .../expected/test_default_tablespace.out | 4 +- .../{test_blackmap.out => test_rejectmap.out} | 46 +- .../expected/test_tablespace_diff_schema.out | 2 +- ...ql => test_clean_rejectmap_after_drop.sql} | 6 +- .../sql/test_ctas_before_set_quota.sql | 4 +- .../regress/sql/test_ctas_no_preload_lib.sql | 6 +- tests/regress/sql/test_default_tablespace.sql | 4 +- .../{test_blackmap.sql => test_rejectmap.sql} | 46 +- .../sql/test_tablespace_diff_schema.sql | 2 +- upgrade_test/expected/2.0_catalog.out | 308 ++++----- 25 files changed, 896 insertions(+), 894 deletions(-) rename tests/isolation2/expected/{test_blackmap.out => test_rejectmap.out} (63%) rename tests/isolation2/sql/{test_blackmap.sql => test_rejectmap.sql} (72%) rename tests/regress/expected/{test_clean_blackmap_after_drop.out => test_clean_rejectmap_after_drop.out} (86%) rename tests/regress/expected/{test_blackmap.out => test_rejectmap.out} (90%) rename tests/regress/sql/{test_clean_blackmap_after_drop.sql => test_clean_rejectmap_after_drop.sql} (80%) rename tests/regress/sql/{test_blackmap.sql => test_rejectmap.sql} (87%) diff --git a/README.md b/README.md index 135c637116a..baaf2ba8e8f 100644 --- a/README.md +++ b/README.md @@ -42,8 +42,8 @@ database, and do quota enforcement. It will periodically (can be set via diskquota.naptime) recalculate the table size of active tables, and update their corresponding schema or owner's disk usage. Then compare with quota limit for those schemas or roles. If exceeds the limit, put the corresponding -schemas or roles into the blacklist in shared memory. Schemas or roles in -blacklist are used to do query enforcement to cancel queries which plan to +schemas or roles into the rejectmap in shared memory. Schemas or roles in +rejectmap are used to do query enforcement to cancel queries which plan to load data into these schemas or roles. From MPP perspective, diskquota launcher and worker processes are all run at @@ -304,7 +304,7 @@ show_fast_schema_quota_view and show_fast_role_quota_view. 3. Out of shared memory Diskquota extension uses two kinds of shared memories. One is used to save -black list and another one is to save active table list. The black list shared +rejectmap and another one is to save active table list. The rejectmap shared memory can support up to 1 MiB database objects which exceed quota limit. The active table list shared memory can support up to 1 MiB active tables in default, and user could reset it in GUC diskquota_max_active_tables. @@ -312,7 +312,7 @@ default, and user could reset it in GUC diskquota_max_active_tables. As shared memory is pre-allocated, user needs to restart DB if they updated this GUC value. -If black list shared memory is full, it's possible to load data into some +If rejectmap shared memory is full, it's possible to load data into some schemas or roles which quota limit are reached. If active table shared memory is full, disk quota worker may failed to detect the corresponding disk usage change in time. 
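Since this commit renames the user-facing view as well, the way to inspect which objects are currently out of quota becomes diskquota.rejectmap, defined in the SQL scripts below. A minimal sketch, assuming a schema s1 that already holds more data than the quota being set:

-- Give s1 a tiny quota, then wait for the worker to record the overage.
SELECT diskquota.set_schema_quota('s1', '1 MB');
SELECT diskquota.wait_for_worker_new_epoch();
-- Each row identifies one out-of-quota target; loads into it are rejected.
SELECT target_type, target_oid, tablespace_oid FROM diskquota.rejectmap;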
diff --git a/diskquota--1.0--2.0.sql b/diskquota--1.0--2.0.sql index 1b3378a3899..40a7969d75e 100644 --- a/diskquota--1.0--2.0.sql +++ b/diskquota--1.0--2.0.sql @@ -24,7 +24,7 @@ ALTER TABLE diskquota.table_size SET WITH (REORGANIZE=true) DISTRIBUTED BY (tabl -- type define ALTER TYPE diskquota.diskquota_active_table_type ADD ATTRIBUTE "GP_SEGMENT_ID" smallint; -CREATE TYPE diskquota.blackmap_entry AS ( +CREATE TYPE diskquota.rejectmap_entry AS ( target_oid oid, database_oid oid, tablespace_oid oid, @@ -32,7 +32,7 @@ CREATE TYPE diskquota.blackmap_entry AS ( seg_exceeded boolean ); -CREATE TYPE diskquota.blackmap_entry_detail AS ( +CREATE TYPE diskquota.rejectmap_entry_detail AS ( target_type text, target_oid oid, database_oid oid, @@ -70,8 +70,8 @@ CREATE TYPE diskquota.relation_cache_detail AS ( CREATE FUNCTION diskquota.set_schema_tablespace_quota(text, text, text) RETURNS void STRICT AS '$libdir/diskquota-2.0.so' LANGUAGE C; CREATE FUNCTION diskquota.set_role_tablespace_quota(text, text, text) RETURNS void STRICT AS '$libdir/diskquota-2.0.so' LANGUAGE C; CREATE FUNCTION diskquota.set_per_segment_quota(text, float4) RETURNS void STRICT AS '$libdir/diskquota-2.0.so' LANGUAGE C; -CREATE FUNCTION diskquota.refresh_blackmap(diskquota.blackmap_entry[], oid[]) RETURNS void STRICT AS '$libdir/diskquota-2.0.so' LANGUAGE C; -CREATE FUNCTION diskquota.show_blackmap() RETURNS setof diskquota.blackmap_entry_detail AS '$libdir/diskquota-2.0.so', 'show_blackmap' LANGUAGE C; +CREATE FUNCTION diskquota.refresh_rejectmap(diskquota.rejectmap_entry[], oid[]) RETURNS void STRICT AS '$libdir/diskquota-2.0.so' LANGUAGE C; +CREATE FUNCTION diskquota.show_rejectmap() RETURNS setof diskquota.rejectmap_entry_detail AS '$libdir/diskquota-2.0.so', 'show_rejectmap' LANGUAGE C; CREATE FUNCTION diskquota.pause() RETURNS void STRICT AS '$libdir/diskquota-2.0.so', 'diskquota_pause' LANGUAGE C; CREATE FUNCTION diskquota.resume() RETURNS void STRICT AS '$libdir/diskquota-2.0.so', 'diskquota_resume' LANGUAGE C; CREATE FUNCTION diskquota.show_worker_epoch() RETURNS bigint STRICT AS '$libdir/diskquota-2.0.so', 'show_worker_epoch' LANGUAGE C; @@ -103,7 +103,7 @@ CREATE FUNCTION diskquota.show_relation_cache_all_seg() RETURNS setof diskquota. 
-- UDF end -- views -CREATE VIEW diskquota.blackmap AS SELECT * FROM diskquota.show_blackmap() AS BM; +CREATE VIEW diskquota.rejectmap AS SELECT * FROM diskquota.show_rejectmap() AS BM; /* ALTER */ CREATE OR REPLACE VIEW diskquota.show_fast_database_size_view AS SELECT ( diff --git a/diskquota--2.0--1.0.sql b/diskquota--2.0--1.0.sql index 2675171d114..96338a81ceb 100644 --- a/diskquota--2.0--1.0.sql +++ b/diskquota--2.0--1.0.sql @@ -1,7 +1,7 @@ -- TODO check if worker should not refresh, current lib should be diskquota.so -- views -DROP VIEW diskquota.blackmap; +DROP VIEW diskquota.rejectmap; DROP VIEW diskquota.show_fast_schema_tablespace_quota_view; DROP VIEW diskquota.show_fast_role_tablespace_quota_view; DROP VIEW diskquota.show_segment_ratio_quota_view; @@ -45,8 +45,8 @@ GROUP BY pgc.relowner, pgr.rolname, qc.quotalimitMB; DROP FUNCTION diskquota.set_schema_tablespace_quota(text, text, text); DROP FUNCTION diskquota.set_role_tablespace_quota(text, text, text); DROP FUNCTION diskquota.set_per_segment_quota(text, float4); -DROP FUNCTION diskquota.refresh_blackmap(diskquota.blackmap_entry[], oid[]); -DROP FUNCTION diskquota.show_blackmap(); +DROP FUNCTION diskquota.refresh_rejectmap(diskquota.rejectmap_entry[], oid[]); +DROP FUNCTION diskquota.show_rejectmap(); DROP FUNCTION diskquota.pause(); DROP FUNCTION diskquota.resume(); DROP FUNCTION diskquota.show_worker_epoch(); @@ -81,7 +81,7 @@ ALTER TABLE diskquota.table_size DROP COLUMN segid; -- type part ALTER TYPE diskquota.diskquota_active_table_type DROP ATTRIBUTE "GP_SEGMENT_ID"; -DROP TYPE diskquota.blackmap_entry; -DROP TYPE diskquota.blackmap_entry_detail; +DROP TYPE diskquota.rejectmap_entry; +DROP TYPE diskquota.rejectmap_entry_detail; DROP TYPE diskquota.relation_cache_detail; -- type part end diff --git a/diskquota--2.0.sql b/diskquota--2.0.sql index 67d8aa22fbc..0587fc77744 100644 --- a/diskquota--2.0.sql +++ b/diskquota--2.0.sql @@ -45,7 +45,7 @@ CREATE TYPE diskquota.diskquota_active_table_type AS ( "GP_SEGMENT_ID" smallint ); -CREATE TYPE diskquota.blackmap_entry AS ( +CREATE TYPE diskquota.rejectmap_entry AS ( target_oid oid, database_oid oid, tablespace_oid oid, @@ -53,7 +53,7 @@ CREATE TYPE diskquota.blackmap_entry AS ( seg_exceeded boolean ); -CREATE TYPE diskquota.blackmap_entry_detail AS ( +CREATE TYPE diskquota.rejectmap_entry_detail AS ( target_type text, target_oid oid, database_oid oid, @@ -86,8 +86,8 @@ CREATE FUNCTION diskquota.diskquota_fetch_table_stat(int4, oid[]) RETURNS setof CREATE FUNCTION diskquota.set_schema_tablespace_quota(text, text, text) RETURNS void STRICT AS '$libdir/diskquota-2.0.so' LANGUAGE C; CREATE FUNCTION diskquota.set_role_tablespace_quota(text, text, text) RETURNS void STRICT AS '$libdir/diskquota-2.0.so' LANGUAGE C; CREATE FUNCTION diskquota.set_per_segment_quota(text, float4) RETURNS void STRICT AS '$libdir/diskquota-2.0.so' LANGUAGE C; -CREATE FUNCTION diskquota.refresh_blackmap(diskquota.blackmap_entry[], oid[]) RETURNS void STRICT AS '$libdir/diskquota-2.0.so' LANGUAGE C; -CREATE FUNCTION diskquota.show_blackmap() RETURNS setof diskquota.blackmap_entry_detail AS '$libdir/diskquota-2.0.so', 'show_blackmap' LANGUAGE C; +CREATE FUNCTION diskquota.refresh_rejectmap(diskquota.rejectmap_entry[], oid[]) RETURNS void STRICT AS '$libdir/diskquota-2.0.so' LANGUAGE C; +CREATE FUNCTION diskquota.show_rejectmap() RETURNS setof diskquota.rejectmap_entry_detail AS '$libdir/diskquota-2.0.so', 'show_rejectmap' LANGUAGE C; CREATE FUNCTION diskquota.pause() RETURNS void STRICT AS 
'$libdir/diskquota-2.0.so', 'diskquota_pause' LANGUAGE C; CREATE FUNCTION diskquota.resume() RETURNS void STRICT AS '$libdir/diskquota-2.0.so', 'diskquota_resume' LANGUAGE C; CREATE FUNCTION diskquota.show_worker_epoch() RETURNS bigint STRICT AS '$libdir/diskquota-2.0.so', 'show_worker_epoch' LANGUAGE C; @@ -181,7 +181,7 @@ SELECT ( (SELECT SUM(size) FROM diskquota.table_size WHERE segid = -1) ) AS dbsize; -CREATE VIEW diskquota.blackmap AS SELECT * FROM diskquota.show_blackmap() AS BM; +CREATE VIEW diskquota.rejectmap AS SELECT * FROM diskquota.show_rejectmap() AS BM; CREATE VIEW diskquota.show_fast_schema_tablespace_quota_view AS WITH diff --git a/diskquota.c b/diskquota.c index a0801ec0a04..c552acc84f6 100644 --- a/diskquota.c +++ b/diskquota.c @@ -114,7 +114,7 @@ static void terminate_all_workers(void); static void on_add_db(Oid dbid, MessageResult *code); static void on_del_db(Oid dbid, MessageResult *code); static bool is_valid_dbid(Oid dbid); -extern void invalidate_database_blackmap(Oid dbid); +extern void invalidate_database_rejectmap(Oid dbid); /* * Entrypoint of diskquota module. @@ -376,8 +376,8 @@ disk_quota_worker_main(Datum main_arg) if (got_sigterm) { ereport(LOG, (errmsg("[diskquota] bgworker for \"%s\" is being terminated by SIGTERM.", dbname))); - /* clear the out-of-quota blacklist in shared memory */ - invalidate_database_blackmap(MyDatabaseId); + /* clear the out-of-quota rejectmap in shared memory */ + invalidate_database_rejectmap(MyDatabaseId); proc_exit(0); } @@ -431,8 +431,8 @@ disk_quota_worker_main(Datum main_arg) } ereport(LOG, (errmsg("[diskquota] bgworker for \"%s\" is being terminated by SIGTERM.", dbname))); - /* clear the out-of-quota blacklist in shared memory */ - invalidate_database_blackmap(MyDatabaseId); + /* clear the out-of-quota rejectmap in shared memory */ + invalidate_database_rejectmap(MyDatabaseId); proc_exit(0); } @@ -853,7 +853,7 @@ on_add_db(Oid dbid, MessageResult *code) * do: * 1. kill the associated worker process * 2. delete dbid from diskquota_namespace.database_list - * 3. invalidate black-map entries and monitoring_dbid_cache from shared memory + * 3. invalidate reject-map entries and monitoring_dbid_cache from shared memory */ static void on_del_db(Oid dbid, MessageResult *code) diff --git a/diskquota.h b/diskquota.h index 69fb339e6fe..a308de59e77 100644 --- a/diskquota.h +++ b/diskquota.h @@ -68,7 +68,7 @@ typedef enum struct DiskQuotaLocks { LWLock *active_table_lock; - LWLock *black_map_lock; + LWLock *reject_map_lock; LWLock *extension_ddl_message_lock; LWLock *extension_ddl_lock; /* ensure create diskquota extension serially */ LWLock *monitoring_dbid_cache_lock; @@ -151,7 +151,7 @@ extern void register_diskquota_object_access_hook(void); /* enforcement interface*/ extern void init_disk_quota_enforcement(void); -extern void invalidate_database_blackmap(Oid dbid); +extern void invalidate_database_rejectmap(Oid dbid); /* quota model interface*/ extern void init_disk_quota_shmem(void); diff --git a/gp_activetable.c b/gp_activetable.c index 7ff2e8270aa..4da2f7e9db8 100644 --- a/gp_activetable.c +++ b/gp_activetable.c @@ -177,7 +177,7 @@ object_access_hook_QuotaStmt(ObjectAccessType access, Oid classId, Oid objectId, { if (get_extension_oid("diskquota", true) == objectId) { - invalidate_database_blackmap(MyDatabaseId); + invalidate_database_rejectmap(MyDatabaseId); } } @@ -787,7 +787,7 @@ get_active_tables_oid(void) /* * Load table size info from diskquota.table_size table. 
- * This is called when system startup, disk quota black list + * This is called at system startup; the disk quota rejectmap * and other shared memory will be warmed up from the table_size table. */ static void diff --git a/quotamodel.c b/quotamodel.c index 3097d08b5a7..3cb714c8933 100644 --- a/quotamodel.c +++ b/quotamodel.c @@ -41,23 +41,23 @@ #include "cdb/cdbdispatchresult.h" #include "cdb/cdbutil.h" -/* cluster level max size of black list */ -#define MAX_DISK_QUOTA_BLACK_ENTRIES (1024 * 1024) -/* cluster level init size of black list */ -#define INIT_DISK_QUOTA_BLACK_ENTRIES 8192 -/* per database level max size of black list */ -#define MAX_LOCAL_DISK_QUOTA_BLACK_ENTRIES 8192 +/* cluster level max size of rejectmap */ +#define MAX_DISK_QUOTA_REJECT_ENTRIES (1024 * 1024) +/* cluster level init size of rejectmap */ +#define INIT_DISK_QUOTA_REJECT_ENTRIES 8192 +/* per database level max size of rejectmap */ +#define MAX_LOCAL_DISK_QUOTA_REJECT_ENTRIES 8192 #define MAX_NUM_KEYS_QUOTA_MAP 8 /* Number of attributes in quota configuration records. */ #define NUM_QUOTA_CONFIG_ATTRS 6 -typedef struct TableSizeEntry TableSizeEntry; -typedef struct NamespaceSizeEntry NamespaceSizeEntry; -typedef struct RoleSizeEntry RoleSizeEntry; -typedef struct QuotaLimitEntry QuotaLimitEntry; -typedef struct BlackMapEntry BlackMapEntry; -typedef struct GlobalBlackMapEntry GlobalBlackMapEntry; -typedef struct LocalBlackMapEntry LocalBlackMapEntry; +typedef struct TableSizeEntry TableSizeEntry; +typedef struct NamespaceSizeEntry NamespaceSizeEntry; +typedef struct RoleSizeEntry RoleSizeEntry; +typedef struct QuotaLimitEntry QuotaLimitEntry; +typedef struct RejectMapEntry RejectMapEntry; +typedef struct GlobalRejectMapEntry GlobalRejectMapEntry; +typedef struct LocalRejectMapEntry LocalRejectMapEntry; int SEGCOUNT = 0; /* @@ -116,8 +116,8 @@ struct QuotaInfo quota_info[NUM_QUOTA_TYPES] = { [TABLESPACE_QUOTA] = { .map_name = "Tablespace map", .num_keys = 1, .sys_cache = (Oid[]){TABLESPACEOID}, .map = NULL}}; -/* global blacklist for objects which exceed their quota limit */ -struct BlackMapEntry +/* global rejectmap for objects which exceed their quota limit */ +struct RejectMapEntry { Oid targetoid; Oid databaseoid; @@ -125,16 +125,16 @@ uint32 targettype; /* * TODO refactor this data structure - * QD index the blackmap by (targetoid, databaseoid, tablespaceoid, targettype). - * QE index the blackmap by (relfilenode). + * QD index the rejectmap by (targetoid, databaseoid, tablespaceoid, targettype). + * QE index the rejectmap by (relfilenode). */ RelFileNode relfilenode; }; -struct GlobalBlackMapEntry +struct GlobalRejectMapEntry { - BlackMapEntry keyitem; - bool segexceeded; + RejectMapEntry keyitem; + bool segexceeded; /* * When the quota limit is exceeded on segment servers, * we need an extra auxiliary field to preserve the quota * exceeding information dispatched from the master, e.g., targettype, targetoid. This field is * useful on segment servers.
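 * For example, a relfilenode-keyed entry created on a QE keeps the original
 * (targettype, targetoid, tablespaceoid) in auxblockinfo, so that
 * export_exceeded_error() can still name the quota that was hit.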
*/ - BlackMapEntry auxblockinfo; + RejectMapEntry auxblockinfo; }; -/* local blacklist for which exceed their quota limit */ -struct LocalBlackMapEntry +/* local rejectmap for which exceed their quota limit */ +struct LocalRejectMapEntry { - BlackMapEntry keyitem; - bool isexceeded; - bool segexceeded; + RejectMapEntry keyitem; + bool isexceeded; + bool segexceeded; }; /* using hash table to support incremental update the table size entry.*/ static HTAB *table_size_map = NULL; -/* black list for database objects which exceed their quota limit */ -static HTAB *disk_quota_black_map = NULL; -static HTAB *local_disk_quota_black_map = NULL; +/* rejectmap for database objects which exceed their quota limit */ +static HTAB *disk_quota_reject_map = NULL; +static HTAB *local_disk_quota_reject_map = NULL; static shmem_startup_hook_type prev_shmem_startup_hook = NULL; @@ -167,7 +167,7 @@ static void init_all_quota_maps(void); static void update_size_for_quota(int64 size, QuotaType type, Oid *keys, int16 segid); static void update_limit_for_quota(int64 limit, float segratio, QuotaType type, Oid *keys); static void remove_quota(QuotaType type, Oid *keys, int16 segid); -static void add_quota_to_blacklist(QuotaType type, Oid targetOid, Oid tablespaceoid, bool segexceeded); +static void add_quota_to_rejectmap(QuotaType type, Oid targetOid, Oid tablespaceoid, bool segexceeded); static void check_quota_map(QuotaType type); static void clear_all_quota_maps(void); static void transfer_table_for_quota(int64 totalsize, QuotaType type, Oid *old_keys, Oid *new_keys, int16 segid); @@ -176,8 +176,8 @@ static void transfer_table_for_quota(int64 totalsize, QuotaType type, Oid *old_k static void refresh_disk_quota_usage(bool is_init); static void calculate_table_disk_usage(bool is_init, HTAB *local_active_table_stat_map); static void flush_to_table_size(void); -static void flush_local_black_map(void); -static void dispatch_blackmap(HTAB *local_active_table_stat_map); +static void flush_local_reject_map(void); +static void dispatch_rejectmap(HTAB *local_active_table_stat_map); static bool load_quotas(void); static void do_load_quotas(void); static bool do_check_diskquota_state_is_ready(void); @@ -186,7 +186,7 @@ static Size DiskQuotaShmemSize(void); static void disk_quota_shmem_startup(void); static void init_lwlocks(void); -static void export_exceeded_error(GlobalBlackMapEntry *entry, bool skip_name); +static void export_exceeded_error(GlobalRejectMapEntry *entry, bool skip_name); void truncateStringInfo(StringInfo str, int nchars); static void @@ -267,28 +267,28 @@ remove_quota(QuotaType type, Oid *keys, int16 segid) /* * Compare the disk quota limit and current usage of a database object. - * Put them into local blacklist if quota limit is exceeded. + * Put them into local rejectmap if quota limit is exceeded. 
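 * Entries accumulate in local_disk_quota_reject_map and are published to
 * shared memory later by flush_local_reject_map().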
*/ static void -add_quota_to_blacklist(QuotaType type, Oid targetOid, Oid tablespaceoid, bool segexceeded) +add_quota_to_rejectmap(QuotaType type, Oid targetOid, Oid tablespaceoid, bool segexceeded) { - LocalBlackMapEntry *localblackentry; - BlackMapEntry keyitem = {0}; + LocalRejectMapEntry *localrejectentry; + RejectMapEntry keyitem = {0}; keyitem.targetoid = targetOid; keyitem.databaseoid = MyDatabaseId; keyitem.tablespaceoid = tablespaceoid; keyitem.targettype = (uint32)type; - ereport(DEBUG1, (errmsg("[diskquota] Put object %u to blacklist", targetOid))); - localblackentry = (LocalBlackMapEntry *)hash_search(local_disk_quota_black_map, &keyitem, HASH_ENTER, NULL); - localblackentry->isexceeded = true; - localblackentry->segexceeded = segexceeded; + ereport(DEBUG1, (errmsg("[diskquota] Put object %u to rejectmap", targetOid))); + localrejectentry = (LocalRejectMapEntry *)hash_search(local_disk_quota_reject_map, &keyitem, HASH_ENTER, NULL); + localrejectentry->isexceeded = true; + localrejectentry->segexceeded = segexceeded; } /* * Check the quota map, if the entry doesn't exist anymore, * remove it from the map. Otherwise, check if it has hit - * the quota limit, if it does, add it to the black list. + * the quota limit, if it does, add it to the rejectmap. */ static void check_quota_map(QuotaType type) @@ -326,7 +326,7 @@ check_quota_map(QuotaType type) : InvalidOid; bool segmentExceeded = entry->segid == -1 ? false : true; - add_quota_to_blacklist(type, targetOid, tablespaceoid, segmentExceeded); + add_quota_to_rejectmap(type, targetOid, tablespaceoid, segmentExceeded); } } } @@ -396,20 +396,20 @@ disk_quota_shmem_startup(void) /* * Four shared memory data. extension_ddl_message is used to handle - * diskquota extension create/drop command. disk_quota_black_map is used - * to store out-of-quota blacklist. active_tables_map is used to store + * diskquota extension create/drop command. disk_quota_reject_map is used + * to store out-of-quota rejectmap. active_tables_map is used to store * active tables whose disk usage is changed. */ extension_ddl_message = ShmemInitStruct("disk_quota_extension_ddl_message", sizeof(ExtensionDDLMessage), &found); if (!found) memset((void *)extension_ddl_message, 0, sizeof(ExtensionDDLMessage)); memset(&hash_ctl, 0, sizeof(hash_ctl)); - hash_ctl.keysize = sizeof(BlackMapEntry); - hash_ctl.entrysize = sizeof(GlobalBlackMapEntry); + hash_ctl.keysize = sizeof(RejectMapEntry); + hash_ctl.entrysize = sizeof(GlobalRejectMapEntry); hash_ctl.hash = tag_hash; - disk_quota_black_map = ShmemInitHash("blackmap whose quota limitation is reached", INIT_DISK_QUOTA_BLACK_ENTRIES, - MAX_DISK_QUOTA_BLACK_ENTRIES, &hash_ctl, HASH_ELEM | HASH_FUNCTION); + disk_quota_reject_map = ShmemInitHash("rejectmap whose quota limitation is reached", INIT_DISK_QUOTA_REJECT_ENTRIES, + MAX_DISK_QUOTA_REJECT_ENTRIES, &hash_ctl, HASH_ELEM | HASH_FUNCTION); init_shm_worker_active_tables(); @@ -438,7 +438,7 @@ disk_quota_shmem_startup(void) /* * Initialize four shared memory locks. * active_table_lock is used to access active table map. - * black_map_lock is used to access out-of-quota blacklist. + * reject_map_lock is used to access out-of-quota rejectmap. * extension_ddl_message_lock is used to access content of * extension_ddl_message. 
* extension_ddl_lock is used to avoid concurrent diskquota @@ -449,7 +449,7 @@ static void init_lwlocks(void) { diskquota_locks.active_table_lock = LWLockAssign(); - diskquota_locks.black_map_lock = LWLockAssign(); + diskquota_locks.reject_map_lock = LWLockAssign(); diskquota_locks.extension_ddl_message_lock = LWLockAssign(); diskquota_locks.extension_ddl_lock = LWLockAssign(); diskquota_locks.monitoring_dbid_cache_lock = LWLockAssign(); @@ -468,7 +468,7 @@ DiskQuotaShmemSize(void) Size size; size = sizeof(ExtensionDDLMessage); - size = add_size(size, hash_estimate_size(MAX_DISK_QUOTA_BLACK_ENTRIES, sizeof(GlobalBlackMapEntry))); + size = add_size(size, hash_estimate_size(MAX_DISK_QUOTA_REJECT_ENTRIES, sizeof(GlobalRejectMapEntry))); size = add_size(size, hash_estimate_size(diskquota_max_active_tables, sizeof(DiskQuotaActiveTableEntry))); size = add_size(size, hash_estimate_size(diskquota_max_active_tables, sizeof(DiskQuotaRelationCacheEntry))); size = add_size(size, hash_estimate_size(diskquota_max_active_tables, sizeof(DiskQuotaRelidCacheEntry))); @@ -499,17 +499,17 @@ init_disk_quota_model(void) init_all_quota_maps(); /* - * local diskquota black map is used to reduce the lock hold time of - * blackmap in shared memory + * local diskquota reject map is used to reduce the lock hold time of + * rejectmap in shared memory */ memset(&hash_ctl, 0, sizeof(hash_ctl)); - hash_ctl.keysize = sizeof(BlackMapEntry); - hash_ctl.entrysize = sizeof(LocalBlackMapEntry); + hash_ctl.keysize = sizeof(RejectMapEntry); + hash_ctl.entrysize = sizeof(LocalRejectMapEntry); hash_ctl.hcxt = CurrentMemoryContext; hash_ctl.hash = tag_hash; - local_disk_quota_black_map = - hash_create("local blackmap whose quota limitation is reached", MAX_LOCAL_DISK_QUOTA_BLACK_ENTRIES, + local_disk_quota_reject_map = + hash_create("local rejectmap whose quota limitation is reached", MAX_LOCAL_DISK_QUOTA_REJECT_ENTRIES, &hash_ctl, HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); } @@ -662,7 +662,7 @@ refresh_disk_quota_model(bool is_init) /* * Update the disk usage of namespace, role and tablespace. - * Put the exceeded namespace and role into shared black map. + * Put the exceeded namespace and role into shared reject map. * Parameter 'is_init' is true when it's the first time that worker * process is constructing quota model. */ @@ -704,10 +704,10 @@ refresh_disk_quota_usage(bool is_init) } /* flush local table_size_map to user table table_size */ flush_to_table_size(); - /* copy local black map back to shared black map */ - flush_local_black_map(); - /* Dispatch blackmap entries to segments to perform hard-limit. */ - if (diskquota_hardlimit) dispatch_blackmap(local_active_table_stat_map); + /* copy local reject map back to shared reject map */ + flush_local_reject_map(); + /* Dispatch rejectmap entries to segments to perform hard-limit. */ + if (diskquota_hardlimit) dispatch_rejectmap(local_active_table_stat_map); hash_destroy(local_active_table_stat_map); } PG_CATCH(); @@ -1045,67 +1045,67 @@ flush_to_table_size(void) } /* - * Generate the new shared blacklist from the local_black_list which + * Generate the new shared rejectmap from the local_rejectmap which * exceed the quota limit. - * local_black_list is used to reduce the lock contention. + * local_rejectmap is used to reduce the lock contention. 
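 * Entries still marked isexceeded are copied into the shared map under an
 * exclusive reject_map_lock; entries that are no longer exceeded are removed
 * from both maps.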
*/ static void -flush_local_black_map(void) +flush_local_reject_map(void) { - HASH_SEQ_STATUS iter; - LocalBlackMapEntry *localblackentry; - GlobalBlackMapEntry *blackentry; - bool found; + HASH_SEQ_STATUS iter; + LocalRejectMapEntry *localrejectentry; + GlobalRejectMapEntry *rejectentry; + bool found; - LWLockAcquire(diskquota_locks.black_map_lock, LW_EXCLUSIVE); + LWLockAcquire(diskquota_locks.reject_map_lock, LW_EXCLUSIVE); - hash_seq_init(&iter, local_disk_quota_black_map); - while ((localblackentry = hash_seq_search(&iter)) != NULL) + hash_seq_init(&iter, local_disk_quota_reject_map); + while ((localrejectentry = hash_seq_search(&iter)) != NULL) { - if (localblackentry->isexceeded) + if (localrejectentry->isexceeded) { - blackentry = (GlobalBlackMapEntry *)hash_search(disk_quota_black_map, (void *)&localblackentry->keyitem, - HASH_ENTER_NULL, &found); - if (blackentry == NULL) + rejectentry = (GlobalRejectMapEntry *)hash_search(disk_quota_reject_map, (void *)&localrejectentry->keyitem, + HASH_ENTER_NULL, &found); + if (rejectentry == NULL) { - ereport(WARNING, (errmsg("[diskquota] Shared disk quota black map size limit reached. " + ereport(WARNING, (errmsg("[diskquota] Shared disk quota reject map size limit reached. " "Some out-of-limit schemas or roles will be lost " - "in blacklist."))); + "in rejectmap."))); } else { /* new db objects which exceed quota limit */ if (!found) { - blackentry->keyitem.targetoid = localblackentry->keyitem.targetoid; - blackentry->keyitem.databaseoid = MyDatabaseId; - blackentry->keyitem.targettype = localblackentry->keyitem.targettype; - blackentry->keyitem.tablespaceoid = localblackentry->keyitem.tablespaceoid; - blackentry->segexceeded = localblackentry->segexceeded; + rejectentry->keyitem.targetoid = localrejectentry->keyitem.targetoid; + rejectentry->keyitem.databaseoid = MyDatabaseId; + rejectentry->keyitem.targettype = localrejectentry->keyitem.targettype; + rejectentry->keyitem.tablespaceoid = localrejectentry->keyitem.tablespaceoid; + rejectentry->segexceeded = localrejectentry->segexceeded; } } - blackentry->segexceeded = localblackentry->segexceeded; - localblackentry->isexceeded = false; - localblackentry->segexceeded = false; + rejectentry->segexceeded = localrejectentry->segexceeded; + localrejectentry->isexceeded = false; + localrejectentry->segexceeded = false; } else { /* db objects are removed or under quota limit in the new loop */ - (void)hash_search(disk_quota_black_map, (void *)&localblackentry->keyitem, HASH_REMOVE, NULL); - (void)hash_search(local_disk_quota_black_map, (void *)&localblackentry->keyitem, HASH_REMOVE, NULL); + (void)hash_search(disk_quota_reject_map, (void *)&localrejectentry->keyitem, HASH_REMOVE, NULL); + (void)hash_search(local_disk_quota_reject_map, (void *)&localrejectentry->keyitem, HASH_REMOVE, NULL); } } - LWLockRelease(diskquota_locks.black_map_lock); + LWLockRelease(diskquota_locks.reject_map_lock); } /* - * Dispatch blackmap to segment servers. + * Dispatch rejectmap to segment servers.
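 * The dispatched command built below has the shape (editor's sketch, with
 * placeholder values):
 *   SELECT diskquota.refresh_rejectmap(
 *          ARRAY[ROW(targetoid, dboid, spcoid, targettype, segexceeded), ...]::diskquota.rejectmap_entry[],
 *          ARRAY[active table oids]::oid[]);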
*/ static void -dispatch_blackmap(HTAB *local_active_table_stat_map) +dispatch_rejectmap(HTAB *local_active_table_stat_map) { HASH_SEQ_STATUS hash_seq; - GlobalBlackMapEntry *blackmap_entry; + GlobalRejectMapEntry *rejectmap_entry; DiskQuotaActiveTableEntry *active_table_entry; int num_entries, count = 0; CdbPgResults cdb_pgresults = {NULL, 0}; @@ -1117,18 +1117,18 @@ dispatch_blackmap(HTAB *local_active_table_stat_map) initStringInfo(&active_oids); initStringInfo(&sql); - LWLockAcquire(diskquota_locks.black_map_lock, LW_SHARED); - num_entries = hash_get_num_entries(disk_quota_black_map); - hash_seq_init(&hash_seq, disk_quota_black_map); - while ((blackmap_entry = hash_seq_search(&hash_seq)) != NULL) + LWLockAcquire(diskquota_locks.reject_map_lock, LW_SHARED); + num_entries = hash_get_num_entries(disk_quota_reject_map); + hash_seq_init(&hash_seq, disk_quota_reject_map); + while ((rejectmap_entry = hash_seq_search(&hash_seq)) != NULL) { - appendStringInfo(&rows, "ROW(%d, %d, %d, %d, %s)", blackmap_entry->keyitem.targetoid, - blackmap_entry->keyitem.databaseoid, blackmap_entry->keyitem.tablespaceoid, - blackmap_entry->keyitem.targettype, blackmap_entry->segexceeded ? "true" : "false"); + appendStringInfo(&rows, "ROW(%d, %d, %d, %d, %s)", rejectmap_entry->keyitem.targetoid, + rejectmap_entry->keyitem.databaseoid, rejectmap_entry->keyitem.tablespaceoid, + rejectmap_entry->keyitem.targettype, rejectmap_entry->segexceeded ? "true" : "false"); if (++count != num_entries) appendStringInfo(&rows, ","); } - LWLockRelease(diskquota_locks.black_map_lock); + LWLockRelease(diskquota_locks.reject_map_lock); count = 0; num_entries = hash_get_num_entries(local_active_table_stat_map); @@ -1141,8 +1141,8 @@ dispatch_blackmap(HTAB *local_active_table_stat_map) } appendStringInfo(&sql, - "select diskquota.refresh_blackmap(" - "ARRAY[%s]::diskquota.blackmap_entry[], " + "select diskquota.refresh_rejectmap(" + "ARRAY[%s]::diskquota.rejectmap_entry[], " "ARRAY[%s]::oid[])", rows.data, active_oids.data); CdbDispatchCommand(sql.data, DF_NONE, &cdb_pgresults); @@ -1366,43 +1366,43 @@ get_rel_name_namespace(Oid relid, Oid *nsOid, char *relname) } static bool -check_blackmap_by_relfilenode(RelFileNode relfilenode) +check_rejectmap_by_relfilenode(RelFileNode relfilenode) { - bool found; - BlackMapEntry keyitem; - GlobalBlackMapEntry *entry; + bool found; + RejectMapEntry keyitem; + GlobalRejectMapEntry *entry; - SIMPLE_FAULT_INJECTOR("check_blackmap_by_relfilenode"); + SIMPLE_FAULT_INJECTOR("check_rejectmap_by_relfilenode"); memset(&keyitem, 0, sizeof(keyitem)); memcpy(&keyitem.relfilenode, &relfilenode, sizeof(RelFileNode)); - LWLockAcquire(diskquota_locks.black_map_lock, LW_SHARED); - entry = hash_search(disk_quota_black_map, &keyitem, HASH_FIND, &found); + LWLockAcquire(diskquota_locks.reject_map_lock, LW_SHARED); + entry = hash_search(disk_quota_reject_map, &keyitem, HASH_FIND, &found); if (found && entry) { - GlobalBlackMapEntry segblackentry; - memcpy(&segblackentry.keyitem, &entry->auxblockinfo, sizeof(BlackMapEntry)); - segblackentry.segexceeded = entry->segexceeded; - LWLockRelease(diskquota_locks.black_map_lock); + GlobalRejectMapEntry segrejectentry; + memcpy(&segrejectentry.keyitem, &entry->auxblockinfo, sizeof(RejectMapEntry)); + segrejectentry.segexceeded = entry->segexceeded; + LWLockRelease(diskquota_locks.reject_map_lock); - export_exceeded_error(&segblackentry, true /*skip_name*/); + export_exceeded_error(&segrejectentry, true /*skip_name*/); return false; } - 
LWLockRelease(diskquota_locks.black_map_lock); + LWLockRelease(diskquota_locks.reject_map_lock); return true; } /* * This function takes relowner, relnamespace, reltablespace as arguments, - * prepares the searching key of the global blackmap for us. + * prepares the searching key of the global rejectmap for us. */ static void -prepare_blackmap_search_key(BlackMapEntry *keyitem, QuotaType type, Oid relowner, Oid relnamespace, Oid reltablespace) +prepare_rejectmap_search_key(RejectMapEntry *keyitem, QuotaType type, Oid relowner, Oid relnamespace, Oid reltablespace) { Assert(keyitem != NULL); - memset(keyitem, 0, sizeof(BlackMapEntry)); + memset(keyitem, 0, sizeof(RejectMapEntry)); if (type == ROLE_QUOTA || type == ROLE_TABLESPACE_QUOTA) keyitem->targetoid = relowner; else if (type == NAMESPACE_QUOTA || type == NAMESPACE_TABLESPACE_QUOTA) @@ -1416,7 +1416,7 @@ prepare_blackmap_search_key(BlackMapEntry *keyitem, QuotaType type, Oid relowner keyitem->tablespaceoid = reltablespace; else { - /* refer to add_quota_to_blacklist */ + /* refer to add_quota_to_rejectmap */ keyitem->tablespaceoid = InvalidOid; } keyitem->databaseoid = MyDatabaseId; @@ -1429,14 +1429,14 @@ prepare_blackmap_search_key(BlackMapEntry *keyitem, QuotaType type, Oid relowner * Do enforcement if quota exceeds. */ static bool -check_blackmap_by_reloid(Oid reloid) +check_rejectmap_by_reloid(Oid reloid) { - Oid ownerOid = InvalidOid; - Oid nsOid = InvalidOid; - Oid tablespaceoid = InvalidOid; - bool found; - BlackMapEntry keyitem; - GlobalBlackMapEntry *entry; + Oid ownerOid = InvalidOid; + Oid nsOid = InvalidOid; + Oid tablespaceoid = InvalidOid; + bool found; + RejectMapEntry keyitem; + GlobalRejectMapEntry *entry; bool found_rel = get_rel_owner_schema_tablespace(reloid, &ownerOid, &nsOid, &tablespaceoid); if (!found_rel) @@ -1444,19 +1444,19 @@ check_blackmap_by_reloid(Oid reloid) return true; } - LWLockAcquire(diskquota_locks.black_map_lock, LW_SHARED); + LWLockAcquire(diskquota_locks.reject_map_lock, LW_SHARED); for (QuotaType type = 0; type < NUM_QUOTA_TYPES; ++type) { - prepare_blackmap_search_key(&keyitem, type, ownerOid, nsOid, tablespaceoid); - entry = hash_search(disk_quota_black_map, &keyitem, HASH_FIND, &found); + prepare_rejectmap_search_key(&keyitem, type, ownerOid, nsOid, tablespaceoid); + entry = hash_search(disk_quota_reject_map, &keyitem, HASH_FIND, &found); if (found) { - LWLockRelease(diskquota_locks.black_map_lock); + LWLockRelease(diskquota_locks.reject_map_lock); export_exceeded_error(entry, false /*skip_name*/); return false; } } - LWLockRelease(diskquota_locks.black_map_lock); + LWLockRelease(diskquota_locks.reject_map_lock); return true; } @@ -1474,7 +1474,7 @@ quota_check_common(Oid reloid, RelFileNode *relfilenode) if (diskquota_is_paused()) return true; - if (OidIsValid(reloid)) return check_blackmap_by_reloid(reloid); + if (OidIsValid(reloid)) return check_rejectmap_by_reloid(reloid); enable_hardlimit = diskquota_hardlimit; @@ -1482,30 +1482,30 @@ quota_check_common(Oid reloid, RelFileNode *relfilenode) if (SIMPLE_FAULT_INJECTOR("enable_check_quota_by_relfilenode") == FaultInjectorTypeSkip) enable_hardlimit = true; #endif - if (relfilenode && enable_hardlimit) return check_blackmap_by_relfilenode(*relfilenode); + if (relfilenode && enable_hardlimit) return check_rejectmap_by_relfilenode(*relfilenode); return true; } /* - * invalidate all black entry with a specific dbid in SHM + * invalidate all reject entry with a specific dbid in SHM */ void -invalidate_database_blackmap(Oid dbid) 
+invalidate_database_rejectmap(Oid dbid) { - BlackMapEntry *entry; + RejectMapEntry *entry; HASH_SEQ_STATUS iter; - LWLockAcquire(diskquota_locks.black_map_lock, LW_EXCLUSIVE); - hash_seq_init(&iter, disk_quota_black_map); + LWLockAcquire(diskquota_locks.reject_map_lock, LW_EXCLUSIVE); + hash_seq_init(&iter, disk_quota_reject_map); while ((entry = hash_seq_search(&iter)) != NULL) { if (entry->databaseoid == dbid || entry->relfilenode.dbNode == dbid) { - hash_search(disk_quota_black_map, entry, HASH_REMOVE, NULL); + hash_search(disk_quota_reject_map, entry, HASH_REMOVE, NULL); } } - LWLockRelease(diskquota_locks.black_map_lock); + LWLockRelease(diskquota_locks.reject_map_lock); } static char * @@ -1545,42 +1545,42 @@ GetUserName(Oid relowner, bool skip_name) } static void -export_exceeded_error(GlobalBlackMapEntry *entry, bool skip_name) +export_exceeded_error(GlobalRejectMapEntry *entry, bool skip_name) { - BlackMapEntry *blackentry = &entry->keyitem; - switch (blackentry->targettype) + RejectMapEntry *rejectentry = &entry->keyitem; + switch (rejectentry->targettype) { case NAMESPACE_QUOTA: ereport(ERROR, (errcode(ERRCODE_DISK_FULL), errmsg("schema's disk space quota exceeded with name: %s", - GetNamespaceName(blackentry->targetoid, skip_name)))); + GetNamespaceName(rejectentry->targetoid, skip_name)))); break; case ROLE_QUOTA: ereport(ERROR, (errcode(ERRCODE_DISK_FULL), errmsg("role's disk space quota exceeded with name: %s", - GetUserName(blackentry->targetoid, skip_name)))); + GetUserName(rejectentry->targetoid, skip_name)))); break; case NAMESPACE_TABLESPACE_QUOTA: if (entry->segexceeded) ereport(ERROR, (errcode(ERRCODE_DISK_FULL), errmsg("tablespace: %s, schema: %s diskquota exceeded per segment quota", - GetTablespaceName(blackentry->tablespaceoid, skip_name), - GetNamespaceName(blackentry->targetoid, skip_name)))); + GetTablespaceName(rejectentry->tablespaceoid, skip_name), + GetNamespaceName(rejectentry->targetoid, skip_name)))); else ereport(ERROR, (errcode(ERRCODE_DISK_FULL), errmsg("tablespace: %s, schema: %s diskquota exceeded", - GetTablespaceName(blackentry->tablespaceoid, skip_name), - GetNamespaceName(blackentry->targetoid, skip_name)))); + GetTablespaceName(rejectentry->tablespaceoid, skip_name), + GetNamespaceName(rejectentry->targetoid, skip_name)))); break; case ROLE_TABLESPACE_QUOTA: if (entry->segexceeded) ereport(ERROR, (errcode(ERRCODE_DISK_FULL), errmsg("tablespace: %s, role: %s diskquota exceeded per segment quota", - GetTablespaceName(blackentry->tablespaceoid, skip_name), - GetUserName(blackentry->targetoid, skip_name)))); + GetTablespaceName(rejectentry->tablespaceoid, skip_name), + GetUserName(rejectentry->targetoid, skip_name)))); else ereport(ERROR, (errcode(ERRCODE_DISK_FULL), errmsg("tablespace: %s, role: %s diskquota exceeded", - GetTablespaceName(blackentry->tablespaceoid, skip_name), - GetUserName(blackentry->targetoid, skip_name)))); + GetTablespaceName(rejectentry->tablespaceoid, skip_name), + GetUserName(rejectentry->targetoid, skip_name)))); break; default: ereport(ERROR, (errcode(ERRCODE_DISK_FULL), errmsg("diskquota exceeded, unknown quota type"))); @@ -1588,53 +1588,54 @@ export_exceeded_error(GlobalBlackMapEntry *entry, bool skip_name) } /* - * refresh_blackmap() takes two arguments. - * The first argument is an array of blackmap entries on QD. + * refresh_rejectmap() takes two arguments. + * The first argument is an array of rejectmap entries on QD. * The second argument is an array of active relations' oid. 
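 * Passing two empty arrays clears the rejectmap on the executing segment, e.g.
 *   SELECT diskquota.refresh_rejectmap(ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]);
 * (editor's illustration; the regression tests use exactly this call to clean up).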
* * The basic idea is that, we iterate over the active relations' oid, check that - * whether the relation's owner/tablespace/namespace is in one of the blackmap + * whether the relation's owner/tablespace/namespace is in one of the rejectmap * entries dispatched from diskquota worker from QD. If the relation should be * blocked, we then add its relfilenode together with the toast, toast index, - * appendonly, appendonly index relations' relfilenodes to the global blackmap. + * appendonly, appendonly index relations' relfilenodes to the global rejectmap. * Note that, this UDF is called on segment servers by diskquota worker on QD and - * the global blackmap on segment servers is indexed by relfilenode. + * the global rejectmap on segment servers is indexed by relfilenode. */ -PG_FUNCTION_INFO_V1(refresh_blackmap); +PG_FUNCTION_INFO_V1(refresh_rejectmap); Datum -refresh_blackmap(PG_FUNCTION_ARGS) +refresh_rejectmap(PG_FUNCTION_ARGS) { - ArrayType *blackmap_array_type = PG_GETARG_ARRAYTYPE_P(0); - ArrayType *active_oid_array_type = PG_GETARG_ARRAYTYPE_P(1); - Oid blackmap_elem_type = ARR_ELEMTYPE(blackmap_array_type); - Oid active_oid_elem_type = ARR_ELEMTYPE(active_oid_array_type); - Datum *datums; - bool *nulls; - int16 elem_width; - bool elem_type_by_val; - char elem_alignment_code; - int count; - HeapTupleHeader lt; - bool segexceeded; - GlobalBlackMapEntry *blackmapentry; - HASH_SEQ_STATUS hash_seq; - HTAB *local_blackmap; - HASHCTL hashctl; - int ret_code; + ArrayType *rejectmap_array_type = PG_GETARG_ARRAYTYPE_P(0); + ArrayType *active_oid_array_type = PG_GETARG_ARRAYTYPE_P(1); + Oid rejectmap_elem_type = ARR_ELEMTYPE(rejectmap_array_type); + Oid active_oid_elem_type = ARR_ELEMTYPE(active_oid_array_type); + Datum *datums; + bool *nulls; + int16 elem_width; + bool elem_type_by_val; + char elem_alignment_code; + int count; + HeapTupleHeader lt; + bool segexceeded; + GlobalRejectMapEntry *rejectmapentry; + HASH_SEQ_STATUS hash_seq; + HTAB *local_rejectmap; + HASHCTL hashctl; + int ret_code; if (!superuser()) - ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("must be superuser to update blackmap"))); + ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("must be superuser to update rejectmap"))); if (IS_QUERY_DISPATCHER()) - ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), errmsg("\"refresh_blackmap()\" can only be executed on QE."))); - if (ARR_NDIM(blackmap_array_type) > 1 || ARR_NDIM(active_oid_array_type) > 1) + ereport(ERROR, + (errcode(ERRCODE_INTERNAL_ERROR), errmsg("\"refresh_rejectmap()\" can only be executed on QE."))); + if (ARR_NDIM(rejectmap_array_type) > 1 || ARR_NDIM(active_oid_array_type) > 1) ereport(ERROR, (errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR), errmsg("1-dimensional array needed"))); - /* Firstly, clear the blackmap entries. */ - LWLockAcquire(diskquota_locks.black_map_lock, LW_EXCLUSIVE); - hash_seq_init(&hash_seq, disk_quota_black_map); - while ((blackmapentry = hash_seq_search(&hash_seq)) != NULL) - hash_search(disk_quota_black_map, &blackmapentry->keyitem, HASH_REMOVE, NULL); - LWLockRelease(diskquota_locks.black_map_lock); + /* Firstly, clear the rejectmap entries. 
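 * The map is rebuilt from scratch on every call, so stale entries from the
 * previous dispatch cannot linger.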
*/ + LWLockAcquire(diskquota_locks.reject_map_lock, LW_EXCLUSIVE); + hash_seq_init(&hash_seq, disk_quota_reject_map); + while ((rejectmapentry = hash_seq_search(&hash_seq)) != NULL) + hash_search(disk_quota_reject_map, &rejectmapentry->keyitem, HASH_REMOVE, NULL); + LWLockRelease(diskquota_locks.reject_map_lock); ret_code = SPI_connect(); if (ret_code != SPI_OK_CONNECT) @@ -1642,55 +1643,55 @@ refresh_blackmap(PG_FUNCTION_ARGS) errmsg("unable to connect to execute internal query, return code: %d", ret_code))); /* - * Secondly, iterate over blackmap entries and add these entries to the local black map + * Secondly, iterate over rejectmap entries and add these entries to the local reject map * on segment servers so that we are able to check whether the given relation (by oid) - * should be blacked in O(1) time complexity in third step. + * should be rejected in O(1) time complexity in the third step. */ memset(&hashctl, 0, sizeof(hashctl)); - hashctl.keysize = sizeof(BlackMapEntry); - hashctl.entrysize = sizeof(GlobalBlackMapEntry); + hashctl.keysize = sizeof(RejectMapEntry); + hashctl.entrysize = sizeof(GlobalRejectMapEntry); hashctl.hcxt = CurrentMemoryContext; hashctl.hash = tag_hash; /* - * Since uncommitted relations' information and the global blackmap entries + * Uncommitted relations' information and the global rejectmap entries * are cached in shared memory. The memory regions are guarded by lightweight - * locks. In order not to hold multiple locks at the same time, We add blackmap - * entries into the local_blackmap below and then flush the content of the - * local_blackmap to the global blackmap at the end of this UDF. + * locks. In order not to hold multiple locks at the same time, we add rejectmap + * entries into the local_rejectmap below and then flush the content of the + * local_rejectmap to the global rejectmap at the end of this UDF.
*/ - local_blackmap = hash_create("local_blackmap", 1024, &hashctl, HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); - get_typlenbyvalalign(blackmap_elem_type, &elem_width, &elem_type_by_val, &elem_alignment_code); - deconstruct_array(blackmap_array_type, blackmap_elem_type, elem_width, elem_type_by_val, elem_alignment_code, + local_rejectmap = hash_create("local_rejectmap", 1024, &hashctl, HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); + get_typlenbyvalalign(rejectmap_elem_type, &elem_width, &elem_type_by_val, &elem_alignment_code); + deconstruct_array(rejectmap_array_type, rejectmap_elem_type, elem_width, elem_type_by_val, elem_alignment_code, &datums, &nulls, &count); for (int i = 0; i < count; ++i) { - BlackMapEntry keyitem; - bool isnull; + RejectMapEntry keyitem; + bool isnull; if (nulls[i]) continue; - memset(&keyitem, 0, sizeof(BlackMapEntry)); + memset(&keyitem, 0, sizeof(RejectMapEntry)); lt = DatumGetHeapTupleHeader(datums[i]); keyitem.targetoid = DatumGetObjectId(GetAttributeByNum(lt, 1, &isnull)); keyitem.databaseoid = DatumGetObjectId(GetAttributeByNum(lt, 2, &isnull)); keyitem.tablespaceoid = DatumGetObjectId(GetAttributeByNum(lt, 3, &isnull)); keyitem.targettype = DatumGetInt32(GetAttributeByNum(lt, 4, &isnull)); - /* blackmap entries from QD should have the real tablespace oid */ + /* rejectmap entries from QD should have the real tablespace oid */ if ((keyitem.targettype == NAMESPACE_TABLESPACE_QUOTA || keyitem.targettype == ROLE_TABLESPACE_QUOTA)) { Assert(OidIsValid(keyitem.tablespaceoid)); } segexceeded = DatumGetBool(GetAttributeByNum(lt, 5, &isnull)); - blackmapentry = hash_search(local_blackmap, &keyitem, HASH_ENTER_NULL, NULL); - if (blackmapentry) blackmapentry->segexceeded = segexceeded; + rejectmapentry = hash_search(local_rejectmap, &keyitem, HASH_ENTER_NULL, NULL); + if (rejectmapentry) rejectmapentry->segexceeded = segexceeded; } /* * Thirdly, iterate over the active oid list. Check that if the relation should be blocked. * If the relation should be blocked, we insert the toast, toast index, appendonly, appendonly - * index relations to the global black map. + * index relations to the global reject map. */ get_typlenbyvalalign(active_oid_elem_type, &elem_width, &elem_type_by_val, &elem_alignment_code); deconstruct_array(active_oid_array_type, active_oid_elem_type, elem_width, elem_type_by_val, elem_alignment_code, @@ -1707,24 +1708,24 @@ refresh_blackmap(PG_FUNCTION_ARGS) tuple = SearchSysCacheCopy1(RELOID, active_oid); if (HeapTupleIsValid(tuple)) { - Form_pg_class form = (Form_pg_class)GETSTRUCT(tuple); - Oid relnamespace = form->relnamespace; - Oid reltablespace = OidIsValid(form->reltablespace) ? form->reltablespace : MyDatabaseTableSpace; - Oid relowner = form->relowner; - BlackMapEntry keyitem; - bool found; + Form_pg_class form = (Form_pg_class)GETSTRUCT(tuple); + Oid relnamespace = form->relnamespace; + Oid reltablespace = OidIsValid(form->reltablespace) ? form->reltablespace : MyDatabaseTableSpace; + Oid relowner = form->relowner; + RejectMapEntry keyitem; + bool found; for (QuotaType type = 0; type < NUM_QUOTA_TYPES; ++type) { /* Check that if the current relation should be blocked. 
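 * (probe the local map once per quota type, using the key built by
 * prepare_rejectmap_search_key()).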
*/ - prepare_blackmap_search_key(&keyitem, type, relowner, relnamespace, reltablespace); - blackmapentry = hash_search(local_blackmap, &keyitem, HASH_FIND, &found); - if (found && blackmapentry) + prepare_rejectmap_search_key(&keyitem, type, relowner, relnamespace, reltablespace); + rejectmapentry = hash_search(local_rejectmap, &keyitem, HASH_FIND, &found); + if (found && rejectmapentry) { /* * If the current relation is blocked, we should add the relfilenode * of itself together with the relfilenodes of its toast relation and - * appendonly relations to the global black map. + * appendonly relations to the global reject map. */ List *oid_list = NIL; ListCell *cell = NULL; @@ -1759,7 +1760,7 @@ refresh_blackmap(PG_FUNCTION_ARGS) oid_list = list_concat(oid_list, diskquota_get_index_list(aovisimaprelid)); } - /* Iterate over the oid_list and add their relfilenodes to the blackmap. */ + /* Iterate over the oid_list and add their relfilenodes to the rejectmap. */ foreach (cell, oid_list) { Oid curr_oid = lfirst_oid(cell); @@ -1769,22 +1770,22 @@ refresh_blackmap(PG_FUNCTION_ARGS) Form_pg_class curr_form = (Form_pg_class)GETSTRUCT(curr_tuple); Oid curr_reltablespace = OidIsValid(curr_form->reltablespace) ? curr_form->reltablespace : MyDatabaseTableSpace; - RelFileNode relfilenode = {.dbNode = MyDatabaseId, - .relNode = curr_form->relfilenode, - .spcNode = curr_reltablespace}; - bool found; - GlobalBlackMapEntry *blocked_filenode_entry; - BlackMapEntry blocked_filenode_keyitem; - - memset(&blocked_filenode_keyitem, 0, sizeof(BlackMapEntry)); + RelFileNode relfilenode = {.dbNode = MyDatabaseId, + .relNode = curr_form->relfilenode, + .spcNode = curr_reltablespace}; + bool found; + GlobalRejectMapEntry *blocked_filenode_entry; + RejectMapEntry blocked_filenode_keyitem; + + memset(&blocked_filenode_keyitem, 0, sizeof(RejectMapEntry)); memcpy(&blocked_filenode_keyitem.relfilenode, &relfilenode, sizeof(RelFileNode)); blocked_filenode_entry = - hash_search(local_blackmap, &blocked_filenode_keyitem, HASH_ENTER_NULL, &found); + hash_search(local_rejectmap, &blocked_filenode_keyitem, HASH_ENTER_NULL, &found); if (!found && blocked_filenode_entry) { - memcpy(&blocked_filenode_entry->auxblockinfo, &keyitem, sizeof(BlackMapEntry)); - blocked_filenode_entry->segexceeded = blackmapentry->segexceeded; + memcpy(&blocked_filenode_entry->auxblockinfo, &keyitem, sizeof(RejectMapEntry)); + blocked_filenode_entry->segexceeded = rejectmapentry->segexceeded; } } } @@ -1808,17 +1809,17 @@ refresh_blackmap(PG_FUNCTION_ARGS) relation_cache_entry = hash_search(relation_cache, &active_oid, HASH_FIND, &found); if (found && relation_cache_entry) { - Oid relnamespace = relation_cache_entry->namespaceoid; - Oid reltablespace = relation_cache_entry->rnode.node.spcNode; - Oid relowner = relation_cache_entry->owneroid; - BlackMapEntry keyitem; + Oid relnamespace = relation_cache_entry->namespaceoid; + Oid reltablespace = relation_cache_entry->rnode.node.spcNode; + Oid relowner = relation_cache_entry->owneroid; + RejectMapEntry keyitem; for (QuotaType type = 0; type < NUM_QUOTA_TYPES; ++type) { /* Check that if the current relation should be blocked. 
*/ - prepare_blackmap_search_key(&keyitem, type, relowner, relnamespace, reltablespace); - blackmapentry = hash_search(local_blackmap, &keyitem, HASH_FIND, &found); + prepare_rejectmap_search_key(&keyitem, type, relowner, relnamespace, reltablespace); + rejectmapentry = hash_search(local_rejectmap, &keyitem, HASH_FIND, &found); - if (found && blackmapentry) + if (found && rejectmapentry) { List *oid_list = NIL; ListCell *cell = NULL; @@ -1830,24 +1831,24 @@ refresh_blackmap(PG_FUNCTION_ARGS) foreach (cell, oid_list) { - bool found; - GlobalBlackMapEntry *blocked_filenode_entry; - BlackMapEntry blocked_filenode_keyitem; - Oid curr_oid = lfirst_oid(cell); + bool found; + GlobalRejectMapEntry *blocked_filenode_entry; + RejectMapEntry blocked_filenode_keyitem; + Oid curr_oid = lfirst_oid(cell); relation_cache_entry = hash_search(relation_cache, &curr_oid, HASH_FIND, &found); if (found && relation_cache_entry) { - memset(&blocked_filenode_keyitem, 0, sizeof(BlackMapEntry)); + memset(&blocked_filenode_keyitem, 0, sizeof(RejectMapEntry)); memcpy(&blocked_filenode_keyitem.relfilenode, &relation_cache_entry->rnode.node, sizeof(RelFileNode)); - blocked_filenode_entry = - hash_search(local_blackmap, &blocked_filenode_keyitem, HASH_ENTER_NULL, &found); + blocked_filenode_entry = hash_search(local_rejectmap, &blocked_filenode_keyitem, + HASH_ENTER_NULL, &found); if (!found && blocked_filenode_entry) { - memcpy(&blocked_filenode_entry->auxblockinfo, &keyitem, sizeof(BlackMapEntry)); - blocked_filenode_entry->segexceeded = blackmapentry->segexceeded; + memcpy(&blocked_filenode_entry->auxblockinfo, &keyitem, sizeof(RejectMapEntry)); + blocked_filenode_entry->segexceeded = rejectmapentry->segexceeded; } } } @@ -1858,44 +1859,44 @@ refresh_blackmap(PG_FUNCTION_ARGS) } } - /* Flush the content of local_blackmap to the global blackmap. */ - LWLockAcquire(diskquota_locks.black_map_lock, LW_EXCLUSIVE); - hash_seq_init(&hash_seq, local_blackmap); - while ((blackmapentry = hash_seq_search(&hash_seq)) != NULL) + /* Flush the content of local_rejectmap to the global rejectmap. */ + LWLockAcquire(diskquota_locks.reject_map_lock, LW_EXCLUSIVE); + hash_seq_init(&hash_seq, local_rejectmap); + while ((rejectmapentry = hash_seq_search(&hash_seq)) != NULL) { - bool found; - GlobalBlackMapEntry *new_entry; - new_entry = hash_search(disk_quota_black_map, &blackmapentry->keyitem, HASH_ENTER_NULL, &found); + bool found; + GlobalRejectMapEntry *new_entry; + new_entry = hash_search(disk_quota_reject_map, &rejectmapentry->keyitem, HASH_ENTER_NULL, &found); /* * We don't perform soft-limit on segment servers, so we don't flush the - * blackmap entry with a valid targetoid to the global blackmap on segment + * rejectmap entry with a valid targetoid to the global rejectmap on segment * servers. */ - if (!found && new_entry && !OidIsValid(blackmapentry->keyitem.targetoid)) - memcpy(new_entry, blackmapentry, sizeof(GlobalBlackMapEntry)); + if (!found && new_entry && !OidIsValid(rejectmapentry->keyitem.targetoid)) + memcpy(new_entry, rejectmapentry, sizeof(GlobalRejectMapEntry)); } - LWLockRelease(diskquota_locks.black_map_lock); + LWLockRelease(diskquota_locks.reject_map_lock); SPI_finish(); PG_RETURN_VOID(); } /* - * show_blackmap() provides developers or users to dump the blackmap in shared - * memory on a single server. If you want to query blackmap on segment servers, + * show_rejectmap() provides developers or users to dump the rejectmap in shared + * memory on a single server. 
If you want to query rejectmap on segment servers, * you should dispatch this query to segments. */ -PG_FUNCTION_INFO_V1(show_blackmap); +PG_FUNCTION_INFO_V1(show_rejectmap); Datum -show_blackmap(PG_FUNCTION_ARGS) +show_rejectmap(PG_FUNCTION_ARGS) { - FuncCallContext *funcctx; - GlobalBlackMapEntry *blackmap_entry; - struct BlackMapCtx + FuncCallContext *funcctx; + GlobalRejectMapEntry *rejectmap_entry; + struct RejectMapCtx { - HASH_SEQ_STATUS blackmap_seq; - HTAB *blackmap; - } * blackmap_ctx; + HASH_SEQ_STATUS rejectmap_seq; + HTAB *rejectmap; + } * rejectmap_ctx; if (SRF_IS_FIRSTCALL()) { @@ -1925,59 +1926,60 @@ show_blackmap(PG_FUNCTION_ARGS) /* Create a local hash table and fill it with entries from shared memory. */ memset(&hashctl, 0, sizeof(hashctl)); - hashctl.keysize = sizeof(BlackMapEntry); - hashctl.entrysize = sizeof(GlobalBlackMapEntry); + hashctl.keysize = sizeof(RejectMapEntry); + hashctl.entrysize = sizeof(GlobalRejectMapEntry); hashctl.hcxt = CurrentMemoryContext; hashctl.hash = tag_hash; - blackmap_ctx = (struct BlackMapCtx *)palloc(sizeof(struct BlackMapCtx)); - blackmap_ctx->blackmap = - hash_create("blackmap_ctx blackmap", 1024, &hashctl, HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); + rejectmap_ctx = (struct RejectMapCtx *)palloc(sizeof(struct RejectMapCtx)); + rejectmap_ctx->rejectmap = + hash_create("rejectmap_ctx rejectmap", 1024, &hashctl, HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); - LWLockAcquire(diskquota_locks.black_map_lock, LW_SHARED); - hash_seq_init(&hash_seq, disk_quota_black_map); - while ((blackmap_entry = hash_seq_search(&hash_seq)) != NULL) + LWLockAcquire(diskquota_locks.reject_map_lock, LW_SHARED); + hash_seq_init(&hash_seq, disk_quota_reject_map); + while ((rejectmap_entry = hash_seq_search(&hash_seq)) != NULL) { - GlobalBlackMapEntry *local_blackmap_entry = NULL; - local_blackmap_entry = hash_search(blackmap_ctx->blackmap, &blackmap_entry->keyitem, HASH_ENTER_NULL, NULL); - if (local_blackmap_entry) + GlobalRejectMapEntry *local_rejectmap_entry = NULL; + local_rejectmap_entry = + hash_search(rejectmap_ctx->rejectmap, &rejectmap_entry->keyitem, HASH_ENTER_NULL, NULL); + if (local_rejectmap_entry) { - memcpy(&local_blackmap_entry->keyitem, &blackmap_entry->keyitem, sizeof(BlackMapEntry)); - local_blackmap_entry->segexceeded = blackmap_entry->segexceeded; - memcpy(&local_blackmap_entry->auxblockinfo, &blackmap_entry->auxblockinfo, sizeof(BlackMapEntry)); + memcpy(&local_rejectmap_entry->keyitem, &rejectmap_entry->keyitem, sizeof(RejectMapEntry)); + local_rejectmap_entry->segexceeded = rejectmap_entry->segexceeded; + memcpy(&local_rejectmap_entry->auxblockinfo, &rejectmap_entry->auxblockinfo, sizeof(RejectMapEntry)); } } - LWLockRelease(diskquota_locks.black_map_lock); + LWLockRelease(diskquota_locks.reject_map_lock); /* Setup first calling context. 
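 * The sequential-scan state over the copied map is stashed in
 * funcctx->user_fctx and consumed on subsequent SRF calls.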
*/ - hash_seq_init(&(blackmap_ctx->blackmap_seq), blackmap_ctx->blackmap); - funcctx->user_fctx = (void *)blackmap_ctx; + hash_seq_init(&(rejectmap_ctx->rejectmap_seq), rejectmap_ctx->rejectmap); + funcctx->user_fctx = (void *)rejectmap_ctx; MemoryContextSwitchTo(oldcontext); } - funcctx = SRF_PERCALL_SETUP(); - blackmap_ctx = (struct BlackMapCtx *)funcctx->user_fctx; + funcctx = SRF_PERCALL_SETUP(); + rejectmap_ctx = (struct RejectMapCtx *)funcctx->user_fctx; - while ((blackmap_entry = hash_seq_search(&(blackmap_ctx->blackmap_seq))) != NULL) + while ((rejectmap_entry = hash_seq_search(&(rejectmap_ctx->rejectmap_seq))) != NULL) { #define _TARGETTYPE_STR_SIZE 32 - Datum result; - Datum values[9]; - bool nulls[9]; - HeapTuple tuple; - BlackMapEntry keyitem; - char targettype_str[_TARGETTYPE_STR_SIZE]; - RelFileNode blocked_relfilenode; - - memcpy(&blocked_relfilenode, &blackmap_entry->keyitem.relfilenode, sizeof(RelFileNode)); + Datum result; + Datum values[9]; + bool nulls[9]; + HeapTuple tuple; + RejectMapEntry keyitem; + char targettype_str[_TARGETTYPE_STR_SIZE]; + RelFileNode blocked_relfilenode; + + memcpy(&blocked_relfilenode, &rejectmap_entry->keyitem.relfilenode, sizeof(RelFileNode)); /* - * If the blackmap entry is indexed by relfilenode, we dump the blocking + * If the rejectmap entry is indexed by relfilenode, we dump the blocking * condition from auxblockinfo. */ if (!OidIsValid(blocked_relfilenode.relNode)) - memcpy(&keyitem, &blackmap_entry->keyitem, sizeof(keyitem)); + memcpy(&keyitem, &rejectmap_entry->keyitem, sizeof(keyitem)); else - memcpy(&keyitem, &blackmap_entry->auxblockinfo, sizeof(keyitem)); + memcpy(&keyitem, &rejectmap_entry->auxblockinfo, sizeof(keyitem)); memset(targettype_str, 0, sizeof(targettype_str)); switch ((QuotaType)keyitem.targettype) @@ -2003,7 +2005,7 @@ show_blackmap(PG_FUNCTION_ARGS) values[1] = ObjectIdGetDatum(keyitem.targetoid); values[2] = ObjectIdGetDatum(keyitem.databaseoid); values[3] = ObjectIdGetDatum(keyitem.tablespaceoid); - values[4] = BoolGetDatum(blackmap_entry->segexceeded); + values[4] = BoolGetDatum(rejectmap_entry->segexceeded); values[5] = ObjectIdGetDatum(blocked_relfilenode.dbNode); values[6] = ObjectIdGetDatum(blocked_relfilenode.spcNode); values[7] = ObjectIdGetDatum(blocked_relfilenode.relNode); diff --git a/tests/isolation2/expected/test_blackmap.out b/tests/isolation2/expected/test_rejectmap.out similarity index 63% rename from tests/isolation2/expected/test_blackmap.out rename to tests/isolation2/expected/test_rejectmap.out index 7decc2f533a..98401ee6f09 100644 --- a/tests/isolation2/expected/test_blackmap.out +++ b/tests/isolation2/expected/test_rejectmap.out @@ -1,5 +1,5 @@ -- --- This file contains tests for dispatching blackmap and canceling +-- This file contains tests for dispatching rejectmap and canceling -- queries in smgrextend hook by relation's relfilenode. -- @@ -7,7 +7,7 @@ -- For role/namespace quota, return as it is. -- For namespace_tablespace/role_tablespace quota, return non-zero tablespaceoid. 
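-- A tablespaceoid of 0 resolves to the database's default tablespace, mirroring
-- the server-side fallback to MyDatabaseTableSpace for an invalid reltablespace.
-- For example (editor's sketch): SELECT get_real_tablespace_oid('NAMESPACE_TABLESPACE', 0)
-- returns pg_database.dattablespace for the current database.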
CREATE OR REPLACE FUNCTION get_real_tablespace_oid(block_type text, tablespaceoid oid) /*in func*/ RETURNS oid AS /*in func*/ $$ /*in func*/ BEGIN /*in func*/ /*in func*/ CASE /*in func*/ WHEN (block_type = 'NAMESPACE') OR (block_type = 'ROLE') THEN RETURN tablespaceoid; /*in func*/ ELSE RETURN ( /*in func*/ CASE tablespaceoid /*in func*/ WHEN 0 THEN (SELECT dattablespace FROM pg_database WHERE datname = CURRENT_DATABASE()) /*in func*/ ELSE /*in func*/ tablespaceoid /*in func*/ END /*in func*/ ); /*in func*/ END CASE; /*in func*/ END; /*in func*/ $$ LANGUAGE plpgsql; /*in func*/ -CREATE OR REPLACE FUNCTION block_relation_on_seg0(rel regclass, block_type text, segexceeded boolean) RETURNS void AS $$ /*in func*/ DECLARE /*in func*/ bt int; /*in func*/ targetoid oid; /*in func*/ BEGIN /*in func*/ CASE block_type /*in func*/ WHEN 'NAMESPACE' THEN /*in func*/ bt = 0; /*in func*/ SELECT relnamespace INTO targetoid /*in func*/ FROM pg_class WHERE relname=rel::text; /*in func*/ WHEN 'ROLE' THEN /*in func*/ bt = 1; /*in func*/ SELECT relowner INTO targetoid /*in func*/ FROM pg_class WHERE relname=rel::text; /*in func*/ WHEN 'NAMESPACE_TABLESPACE' THEN /*in func*/ bt = 2; /*in func*/ SELECT relnamespace INTO targetoid /*in func*/ FROM pg_class WHERE relname=rel::text; /*in func*/ WHEN 'ROLE_TABLESPACE' THEN /*in func*/ bt = 3; /*in func*/ SELECT relowner INTO targetoid /*in func*/ FROM pg_class WHERE relname=rel::text; /*in func*/ END CASE; /*in func*/ PERFORM diskquota.refresh_blackmap( /*in func*/ ARRAY[ /*in func*/ ROW (targetoid, /*in func*/ (SELECT oid FROM pg_database WHERE datname = CURRENT_DATABASE()), /*in func*/ (SELECT get_real_tablespace_oid( /*in func*/ block_type, /*in func*/ (SELECT pg_class.reltablespace FROM pg_class WHERE relname = rel::TEXT) /*in func*/ )), /*in func*/ bt, /*in func*/ segexceeded) /*in func*/ ]::diskquota.blackmap_entry[], /*in func*/ ARRAY[rel]::oid[]) /*in func*/ FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; /*in func*/ END; $$ /*in func*/ LANGUAGE 'plpgsql'; +CREATE OR REPLACE FUNCTION block_relation_on_seg0(rel regclass, block_type text, segexceeded boolean) RETURNS void AS $$ /*in func*/ DECLARE /*in func*/ bt int; /*in func*/ targetoid oid; /*in func*/ BEGIN /*in func*/ CASE block_type /*in func*/ WHEN 'NAMESPACE' THEN /*in func*/ bt = 0; /*in func*/ SELECT relnamespace INTO targetoid /*in func*/ FROM pg_class WHERE relname=rel::text; /*in func*/ WHEN 'ROLE' THEN /*in func*/ bt = 1; /*in func*/ SELECT relowner INTO targetoid /*in func*/ FROM pg_class WHERE relname=rel::text; /*in func*/ WHEN 'NAMESPACE_TABLESPACE' THEN /*in func*/ bt = 2; /*in func*/ SELECT relnamespace INTO targetoid /*in func*/ FROM pg_class WHERE relname=rel::text; /*in func*/ WHEN 'ROLE_TABLESPACE' THEN /*in func*/ bt = 3; /*in func*/ SELECT relowner INTO targetoid /*in func*/ FROM pg_class WHERE relname=rel::text; /*in func*/ END CASE; /*in func*/ PERFORM diskquota.refresh_rejectmap( /*in func*/ ARRAY[ /*in func*/ ROW (targetoid, /*in func*/ (SELECT oid FROM pg_database WHERE datname = CURRENT_DATABASE()), /*in func*/ (SELECT get_real_tablespace_oid( /*in func*/ block_type, /*in func*/ (SELECT pg_class.reltablespace FROM pg_class WHERE relname = rel::TEXT) /*in func*/ )), /*in func*/ bt, /*in func*/ segexceeded) /*in func*/ ]::diskquota.rejectmap_entry[], /*in func*/ ARRAY[rel]::oid[]) /*in func*/ FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; /*in func*/ END; $$ /*in func*/ LANGUAGE 'plpgsql'; CREATE @@ -23,24 +23,24 @@ CREATE TABLE blocked_t1(i int) DISTRIBUTED BY (i); 
CREATE INSERT INTO blocked_t1 SELECT generate_series(1, 100); INSERT 100 --- Inject 'suspension' to check_blackmap_by_relfilenode on seg0. -SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; gp_inject_fault_infinite -------------------------- Success: (1 row) --- Insert a small amount of data into blocked_t1. It will hang up at check_blackmap_by_relfilenode(). +-- Insert a small amount of data into blocked_t1. It will hang up at check_rejectmap_by_relfilenode(). 1&: INSERT INTO blocked_t1 SELECT generate_series(1, 10000); --- Dispatch blackmap to seg0. +-- Dispatch rejectmap to seg0. SELECT block_relation_on_seg0('blocked_t1'::regclass, 'NAMESPACE'::text, false); block_relation_on_seg0 ------------------------ (1 row) -SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; gp_inject_fault_infinite -------------------------- Success: @@ -48,13 +48,13 @@ SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) -- Session 1 will return and emit an error message saying that the quota limit is exceeded on seg0. 1<: <... completed> -ERROR: schema's disk space quota exceeded with name: 2200 (seg0 127.0.0.1:6002 pid=4675) +ERROR: schema's disk space quota exceeded with name: 2200 (seg0 127.0.0.1:6002 pid=2163) --- Clean up the blackmap on seg0. -SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; - refresh_blackmap ------------------- - +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_rejectmap +------------------- + (1 row) -- 2. Test canceling the extending of a toast relation. @@ -62,24 +62,24 @@ CREATE TABLE blocked_t2(i text) DISTRIBUTED BY (i); CREATE INSERT INTO blocked_t2 SELECT generate_series(1, 100); INSERT 100 --- Inject 'suspension' to check_blackmap_by_relfilenode on seg0. -SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; gp_inject_fault_infinite -------------------------- Success: (1 row) --- Insert a small amount of data into blocked_t2. It will hang up at check_blackmap_by_relfilenode(). +-- Insert a small amount of data into blocked_t2. It will hang up at check_rejectmap_by_relfilenode(). 1&: INSERT INTO blocked_t2 SELECT generate_series(1, 10000); --- Dispatch blackmap to seg0. +-- Dispatch rejectmap to seg0. 
SELECT block_relation_on_seg0('blocked_t2'::regclass, 'NAMESPACE'::text, false); block_relation_on_seg0 ------------------------ (1 row) -SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; gp_inject_fault_infinite -------------------------- Success: @@ -87,13 +87,13 @@ SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) -- Session 1 will return and emit an error message saying that the quota limit is exceeded on seg0. 1<: <... completed> -ERROR: schema's disk space quota exceeded with name: 2200 (seg0 127.0.0.1:6002 pid=4675) +ERROR: schema's disk space quota exceeded with name: 2200 (seg0 127.0.0.1:6002 pid=2163) --- Clean up the blackmap on seg0. -SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; - refresh_blackmap ------------------- - +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_rejectmap +------------------- + (1 row) -- 3. Test canceling the extending of an appendonly relation. @@ -101,24 +101,24 @@ CREATE TABLE blocked_t3(i int) WITH (appendonly=true) DISTRIBUTED BY (i); CREATE INSERT INTO blocked_t3 SELECT generate_series(1, 100); INSERT 100 --- Inject 'suspension' to check_blackmap_by_relfilenode on seg0. -SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; gp_inject_fault_infinite -------------------------- Success: (1 row) --- Insert a small amount of data into blocked_t3. It will hang up at check_blackmap_by_relfilenode(). +-- Insert a small amount of data into blocked_t3. It will hang up at check_rejectmap_by_relfilenode(). 1&: INSERT INTO blocked_t3 SELECT generate_series(1, 10000); --- Dispatch blackmap to seg0. +-- Dispatch rejectmap to seg0. SELECT block_relation_on_seg0('blocked_t3'::regclass, 'NAMESPACE'::text, false); block_relation_on_seg0 ------------------------ (1 row) -SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; gp_inject_fault_infinite -------------------------- Success: @@ -126,13 +126,13 @@ SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) -- Session 1 will return and emit an error message saying that the quota limit is exceeded on seg0. 1<: <... completed> -ERROR: schema's disk space quota exceeded with name: 2200 (seg0 127.0.0.1:6002 pid=4675) +ERROR: schema's disk space quota exceeded with name: 2200 (seg0 127.0.0.1:6002 pid=2163) --- Clean up the blackmap on seg0. -SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; - refresh_blackmap ------------------- - +-- Clean up the rejectmap on seg0. 
+SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_rejectmap +------------------- + (1 row) -- 4. Test canceling the extending of an index relation. @@ -142,24 +142,24 @@ CREATE INDEX blocked_t4_index ON blocked_t4(i); CREATE INSERT INTO blocked_t4 SELECT generate_series(1, 100); INSERT 100 --- Inject 'suspension' to check_blackmap_by_relfilenode on seg0. -SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; gp_inject_fault_infinite -------------------------- Success: (1 row) --- Insert a small amount of data into blocked_t4. It will hang up at check_blackmap_by_relfilenode(). +-- Insert a small amount of data into blocked_t4. It will hang up at check_rejectmap_by_relfilenode(). 1&: INSERT INTO blocked_t4 SELECT generate_series(1, 10000); --- Dispatch blackmap to seg0. +-- Dispatch rejectmap to seg0. SELECT block_relation_on_seg0('blocked_t4_index'::regclass, 'NAMESPACE'::text, false); block_relation_on_seg0 ------------------------ (1 row) -SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; gp_inject_fault_infinite -------------------------- Success: @@ -167,13 +167,13 @@ SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) -- Session 1 will return and emit an error message saying that the quota limit is exceeded on seg0. 1<: <... completed> -ERROR: schema's disk space quota exceeded with name: 2200 (seg0 127.0.0.1:6002 pid=4675) +ERROR: schema's disk space quota exceeded with name: 2200 (seg0 127.0.0.1:6002 pid=2163) --- Clean up the blackmap on seg0. -SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; - refresh_blackmap ------------------- - +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_rejectmap +------------------- + (1 row) -- 5. Test error message for NAMESPACE_TABLESPACE_QUOTA when the quota limit is exceeded on segments. @@ -181,8 +181,8 @@ CREATE TABLE blocked_t5(i int) DISTRIBUTED BY (i); CREATE INSERT INTO blocked_t5 SELECT generate_series(1, 100); INSERT 100 --- Inject 'suspension' to check_blackmap_by_relfilenode on seg0. -SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. 
+SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; gp_inject_fault_infinite -------------------------- Success: @@ -193,18 +193,18 @@ SELECT block_relation_on_seg0('blocked_t5'::regclass, 'NAMESPACE_TABLESPACE'::te ------------------------ (1 row) -SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; gp_inject_fault_infinite -------------------------- Success: (1 row) 1<: <... completed> -ERROR: tablespace: 1663, schema: 2200 diskquota exceeded per segment quota (seg0 127.0.0.1:6002 pid=4675) --- Clean up the blackmap on seg0. -SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; - refresh_blackmap ------------------- - +ERROR: tablespace: 1663, schema: 2200 diskquota exceeded per segment quota (seg0 127.0.0.1:6002 pid=2163) +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_rejectmap +------------------- + (1 row) -- 6. Test error message for ROLE_TABLESPACE_QUOTA when the quota limit is exceeded on segments. @@ -212,8 +212,8 @@ CREATE TABLE blocked_t6(i int) DISTRIBUTED BY (i); CREATE INSERT INTO blocked_t6 SELECT generate_series(1, 100); INSERT 100 --- Inject 'suspension' to check_blackmap_by_relfilenode on seg0. -SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; gp_inject_fault_infinite -------------------------- Success: @@ -224,18 +224,18 @@ SELECT block_relation_on_seg0('blocked_t6'::regclass, 'ROLE_TABLESPACE'::text, t ------------------------ (1 row) -SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; gp_inject_fault_infinite -------------------------- Success: (1 row) 1<: <... completed> -ERROR: tablespace: 1663, role: 10 diskquota exceeded per segment quota (seg0 127.0.0.1:6002 pid=4675) --- Clean up the blackmap on seg0. -SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; - refresh_blackmap ------------------- - +ERROR: tablespace: 1663, role: 10 diskquota exceeded per segment quota (seg0 127.0.0.1:6002 pid=2163) +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_rejectmap +------------------- + (1 row) -- Do some clean-ups. @@ -253,7 +253,7 @@ DROP TABLE blocked_t6; DROP -- --- Below are helper functions for testing adding uncommitted relations to blackmap. +-- Below are helper functions for testing adding uncommitted relations to rejectmap. 
-- -- start_ignore CREATE OR REPLACE LANGUAGE plpythonu; @@ -275,8 +275,8 @@ CREATE CREATE OR REPLACE FUNCTION replace_oid_with_relname(given_name text, filename text) RETURNS text AS $$ /*in func*/ BEGIN /*in func*/ RETURN COALESCE( /*in func*/ REGEXP_REPLACE(given_name, /*in func*/ '^(pg_toast_|pg_aoseg_|pg_aovisimap_|pg_aoblkdir_|pg_aocsseg_)\d+', /*in func*/ '\1' || /*in func*/ (SELECT DISTINCT relname FROM read_relation_cache_from_file(filename) /*in func*/ WHERE REGEXP_REPLACE(given_name, '\D', '', 'g') <> '' AND reloid=REGEXP_REPLACE(given_name, '\D', '', 'g')::oid), 'g'), given_name);/*in func*/ END; /*in func*/ $$ LANGUAGE plpgsql; CREATE --- This function helps dispatch blackmap for the given relation to seg0. -CREATE OR REPLACE FUNCTION block_uncommitted_relation_on_seg0(rel text, block_type text, segexceeded boolean, filename text) RETURNS void AS $$ /*in func*/ DECLARE /*in func*/ bt int; /*in func*/ targetoid oid; /*in func*/ BEGIN /*in func*/ CASE block_type /*in func*/ WHEN 'NAMESPACE' THEN /*in func*/ bt = 0; /*in func*/ SELECT relnamespace INTO targetoid /*in func*/ FROM read_relation_cache_from_file(filename) /*in func*/ WHERE relname=rel::text AND segid=0; /*in func*/ WHEN 'ROLE' THEN /*in func*/ bt = 1; /*in func*/ SELECT relowner INTO targetoid /*in func*/ FROM read_relation_cache_from_file(filename) /*in func*/ WHERE relname=rel::text AND segid=0; /*in func*/ WHEN 'NAMESPACE_TABLESPACE' THEN /*in func*/ bt = 2; /*in func*/ SELECT relnamespace INTO targetoid /*in func*/ FROM read_relation_cache_from_file(filename) /*in func*/ WHERE relname=rel::text AND segid=0; /*in func*/ WHEN 'ROLE_TABLESPACE' THEN /*in func*/ bt = 3; /*in func*/ SELECT relowner INTO targetoid /*in func*/ FROM read_relation_cache_from_file(filename) /*in func*/ WHERE relname=rel::text AND segid=0; /*in func*/ END CASE; /*in func*/ PERFORM diskquota.refresh_blackmap( /*in func*/ ARRAY[ /*in func*/ ROW (targetoid, /*in func*/ (SELECT oid FROM pg_database WHERE datname = CURRENT_DATABASE()), /*in func*/ (SELECT get_real_tablespace_oid( /*in func*/ block_type, /*in func*/ (SELECT reltablespace /*in func*/ FROM read_relation_cache_from_file(filename) /*in func*/ WHERE relname = rel::text /*in func*/ AND segid = 0) /*in func*/ )), /*in func*/ bt, /*in func*/ segexceeded) /*in func*/ ]::diskquota.blackmap_entry[], /*in func*/ ARRAY[(SELECT reloid FROM read_relation_cache_from_file(filename) /*in func*/ WHERE relname=rel::text AND segid=0)::regclass]::oid[]) /*in func*/ FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; /*in func*/ END; $$ /*in func*/ LANGUAGE 'plpgsql'; +-- This function helps dispatch rejectmap for the given relation to seg0. 
+CREATE OR REPLACE FUNCTION block_uncommitted_relation_on_seg0(rel text, block_type text, segexceeded boolean, filename text) RETURNS void AS $$ /*in func*/ DECLARE /*in func*/ bt int; /*in func*/ targetoid oid; /*in func*/ BEGIN /*in func*/ CASE block_type /*in func*/ WHEN 'NAMESPACE' THEN /*in func*/ bt = 0; /*in func*/ SELECT relnamespace INTO targetoid /*in func*/ FROM read_relation_cache_from_file(filename) /*in func*/ WHERE relname=rel::text AND segid=0; /*in func*/ WHEN 'ROLE' THEN /*in func*/ bt = 1; /*in func*/ SELECT relowner INTO targetoid /*in func*/ FROM read_relation_cache_from_file(filename) /*in func*/ WHERE relname=rel::text AND segid=0; /*in func*/ WHEN 'NAMESPACE_TABLESPACE' THEN /*in func*/ bt = 2; /*in func*/ SELECT relnamespace INTO targetoid /*in func*/ FROM read_relation_cache_from_file(filename) /*in func*/ WHERE relname=rel::text AND segid=0; /*in func*/ WHEN 'ROLE_TABLESPACE' THEN /*in func*/ bt = 3; /*in func*/ SELECT relowner INTO targetoid /*in func*/ FROM read_relation_cache_from_file(filename) /*in func*/ WHERE relname=rel::text AND segid=0; /*in func*/ END CASE; /*in func*/ PERFORM diskquota.refresh_rejectmap( /*in func*/ ARRAY[ /*in func*/ ROW (targetoid, /*in func*/ (SELECT oid FROM pg_database WHERE datname = CURRENT_DATABASE()), /*in func*/ (SELECT get_real_tablespace_oid( /*in func*/ block_type, /*in func*/ (SELECT reltablespace /*in func*/ FROM read_relation_cache_from_file(filename) /*in func*/ WHERE relname = rel::text /*in func*/ AND segid = 0) /*in func*/ )), /*in func*/ bt, /*in func*/ segexceeded) /*in func*/ ]::diskquota.rejectmap_entry[], /*in func*/ ARRAY[(SELECT reloid FROM read_relation_cache_from_file(filename) /*in func*/ WHERE relname=rel::text AND segid=0)::regclass]::oid[]) /*in func*/ FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; /*in func*/ END; $$ /*in func*/ LANGUAGE 'plpgsql'; CREATE -- 7. Test that we are able to block an ordinary relation on seg0 by its relnamespace. @@ -284,44 +284,44 @@ CREATE BEGIN 1: CREATE TABLE blocked_t7(i int) DISTRIBUTED BY (i); CREATE -1: SELECT dump_relation_cache_to_file('/tmp/test_blackmap.csv'); +1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); dump_relation_cache_to_file ----------------------------- (1 row) --- Inject 'suspension' to check_blackmap_by_relfilenode on seg0. -SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; gp_inject_fault_infinite -------------------------- Success: (1 row) --- Insert a small amount of data into blocked_t7. It will hang up at check_blackmap_by_relfilenode(). +-- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). 1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); -SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE'::text, false, '/tmp/test_blackmap.csv'::text); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE'::text, false, '/tmp/test_rejectmap.csv'::text); block_uncommitted_relation_on_seg0 ------------------------------------ (1 row) -- Show that blocked_t7 is blocked on seg0. 
-2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_blackmap.csv'::text), be.target_type, be.target_oid FROM gp_dist_random('diskquota.blackmap') AS be, read_relation_cache_from_file('/tmp/test_blackmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0; +2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text), be.target_type, be.target_oid FROM gp_dist_random('diskquota.rejectmap') AS be, read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0; segid | relnamespace | reltablespace | relowner | replace_oid_with_relname | target_type | target_oid -------+--------------+---------------+----------+--------------------------+-----------------+------------ 0 | 2200 | 0 | 10 | blocked_t7 | NAMESPACE_QUOTA | 2200 (1 row) -SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; gp_inject_fault_infinite -------------------------- Success: (1 row) 1<: <... completed> -ERROR: schema's disk space quota exceeded with name: 2200 (seg0 127.0.0.1:6002 pid=4675) +ERROR: schema's disk space quota exceeded with name: 2200 (seg0 127.0.0.1:6002 pid=2163) 1: ABORT; ABORT --- Clean up the blackmap on seg0. -SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; - refresh_blackmap ------------------- - +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_rejectmap +------------------- + (1 row) -- 8. Test that we are able to block an ordinary relation on seg0 by its relowner. @@ -329,44 +329,44 @@ SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]: BEGIN 1: CREATE TABLE blocked_t7(i int) DISTRIBUTED BY (i); CREATE -1: SELECT dump_relation_cache_to_file('/tmp/test_blackmap.csv'); +1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); dump_relation_cache_to_file ----------------------------- (1 row) --- Inject 'suspension' to check_blackmap_by_relfilenode on seg0. -SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; gp_inject_fault_infinite -------------------------- Success: (1 row) --- Insert a small amount of data into blocked_t7. It will hang up at check_blackmap_by_relfilenode(). +-- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). 
1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); -SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'ROLE'::text, false, '/tmp/test_blackmap.csv'::text); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'ROLE'::text, false, '/tmp/test_rejectmap.csv'::text); block_uncommitted_relation_on_seg0 ------------------------------------ (1 row) -- Show that blocked_t7 is blocked on seg0. -2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_blackmap.csv'::text), be.target_type, be.target_oid FROM gp_dist_random('diskquota.blackmap') AS be, read_relation_cache_from_file('/tmp/test_blackmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0; +2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text), be.target_type, be.target_oid FROM gp_dist_random('diskquota.rejectmap') AS be, read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0; segid | relnamespace | reltablespace | relowner | replace_oid_with_relname | target_type | target_oid -------+--------------+---------------+----------+--------------------------+-------------+------------ 0 | 2200 | 0 | 10 | blocked_t7 | ROLE_QUOTA | 10 (1 row) -SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; gp_inject_fault_infinite -------------------------- Success: (1 row) 1<: <... completed> -ERROR: role's disk space quota exceeded with name: 10 (seg0 127.0.0.1:6002 pid=4675) +ERROR: role's disk space quota exceeded with name: 10 (seg0 127.0.0.1:6002 pid=2163) 1: ABORT; ABORT --- Clean up the blackmap on seg0. -SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; - refresh_blackmap ------------------- - +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_rejectmap +------------------- + (1 row) -- 9. Test that we are able to block an ordinary relation on seg0 by its relnamespace and reltablespace. @@ -374,44 +374,44 @@ SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]: BEGIN 1: CREATE TABLE blocked_t7(i int) DISTRIBUTED BY (i); CREATE -1: SELECT dump_relation_cache_to_file('/tmp/test_blackmap.csv'); +1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); dump_relation_cache_to_file ----------------------------- (1 row) --- Inject 'suspension' to check_blackmap_by_relfilenode on seg0. -SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; gp_inject_fault_infinite -------------------------- Success: (1 row) --- Insert a small amount of data into blocked_t7. It will hang up at check_blackmap_by_relfilenode(). +-- Insert a small amount of data into blocked_t7. 
It will hang up at check_rejectmap_by_relfilenode(). 1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); -SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE_TABLESPACE'::text, false, '/tmp/test_blackmap.csv'::text); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE_TABLESPACE'::text, false, '/tmp/test_rejectmap.csv'::text); block_uncommitted_relation_on_seg0 ------------------------------------ (1 row) -- Show that blocked_t7 is blocked on seg0. -2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_blackmap.csv'::text), be.target_type, be.target_oid FROM gp_dist_random('diskquota.blackmap') AS be, read_relation_cache_from_file('/tmp/test_blackmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0; +2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text), be.target_type, be.target_oid FROM gp_dist_random('diskquota.rejectmap') AS be, read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0; segid | relnamespace | reltablespace | relowner | replace_oid_with_relname | target_type | target_oid -------+--------------+---------------+----------+--------------------------+----------------------------+------------ 0 | 2200 | 0 | 10 | blocked_t7 | NAMESPACE_TABLESPACE_QUOTA | 2200 (1 row) -SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; gp_inject_fault_infinite -------------------------- Success: (1 row) 1<: <... completed> -ERROR: tablespace: 1663, schema: 2200 diskquota exceeded (seg0 127.0.0.1:6002 pid=4675) +ERROR: tablespace: 1663, schema: 2200 diskquota exceeded (seg0 127.0.0.1:6002 pid=2163) 1: ABORT; ABORT --- Clean up the blackmap on seg0. -SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; - refresh_blackmap ------------------- - +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_rejectmap +------------------- + (1 row) -- 10. Test that we are able to block an ordinary relation on seg0 by its relowner and reltablespace. @@ -419,44 +419,44 @@ SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]: BEGIN 1: CREATE TABLE blocked_t7(i int) DISTRIBUTED BY (i); CREATE -1: SELECT dump_relation_cache_to_file('/tmp/test_blackmap.csv'); +1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); dump_relation_cache_to_file ----------------------------- (1 row) --- Inject 'suspension' to check_blackmap_by_relfilenode on seg0. -SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. 
+SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; gp_inject_fault_infinite -------------------------- Success: (1 row) --- Insert a small amount of data into blocked_t7. It will hang up at check_blackmap_by_relfilenode(). +-- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). 1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); -SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'ROLE_TABLESPACE'::text, false, '/tmp/test_blackmap.csv'::text); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'ROLE_TABLESPACE'::text, false, '/tmp/test_rejectmap.csv'::text); block_uncommitted_relation_on_seg0 ------------------------------------ (1 row) -- Show that blocked_t7 is blocked on seg0. -2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_blackmap.csv'::text), be.target_type, be.target_oid FROM gp_dist_random('diskquota.blackmap') AS be, read_relation_cache_from_file('/tmp/test_blackmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0; +2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text), be.target_type, be.target_oid FROM gp_dist_random('diskquota.rejectmap') AS be, read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0; segid | relnamespace | reltablespace | relowner | replace_oid_with_relname | target_type | target_oid -------+--------------+---------------+----------+--------------------------+-----------------------+------------ 0 | 2200 | 0 | 10 | blocked_t7 | ROLE_TABLESPACE_QUOTA | 10 (1 row) -SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; gp_inject_fault_infinite -------------------------- Success: (1 row) 1<: <... completed> -ERROR: tablespace: 1663, role: 10 diskquota exceeded (seg0 127.0.0.1:6002 pid=4675) +ERROR: tablespace: 1663, role: 10 diskquota exceeded (seg0 127.0.0.1:6002 pid=2163) 1: ABORT; ABORT --- Clean up the blackmap on seg0. -SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; - refresh_blackmap ------------------- - +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_rejectmap +------------------- + (1 row) -- 11. Test that we are able to block an ordinary relation on seg0 by its relnamespace and reltablespace (segexceeded=true). @@ -464,44 +464,44 @@ SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]: BEGIN 1: CREATE TABLE blocked_t7(i int) DISTRIBUTED BY (i); CREATE -1: SELECT dump_relation_cache_to_file('/tmp/test_blackmap.csv'); +1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); dump_relation_cache_to_file ----------------------------- (1 row) --- Inject 'suspension' to check_blackmap_by_relfilenode on seg0. 
-SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; gp_inject_fault_infinite -------------------------- Success: (1 row) --- Insert a small amount of data into blocked_t7. It will hang up at check_blackmap_by_relfilenode(). +-- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). 1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); -SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE_TABLESPACE'::text, true, '/tmp/test_blackmap.csv'::text); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE_TABLESPACE'::text, true, '/tmp/test_rejectmap.csv'::text); block_uncommitted_relation_on_seg0 ------------------------------------ (1 row) -- Show that blocked_t7 is blocked on seg0. -2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_blackmap.csv'::text), be.target_type, be.target_oid FROM gp_dist_random('diskquota.blackmap') AS be, read_relation_cache_from_file('/tmp/test_blackmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0; +2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text), be.target_type, be.target_oid FROM gp_dist_random('diskquota.rejectmap') AS be, read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0; segid | relnamespace | reltablespace | relowner | replace_oid_with_relname | target_type | target_oid -------+--------------+---------------+----------+--------------------------+----------------------------+------------ 0 | 2200 | 0 | 10 | blocked_t7 | NAMESPACE_TABLESPACE_QUOTA | 2200 (1 row) -SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; gp_inject_fault_infinite -------------------------- Success: (1 row) 1<: <... completed> -ERROR: tablespace: 1663, schema: 2200 diskquota exceeded per segment quota (seg0 127.0.0.1:6002 pid=4675) +ERROR: tablespace: 1663, schema: 2200 diskquota exceeded per segment quota (seg0 127.0.0.1:6002 pid=2163) 1: ABORT; ABORT --- Clean up the blackmap on seg0. -SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; - refresh_blackmap ------------------- - +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_rejectmap +------------------- + (1 row) -- 12. Test that we are able to block an ordinary relation on seg0 by its relowner and reltablespace (segexceeded=true). 
@@ -509,44 +509,44 @@ SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]: BEGIN 1: CREATE TABLE blocked_t7(i int) DISTRIBUTED BY (i); CREATE -1: SELECT dump_relation_cache_to_file('/tmp/test_blackmap.csv'); +1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); dump_relation_cache_to_file ----------------------------- (1 row) --- Inject 'suspension' to check_blackmap_by_relfilenode on seg0. -SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; gp_inject_fault_infinite -------------------------- Success: (1 row) --- Insert a small amount of data into blocked_t7. It will hang up at check_blackmap_by_relfilenode(). +-- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). 1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); -SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'ROLE_TABLESPACE'::text, true, '/tmp/test_blackmap.csv'::text); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'ROLE_TABLESPACE'::text, true, '/tmp/test_rejectmap.csv'::text); block_uncommitted_relation_on_seg0 ------------------------------------ (1 row) -- Show that blocked_t7 is blocked on seg0. -2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_blackmap.csv'::text), be.target_type, be.target_oid FROM gp_dist_random('diskquota.blackmap') AS be, read_relation_cache_from_file('/tmp/test_blackmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0; +2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text), be.target_type, be.target_oid FROM gp_dist_random('diskquota.rejectmap') AS be, read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0; segid | relnamespace | reltablespace | relowner | replace_oid_with_relname | target_type | target_oid -------+--------------+---------------+----------+--------------------------+-----------------------+------------ 0 | 2200 | 0 | 10 | blocked_t7 | ROLE_TABLESPACE_QUOTA | 10 (1 row) -SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; gp_inject_fault_infinite -------------------------- Success: (1 row) 1<: <... completed> -ERROR: tablespace: 1663, role: 10 diskquota exceeded per segment quota (seg0 127.0.0.1:6002 pid=4675) +ERROR: tablespace: 1663, role: 10 diskquota exceeded per segment quota (seg0 127.0.0.1:6002 pid=2163) 1: ABORT; ABORT --- Clean up the blackmap on seg0. -SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; - refresh_blackmap ------------------- - +-- Clean up the rejectmap on seg0. 
+SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_rejectmap +------------------- + (1 row) -- 13. Test that we are able to block a toast relation on seg0 by its namespace. @@ -554,46 +554,46 @@ SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]: BEGIN 1: CREATE TABLE blocked_t7(i text) DISTRIBUTED BY (i); CREATE -1: SELECT dump_relation_cache_to_file('/tmp/test_blackmap.csv'); +1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); dump_relation_cache_to_file ----------------------------- (1 row) --- Inject 'suspension' to check_blackmap_by_relfilenode on seg0. -SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; gp_inject_fault_infinite -------------------------- Success: (1 row) --- Insert a small amount of data into blocked_t7. It will hang up at check_blackmap_by_relfilenode(). +-- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). 1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); -SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE'::text, true, '/tmp/test_blackmap.csv'::text); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE'::text, true, '/tmp/test_rejectmap.csv'::text); block_uncommitted_relation_on_seg0 ------------------------------------ (1 row) -- Show that blocked_t7 is blocked on seg0. -2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_blackmap.csv'::text) AS relname, be.target_type, be.target_oid FROM gp_dist_random('diskquota.blackmap') AS be, read_relation_cache_from_file('/tmp/test_blackmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0 ORDER BY relname DESC; +2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text) AS relname, be.target_type, be.target_oid FROM gp_dist_random('diskquota.rejectmap') AS be, read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0 ORDER BY relname DESC; segid | relnamespace | reltablespace | relowner | relname | target_type | target_oid -------+--------------+---------------+----------+---------------------------+-----------------+------------ 0 | 99 | 0 | 10 | pg_toast_blocked_t7_index | NAMESPACE_QUOTA | 2200 0 | 99 | 0 | 10 | pg_toast_blocked_t7 | NAMESPACE_QUOTA | 2200 0 | 2200 | 0 | 10 | blocked_t7 | NAMESPACE_QUOTA | 2200 (3 rows) -SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; gp_inject_fault_infinite -------------------------- Success: (1 row) 1<: <... 
completed> -ERROR: schema's disk space quota exceeded with name: 2200 (seg0 127.0.0.1:6002 pid=4675) +ERROR: schema's disk space quota exceeded with name: 2200 (seg0 127.0.0.1:6002 pid=2163) 1: ABORT; ABORT --- Clean up the blackmap on seg0. -SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; - refresh_blackmap ------------------- - +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_rejectmap +------------------- + (1 row) -- 14. Test that we are able to block an appendonly relation on seg0 by its namespace. @@ -601,26 +601,26 @@ SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]: BEGIN 1: CREATE TABLE blocked_t7(i int) WITH (appendonly=true) DISTRIBUTED BY (i); CREATE -1: SELECT dump_relation_cache_to_file('/tmp/test_blackmap.csv'); +1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); dump_relation_cache_to_file ----------------------------- (1 row) --- Inject 'suspension' to check_blackmap_by_relfilenode on seg0. -SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; gp_inject_fault_infinite -------------------------- Success: (1 row) --- Insert a small amount of data into blocked_t7. It will hang up at check_blackmap_by_relfilenode(). +-- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). 1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); -SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE'::text, true, '/tmp/test_blackmap.csv'::text); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE'::text, true, '/tmp/test_rejectmap.csv'::text); block_uncommitted_relation_on_seg0 ------------------------------------ (1 row) -- Show that blocked_t7 is blocked on seg0. 
-2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_blackmap.csv'::text) AS relname, be.target_type, be.target_oid FROM gp_dist_random('diskquota.blackmap') AS be, read_relation_cache_from_file('/tmp/test_blackmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0 ORDER BY relname DESC; +2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text) AS relname, be.target_type, be.target_oid FROM gp_dist_random('diskquota.rejectmap') AS be, read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0 ORDER BY relname DESC; segid | relnamespace | reltablespace | relowner | relname | target_type | target_oid -------+--------------+---------------+----------+-------------------------------+-----------------+------------ 0 | 6104 | 0 | 10 | pg_aovisimap_blocked_t7_index | NAMESPACE_QUOTA | 2200 @@ -628,20 +628,20 @@ SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE'::text, 0 | 6104 | 0 | 10 | pg_aoseg_blocked_t7 | NAMESPACE_QUOTA | 2200 0 | 2200 | 0 | 10 | blocked_t7 | NAMESPACE_QUOTA | 2200 (4 rows) -SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; gp_inject_fault_infinite -------------------------- Success: (1 row) 1<: <... completed> -ERROR: schema's disk space quota exceeded with name: 2200 (seg0 127.0.0.1:6002 pid=4675) +ERROR: schema's disk space quota exceeded with name: 2200 (seg0 127.0.0.1:6002 pid=2163) 1: ABORT; ABORT --- Clean up the blackmap on seg0. -SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; - refresh_blackmap ------------------- - +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_rejectmap +------------------- + (1 row) -- 15. Test that we are able to block an appendonly (column oriented) relation on seg0 by its namespace. @@ -649,26 +649,26 @@ SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]: BEGIN 1: CREATE TABLE blocked_t7(i int) WITH (appendonly=true, orientation=column) DISTRIBUTED BY (i); CREATE -1: SELECT dump_relation_cache_to_file('/tmp/test_blackmap.csv'); +1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); dump_relation_cache_to_file ----------------------------- (1 row) --- Inject 'suspension' to check_blackmap_by_relfilenode on seg0. -SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; gp_inject_fault_infinite -------------------------- Success: (1 row) --- Insert a small amount of data into blocked_t7. It will hang up at check_blackmap_by_relfilenode(). +-- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). 
1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); -SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE'::text, true, '/tmp/test_blackmap.csv'::text); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE'::text, true, '/tmp/test_rejectmap.csv'::text); block_uncommitted_relation_on_seg0 ------------------------------------ (1 row) -- Show that blocked_t7 is blocked on seg0. -2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_blackmap.csv'::text) AS relname, be.target_type, be.target_oid FROM gp_dist_random('diskquota.blackmap') AS be, read_relation_cache_from_file('/tmp/test_blackmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0 ORDER BY relname DESC; +2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text) AS relname, be.target_type, be.target_oid FROM gp_dist_random('diskquota.rejectmap') AS be, read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0 ORDER BY relname DESC; segid | relnamespace | reltablespace | relowner | relname | target_type | target_oid -------+--------------+---------------+----------+-------------------------------+-----------------+------------ 0 | 6104 | 0 | 10 | pg_aovisimap_blocked_t7_index | NAMESPACE_QUOTA | 2200 @@ -676,20 +676,20 @@ SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE'::text, 0 | 6104 | 0 | 10 | pg_aocsseg_blocked_t7 | NAMESPACE_QUOTA | 2200 0 | 2200 | 0 | 10 | blocked_t7 | NAMESPACE_QUOTA | 2200 (4 rows) -SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; gp_inject_fault_infinite -------------------------- Success: (1 row) 1<: <... completed> -ERROR: schema's disk space quota exceeded with name: 2200 (seg0 127.0.0.1:6002 pid=4675) +ERROR: schema's disk space quota exceeded with name: 2200 (seg0 127.0.0.1:6002 pid=2163) 1: ABORT; ABORT --- Clean up the blackmap on seg0. -SELECT diskquota.refresh_blackmap( ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; - refresh_blackmap ------------------- - +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_rejectmap +------------------- + (1 row) -- Disable check quota by relfilenode on seg0. 
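--
-- A minimal sketch of the dispatch/clean-up pattern the tests above exercise,
-- assuming the public schema (OID 2200), a tablespace oid of 0 (matching the
-- rejectmap rows shown above), and that blocked_t1 from test case 1 still
-- exists; all three are illustrative assumptions, not part of the schedule.
-- A rejectmap entry follows the ROW(...) shape used by the helper functions:
-- (target oid, database oid, tablespace oid, target type, segexceeded),
-- where target type 0 means NAMESPACE.
SELECT diskquota.refresh_rejectmap(
           ARRAY[ROW(2200,                                  /* target oid: schema public */
                     (SELECT oid FROM pg_database
                       WHERE datname = CURRENT_DATABASE()), /* database oid */
                     0,                                     /* tablespace oid (assumed 0) */
                     0,                                     /* target type: NAMESPACE */
                     false)                                 /* segexceeded */
                ]::diskquota.rejectmap_entry[],
           ARRAY['blocked_t1'::regclass]::oid[])
  FROM gp_dist_random('gp_id') WHERE gp_segment_id=0;
-- Clearing the rejectmap is the same call with empty arrays, exactly as in
-- the clean-up steps above:
SELECT diskquota.refresh_rejectmap(
           ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[])
  FROM gp_dist_random('gp_id') WHERE gp_segment_id=0;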
diff --git a/tests/isolation2/isolation2_schedule b/tests/isolation2/isolation2_schedule index 0530bb21e9f..af3da7127de 100644 --- a/tests/isolation2/isolation2_schedule +++ b/tests/isolation2/isolation2_schedule @@ -1,7 +1,7 @@ test: config test: test_create_extension test: test_relation_size -test: test_blackmap +test: test_rejectmap test: test_vacuum test: test_truncate test: test_postmaster_restart diff --git a/tests/isolation2/sql/test_blackmap.sql b/tests/isolation2/sql/test_rejectmap.sql similarity index 72% rename from tests/isolation2/sql/test_blackmap.sql rename to tests/isolation2/sql/test_rejectmap.sql index 408f637ce16..7ddbb42d9a5 100644 --- a/tests/isolation2/sql/test_blackmap.sql +++ b/tests/isolation2/sql/test_rejectmap.sql @@ -1,5 +1,5 @@ -- --- This file contains tests for dispatching blackmap and canceling +-- This file contains tests for dispatching rejectmap and canceling -- queries in smgrextend hook by relation's relfilenode. -- @@ -48,7 +48,7 @@ CREATE OR REPLACE FUNCTION block_relation_on_seg0(rel regclass, block_type text, SELECT relowner INTO targetoid /*in func*/ FROM pg_class WHERE relname=rel::text; /*in func*/ END CASE; /*in func*/ - PERFORM diskquota.refresh_blackmap( /*in func*/ + PERFORM diskquota.refresh_rejectmap( /*in func*/ ARRAY[ /*in func*/ ROW (targetoid, /*in func*/ (SELECT oid FROM pg_database WHERE datname = CURRENT_DATABASE()), /*in func*/ @@ -58,7 +58,7 @@ CREATE OR REPLACE FUNCTION block_relation_on_seg0(rel regclass, block_type text, )), /*in func*/ bt, /*in func*/ segexceeded) /*in func*/ - ]::diskquota.blackmap_entry[], /*in func*/ + ]::diskquota.rejectmap_entry[], /*in func*/ ARRAY[rel]::oid[]) /*in func*/ FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; /*in func*/ END; $$ /*in func*/ @@ -72,130 +72,130 @@ SELECT gp_inject_fault_infinite('enable_check_quota_by_relfilenode', 'skip', dbi -- 1. Test canceling the extending of an ordinary table. CREATE TABLE blocked_t1(i int) DISTRIBUTED BY (i); INSERT INTO blocked_t1 SELECT generate_series(1, 100); --- Inject 'suspension' to check_blackmap_by_relfilenode on seg0. -SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'suspend', dbid) +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; --- Insert a small amount of data into blocked_t1. It will hang up at check_blackmap_by_relfilenode(). +-- Insert a small amount of data into blocked_t1. It will hang up at check_rejectmap_by_relfilenode(). 1&: INSERT INTO blocked_t1 SELECT generate_series(1, 10000); --- Dispatch blackmap to seg0. +-- Dispatch rejectmap to seg0. SELECT block_relation_on_seg0('blocked_t1'::regclass, 'NAMESPACE'::text, false); -SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; -- Session 1 will return and emit an error message saying that the quota limit is exceeded on seg0. 1<: --- Clean up the blackmap on seg0. -SELECT diskquota.refresh_blackmap( - ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( + ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; -- 2. Test canceling the extending of a toast relation. 
CREATE TABLE blocked_t2(i text) DISTRIBUTED BY (i); INSERT INTO blocked_t2 SELECT generate_series(1, 100); --- Inject 'suspension' to check_blackmap_by_relfilenode on seg0. -SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'suspend', dbid) +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; --- Insert a small amount of data into blocked_t2. It will hang up at check_blackmap_by_relfilenode(). +-- Insert a small amount of data into blocked_t2. It will hang up at check_rejectmap_by_relfilenode(). 1&: INSERT INTO blocked_t2 SELECT generate_series(1, 10000); --- Dispatch blackmap to seg0. +-- Dispatch rejectmap to seg0. SELECT block_relation_on_seg0('blocked_t2'::regclass, 'NAMESPACE'::text, false); -SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; -- Session 1 will return and emit an error message saying that the quota limit is exceeded on seg0. 1<: --- Clean up the blackmap on seg0. -SELECT diskquota.refresh_blackmap( - ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( + ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; -- 3. Test canceling the extending of an appendonly relation. CREATE TABLE blocked_t3(i int) WITH (appendonly=true) DISTRIBUTED BY (i); INSERT INTO blocked_t3 SELECT generate_series(1, 100); --- Inject 'suspension' to check_blackmap_by_relfilenode on seg0. -SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'suspend', dbid) +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; --- Insert a small amount of data into blocked_t3. It will hang up at check_blackmap_by_relfilenode(). +-- Insert a small amount of data into blocked_t3. It will hang up at check_rejectmap_by_relfilenode(). 1&: INSERT INTO blocked_t3 SELECT generate_series(1, 10000); --- Dispatch blackmap to seg0. +-- Dispatch rejectmap to seg0. SELECT block_relation_on_seg0('blocked_t3'::regclass, 'NAMESPACE'::text, false); -SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; -- Session 1 will return and emit an error message saying that the quota limit is exceeded on seg0. 1<: --- Clean up the blackmap on seg0. -SELECT diskquota.refresh_blackmap( - ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( + ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; -- 4. Test canceling the extending of an index relation. CREATE TABLE blocked_t4(i int) DISTRIBUTED BY (i); CREATE INDEX blocked_t4_index ON blocked_t4(i); INSERT INTO blocked_t4 SELECT generate_series(1, 100); --- Inject 'suspension' to check_blackmap_by_relfilenode on seg0. -SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'suspend', dbid) +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. 
+SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; --- Insert a small amount of data into blocked_t4. It will hang up at check_blackmap_by_relfilenode(). +-- Insert a small amount of data into blocked_t4. It will hang up at check_rejectmap_by_relfilenode(). 1&: INSERT INTO blocked_t4 SELECT generate_series(1, 10000); --- Dispatch blackmap to seg0. +-- Dispatch rejectmap to seg0. SELECT block_relation_on_seg0('blocked_t4_index'::regclass, 'NAMESPACE'::text, false); -SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; -- Session 1 will return and emit an error message saying that the quota limit is exceeded on seg0. 1<: --- Clean up the blackmap on seg0. -SELECT diskquota.refresh_blackmap( - ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( + ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; -- 5. Test error message for NAMESPACE_TABLESPACE_QUOTA when the quota limit is exceeded on segments. CREATE TABLE blocked_t5(i int) DISTRIBUTED BY (i); INSERT INTO blocked_t5 SELECT generate_series(1, 100); --- Inject 'suspension' to check_blackmap_by_relfilenode on seg0. -SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'suspend', dbid) +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; 1&: INSERT INTO blocked_t5 SELECT generate_series(1, 10000); SELECT block_relation_on_seg0('blocked_t5'::regclass, 'NAMESPACE_TABLESPACE'::text, true); -SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; 1<: --- Clean up the blackmap on seg0. -SELECT diskquota.refresh_blackmap( - ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( + ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; -- 6. Test error message for ROLE_TABLESPACE_QUOTA when the quota limit is exceeded on segments. CREATE TABLE blocked_t6(i int) DISTRIBUTED BY (i); INSERT INTO blocked_t6 SELECT generate_series(1, 100); --- Inject 'suspension' to check_blackmap_by_relfilenode on seg0. -SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'suspend', dbid) +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; 1&: INSERT INTO blocked_t6 SELECT generate_series(1, 10000); SELECT block_relation_on_seg0('blocked_t6'::regclass, 'ROLE_TABLESPACE'::text, true); -SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; 1<: --- Clean up the blackmap on seg0. -SELECT diskquota.refresh_blackmap( - ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) +-- Clean up the rejectmap on seg0. 
+SELECT diskquota.refresh_rejectmap( + ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; -- Do some clean-ups. @@ -207,7 +207,7 @@ DROP TABLE blocked_t5; DROP TABLE blocked_t6; -- --- Below are helper functions for testing adding uncommitted relations to blackmap. +-- Below are helper functions for testing adding uncommitted relations to rejectmap. -- -- start_ignore CREATE OR REPLACE LANGUAGE plpythonu; @@ -261,7 +261,7 @@ CREATE OR REPLACE FUNCTION replace_oid_with_relname(given_name text, filename te END; /*in func*/ $$ LANGUAGE plpgsql; --- This function helps dispatch blackmap for the given relation to seg0. +-- This function helps dispatch rejectmap for the given relation to seg0. CREATE OR REPLACE FUNCTION block_uncommitted_relation_on_seg0(rel text, block_type text, segexceeded boolean, filename text) RETURNS void AS $$ /*in func*/ DECLARE /*in func*/ @@ -290,7 +290,7 @@ CREATE OR REPLACE FUNCTION block_uncommitted_relation_on_seg0(rel text, block_ty FROM read_relation_cache_from_file(filename) /*in func*/ WHERE relname=rel::text AND segid=0; /*in func*/ END CASE; /*in func*/ - PERFORM diskquota.refresh_blackmap( /*in func*/ + PERFORM diskquota.refresh_rejectmap( /*in func*/ ARRAY[ /*in func*/ ROW (targetoid, /*in func*/ (SELECT oid FROM pg_database WHERE datname = CURRENT_DATABASE()), /*in func*/ @@ -303,7 +303,7 @@ CREATE OR REPLACE FUNCTION block_uncommitted_relation_on_seg0(rel text, block_ty )), /*in func*/ bt, /*in func*/ segexceeded) /*in func*/ - ]::diskquota.blackmap_entry[], /*in func*/ + ]::diskquota.rejectmap_entry[], /*in func*/ ARRAY[(SELECT reloid FROM read_relation_cache_from_file(filename) /*in func*/ WHERE relname=rel::text AND segid=0)::regclass]::oid[]) /*in func*/ FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; /*in func*/ @@ -313,232 +313,232 @@ LANGUAGE 'plpgsql'; -- 7. Test that we are able to block an ordinary relation on seg0 by its relnamespace. 1: BEGIN; 1: CREATE TABLE blocked_t7(i int) DISTRIBUTED BY (i); -1: SELECT dump_relation_cache_to_file('/tmp/test_blackmap.csv'); --- Inject 'suspension' to check_blackmap_by_relfilenode on seg0. -SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'suspend', dbid) +1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; --- Insert a small amount of data into blocked_t7. It will hang up at check_blackmap_by_relfilenode(). +-- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). 1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); -SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE'::text, false, '/tmp/test_blackmap.csv'::text); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE'::text, false, '/tmp/test_rejectmap.csv'::text); -- Show that blocked_t7 is blocked on seg0. 
-2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_blackmap.csv'::text), +2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text), be.target_type, be.target_oid - FROM gp_dist_random('diskquota.blackmap') AS be, - read_relation_cache_from_file('/tmp/test_blackmap.csv') AS rel + FROM gp_dist_random('diskquota.rejectmap') AS be, + read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0; -SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; 1<: 1: ABORT; --- Clean up the blackmap on seg0. -SELECT diskquota.refresh_blackmap( - ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( + ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; -- 8. Test that we are able to block an ordinary relation on seg0 by its relowner. 1: BEGIN; 1: CREATE TABLE blocked_t7(i int) DISTRIBUTED BY (i); -1: SELECT dump_relation_cache_to_file('/tmp/test_blackmap.csv'); --- Inject 'suspension' to check_blackmap_by_relfilenode on seg0. -SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'suspend', dbid) +1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; --- Insert a small amount of data into blocked_t7. It will hang up at check_blackmap_by_relfilenode(). +-- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). 1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); -SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'ROLE'::text, false, '/tmp/test_blackmap.csv'::text); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'ROLE'::text, false, '/tmp/test_rejectmap.csv'::text); -- Show that blocked_t7 is blocked on seg0. -2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_blackmap.csv'::text), +2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text), be.target_type, be.target_oid - FROM gp_dist_random('diskquota.blackmap') AS be, - read_relation_cache_from_file('/tmp/test_blackmap.csv') AS rel + FROM gp_dist_random('diskquota.rejectmap') AS be, + read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0; -SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; 1<: 1: ABORT; --- Clean up the blackmap on seg0. -SELECT diskquota.refresh_blackmap( - ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( + ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; -- 9. 
Test that we are able to block an ordinary relation on seg0 by its relnamespace and reltablespace. 1: BEGIN; 1: CREATE TABLE blocked_t7(i int) DISTRIBUTED BY (i); -1: SELECT dump_relation_cache_to_file('/tmp/test_blackmap.csv'); --- Inject 'suspension' to check_blackmap_by_relfilenode on seg0. -SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'suspend', dbid) +1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; --- Insert a small amount of data into blocked_t7. It will hang up at check_blackmap_by_relfilenode(). +-- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). 1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); -SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE_TABLESPACE'::text, false, '/tmp/test_blackmap.csv'::text); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE_TABLESPACE'::text, false, '/tmp/test_rejectmap.csv'::text); -- Show that blocked_t7 is blocked on seg0. -2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_blackmap.csv'::text), +2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text), be.target_type, be.target_oid - FROM gp_dist_random('diskquota.blackmap') AS be, - read_relation_cache_from_file('/tmp/test_blackmap.csv') AS rel + FROM gp_dist_random('diskquota.rejectmap') AS be, + read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0; -SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; 1<: 1: ABORT; --- Clean up the blackmap on seg0. -SELECT diskquota.refresh_blackmap( - ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( + ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; -- 10. Test that we are able to block an ordinary relation on seg0 by its relowner and reltablespace. 1: BEGIN; 1: CREATE TABLE blocked_t7(i int) DISTRIBUTED BY (i); -1: SELECT dump_relation_cache_to_file('/tmp/test_blackmap.csv'); --- Inject 'suspension' to check_blackmap_by_relfilenode on seg0. -SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'suspend', dbid) +1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; --- Insert a small amount of data into blocked_t7. It will hang up at check_blackmap_by_relfilenode(). +-- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). 
1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); -SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'ROLE_TABLESPACE'::text, false, '/tmp/test_blackmap.csv'::text); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'ROLE_TABLESPACE'::text, false, '/tmp/test_rejectmap.csv'::text); -- Show that blocked_t7 is blocked on seg0. -2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_blackmap.csv'::text), +2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text), be.target_type, be.target_oid - FROM gp_dist_random('diskquota.blackmap') AS be, - read_relation_cache_from_file('/tmp/test_blackmap.csv') AS rel + FROM gp_dist_random('diskquota.rejectmap') AS be, + read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0; -SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; 1<: 1: ABORT; --- Clean up the blackmap on seg0. -SELECT diskquota.refresh_blackmap( - ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( + ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; -- 11. Test that we are able to block an ordinary relation on seg0 by its relnamespace and reltablespace (segexceeded=true). 1: BEGIN; 1: CREATE TABLE blocked_t7(i int) DISTRIBUTED BY (i); -1: SELECT dump_relation_cache_to_file('/tmp/test_blackmap.csv'); --- Inject 'suspension' to check_blackmap_by_relfilenode on seg0. -SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'suspend', dbid) +1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; --- Insert a small amount of data into blocked_t7. It will hang up at check_blackmap_by_relfilenode(). +-- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). 1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); -SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE_TABLESPACE'::text, true, '/tmp/test_blackmap.csv'::text); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE_TABLESPACE'::text, true, '/tmp/test_rejectmap.csv'::text); -- Show that blocked_t7 is blocked on seg0. 
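-- Why tests 7 onward need the dump/read helpers: blocked_t7 is created inside a
-- transaction that is still open, so its pg_class row is invisible to the checking
-- session (2:). dump_relation_cache_to_file() writes session 1's relation cache
-- (reloid, relfilenode, relnamespace, relowner, segid, ...) to a file, and
-- read_relation_cache_from_file() turns it back into rows that can be joined against
-- the per-segment rejectmap on (segid, relnode = relfilenode). Stripped of its display
-- columns, the check below amounts to:
SELECT be.target_type, be.target_oid
FROM gp_dist_random('diskquota.rejectmap') AS be,
     read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel
WHERE be.segid = rel.segid AND be.relnode = rel.relfilenode AND rel.relfilenode <> 0;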
-2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_blackmap.csv'::text), +2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text), be.target_type, be.target_oid - FROM gp_dist_random('diskquota.blackmap') AS be, - read_relation_cache_from_file('/tmp/test_blackmap.csv') AS rel + FROM gp_dist_random('diskquota.rejectmap') AS be, + read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0; -SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; 1<: 1: ABORT; --- Clean up the blackmap on seg0. -SELECT diskquota.refresh_blackmap( - ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( + ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; -- 12. Test that we are able to block an ordinary relation on seg0 by its relowner and reltablespace (segexceeded=true). 1: BEGIN; 1: CREATE TABLE blocked_t7(i int) DISTRIBUTED BY (i); -1: SELECT dump_relation_cache_to_file('/tmp/test_blackmap.csv'); --- Inject 'suspension' to check_blackmap_by_relfilenode on seg0. -SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'suspend', dbid) +1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; --- Insert a small amount of data into blocked_t7. It will hang up at check_blackmap_by_relfilenode(). +-- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). 1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); -SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'ROLE_TABLESPACE'::text, true, '/tmp/test_blackmap.csv'::text); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'ROLE_TABLESPACE'::text, true, '/tmp/test_rejectmap.csv'::text); -- Show that blocked_t7 is blocked on seg0. -2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_blackmap.csv'::text), +2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text), be.target_type, be.target_oid - FROM gp_dist_random('diskquota.blackmap') AS be, - read_relation_cache_from_file('/tmp/test_blackmap.csv') AS rel + FROM gp_dist_random('diskquota.rejectmap') AS be, + read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0; -SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; 1<: 1: ABORT; --- Clean up the blackmap on seg0. -SELECT diskquota.refresh_blackmap( - ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) +-- Clean up the rejectmap on seg0. 
+SELECT diskquota.refresh_rejectmap( + ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; -- 13. Test that we are able to block a toast relation on seg0 by its namespace. 1: BEGIN; 1: CREATE TABLE blocked_t7(i text) DISTRIBUTED BY (i); -1: SELECT dump_relation_cache_to_file('/tmp/test_blackmap.csv'); --- Inject 'suspension' to check_blackmap_by_relfilenode on seg0. -SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'suspend', dbid) +1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; --- Insert a small amount of data into blocked_t7. It will hang up at check_blackmap_by_relfilenode(). +-- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). 1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); -SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE'::text, true, '/tmp/test_blackmap.csv'::text); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE'::text, true, '/tmp/test_rejectmap.csv'::text); -- Show that blocked_t7 is blocked on seg0. 2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, - replace_oid_with_relname(rel.relname, '/tmp/test_blackmap.csv'::text) AS relname, + replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text) AS relname, be.target_type, be.target_oid - FROM gp_dist_random('diskquota.blackmap') AS be, - read_relation_cache_from_file('/tmp/test_blackmap.csv') AS rel + FROM gp_dist_random('diskquota.rejectmap') AS be, + read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0 ORDER BY relname DESC; -SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; 1<: 1: ABORT; --- Clean up the blackmap on seg0. -SELECT diskquota.refresh_blackmap( - ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( + ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; -- 14. Test that we are able to block an appendonly relation on seg0 by its namespace. 1: BEGIN; 1: CREATE TABLE blocked_t7(i int) WITH (appendonly=true) DISTRIBUTED BY (i); -1: SELECT dump_relation_cache_to_file('/tmp/test_blackmap.csv'); --- Inject 'suspension' to check_blackmap_by_relfilenode on seg0. -SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'suspend', dbid) +1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; --- Insert a small amount of data into blocked_t7. It will hang up at check_blackmap_by_relfilenode(). +-- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). 
1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); -SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE'::text, true, '/tmp/test_blackmap.csv'::text); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE'::text, true, '/tmp/test_rejectmap.csv'::text); -- Show that blocked_t7 is blocked on seg0. 2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, - replace_oid_with_relname(rel.relname, '/tmp/test_blackmap.csv'::text) AS relname, + replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text) AS relname, be.target_type, be.target_oid - FROM gp_dist_random('diskquota.blackmap') AS be, - read_relation_cache_from_file('/tmp/test_blackmap.csv') AS rel + FROM gp_dist_random('diskquota.rejectmap') AS be, + read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0 ORDER BY relname DESC; -SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; 1<: 1: ABORT; --- Clean up the blackmap on seg0. -SELECT diskquota.refresh_blackmap( - ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( + ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; -- 15. Test that we are able to block an appendonly (column oriented) relation on seg0 by its namespace. 1: BEGIN; 1: CREATE TABLE blocked_t7(i int) WITH (appendonly=true, orientation=column) DISTRIBUTED BY (i); -1: SELECT dump_relation_cache_to_file('/tmp/test_blackmap.csv'); --- Inject 'suspension' to check_blackmap_by_relfilenode on seg0. -SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'suspend', dbid) +1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; --- Insert a small amount of data into blocked_t7. It will hang up at check_blackmap_by_relfilenode(). +-- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). 1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); -SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE'::text, true, '/tmp/test_blackmap.csv'::text); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE'::text, true, '/tmp/test_rejectmap.csv'::text); -- Show that blocked_t7 is blocked on seg0. 
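-- Tests 13-15 vary only the storage model: a text column gives blocked_t7 a toast
-- table, and the appendonly variants carry Greenplum auxiliary relations (segment
-- descriptor, block directory and visibility-map tables plus their indexes), each with
-- a relfilenode of its own. Blocking by namespace must catch those auxiliary
-- relfilenodes too, which is why the verification query below returns several rows per
-- table. The toast half of this is visible even in plain PostgreSQL:
SELECT reltoastrelid::regclass FROM pg_class WHERE relname = 'blocked_t7';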
2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, - replace_oid_with_relname(rel.relname, '/tmp/test_blackmap.csv'::text) AS relname, + replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text) AS relname, be.target_type, be.target_oid - FROM gp_dist_random('diskquota.blackmap') AS be, - read_relation_cache_from_file('/tmp/test_blackmap.csv') AS rel + FROM gp_dist_random('diskquota.rejectmap') AS be, + read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0 ORDER BY relname DESC; -SELECT gp_inject_fault_infinite('check_blackmap_by_relfilenode', 'reset', dbid) +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; 1<: 1: ABORT; --- Clean up the blackmap on seg0. -SELECT diskquota.refresh_blackmap( - ARRAY[]::diskquota.blackmap_entry[], ARRAY[]::oid[]) +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( + ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; -- Disable check quota by relfilenode on seg0. diff --git a/tests/regress/diskquota_schedule b/tests/regress/diskquota_schedule index 748090889f3..4b1e691b6b7 100644 --- a/tests/regress/diskquota_schedule +++ b/tests/regress/diskquota_schedule @@ -27,8 +27,8 @@ test: test_activetable_limit test: test_many_active_tables test: test_fetch_table_stat test: test_appendonly -test: test_blackmap -test: test_clean_blackmap_after_drop +test: test_rejectmap +test: test_clean_rejectmap_after_drop test: test_ctas_pause test: test_ctas_role test: test_ctas_schema diff --git a/tests/regress/expected/test_clean_blackmap_after_drop.out b/tests/regress/expected/test_clean_rejectmap_after_drop.out similarity index 86% rename from tests/regress/expected/test_clean_blackmap_after_drop.out rename to tests/regress/expected/test_clean_rejectmap_after_drop.out index 396164ec9c7..3e182106ad1 100644 --- a/tests/regress/expected/test_clean_blackmap_after_drop.out +++ b/tests/regress/expected/test_clean_rejectmap_after_drop.out @@ -1,5 +1,5 @@ -CREATE DATABASE test_clean_blackmap_after_drop; -\c test_clean_blackmap_after_drop +CREATE DATABASE test_clean_rejectmap_after_drop; +\c test_clean_rejectmap_after_drop CREATE EXTENSION diskquota; \! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null \! gpstop -u > /dev/null @@ -24,7 +24,7 @@ ERROR: role's disk space quota exceeded with name: 16574 (seg0 127.0.0.1:6002 DROP EXTENSION diskquota; INSERT INTO b SELECT generate_series(1, 100); -- ok \c contrib_regression -DROP DATABASE test_clean_blackmap_after_drop; +DROP DATABASE test_clean_rejectmap_after_drop; DROP ROLE r; \! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null \! 
gpstop -u > /dev/null diff --git a/tests/regress/expected/test_ctas_before_set_quota.out b/tests/regress/expected/test_ctas_before_set_quota.out index 34cd230f1b7..ac69b2b5226 100644 --- a/tests/regress/expected/test_ctas_before_set_quota.out +++ b/tests/regress/expected/test_ctas_before_set_quota.out @@ -37,8 +37,8 @@ SELECT diskquota.wait_for_worker_new_epoch(); t (1 row) --- Expect that current role is in the blackmap -SELECT rolname FROM pg_authid, diskquota.blackmap WHERE oid = target_oid; +-- Expect that current role is in the rejectmap +SELECT rolname FROM pg_authid, diskquota.rejectmap WHERE oid = target_oid; rolname --------- test diff --git a/tests/regress/expected/test_ctas_no_preload_lib.out b/tests/regress/expected/test_ctas_no_preload_lib.out index a3541b16c74..172ab45fdb7 100644 --- a/tests/regress/expected/test_ctas_no_preload_lib.out +++ b/tests/regress/expected/test_ctas_no_preload_lib.out @@ -59,8 +59,8 @@ SELECT diskquota.wait_for_worker_new_epoch(); t (1 row) --- Expect that current role is in the blackmap -SELECT rolname FROM pg_authid, diskquota.blackmap WHERE oid = target_oid; +-- Expect that current role is in the rejectmap +SELECT rolname FROM pg_authid, diskquota.rejectmap WHERE oid = target_oid; rolname --------- test @@ -78,7 +78,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); t (1 row) -SELECT rolname FROM pg_authid, diskquota.blackmap WHERE oid = target_oid; +SELECT rolname FROM pg_authid, diskquota.rejectmap WHERE oid = target_oid; rolname --------- (0 rows) diff --git a/tests/regress/expected/test_default_tablespace.out b/tests/regress/expected/test_default_tablespace.out index 3ab8b74b81d..8b9338fa6e7 100644 --- a/tests/regress/expected/test_default_tablespace.out +++ b/tests/regress/expected/test_default_tablespace.out @@ -41,7 +41,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); INSERT INTO t SELECT generate_series(1, 1000000); ERROR: tablespace: pg_default, role: role1 diskquota exceeded SELECT r.rolname, t.spcname, b.target_type -FROM diskquota.blackmap AS b, pg_tablespace AS t, pg_roles AS r +FROM diskquota.rejectmap AS b, pg_tablespace AS t, pg_roles AS r WHERE b.tablespace_oid = t.oid AND b.target_oid = r.oid AND r.rolname = 'role1' ORDER BY r.rolname, t.spcname, b.target_type; rolname | spcname | target_type @@ -112,7 +112,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); INSERT INTO t_in_custom_tablespace SELECT generate_series(1, 1000000); ERROR: tablespace: custom_tablespace, role: role1 diskquota exceeded SELECT r.rolname, t.spcname, b.target_type -FROM diskquota.blackmap AS b, pg_tablespace AS t, pg_roles AS r +FROM diskquota.rejectmap AS b, pg_tablespace AS t, pg_roles AS r WHERE b.tablespace_oid = t.oid AND b.target_oid = r.oid AND r.rolname = 'role1' ORDER BY r.rolname, t.spcname, b.target_type; rolname | spcname | target_type diff --git a/tests/regress/expected/test_blackmap.out b/tests/regress/expected/test_rejectmap.out similarity index 90% rename from tests/regress/expected/test_blackmap.out rename to tests/regress/expected/test_rejectmap.out index 45cdcc55edc..b8c8930a613 100644 --- a/tests/regress/expected/test_blackmap.out +++ b/tests/regress/expected/test_rejectmap.out @@ -1,8 +1,8 @@ -- --- This file contains tests for dispatching and quering blackmap. +-- This file contains tests for dispatching and quering rejectmap. 
-- -CREATE SCHEMA s_blackmap; -SET search_path TO s_blackmap; +CREATE SCHEMA s_rejectmap; +SET search_path TO s_rejectmap; -- This function replaces the oid appears in the auxiliary relation's name -- with the corresponding relname of that oid. CREATE OR REPLACE FUNCTION replace_oid_with_relname(given_name text) @@ -61,24 +61,24 @@ CREATE OR REPLACE FUNCTION block_relation_on_seg0(rel regclass, block_type text) SELECT relowner INTO targetoid FROM pg_class WHERE relname=rel::text; END CASE; - PERFORM diskquota.refresh_blackmap( + PERFORM diskquota.refresh_rejectmap( ARRAY[ ROW(targetoid, (SELECT oid FROM pg_database WHERE datname=current_database()), (SELECT get_real_tablespace_oid(block_type, tablespaceoid)), bt, false) - ]::diskquota.blackmap_entry[], + ]::diskquota.rejectmap_entry[], ARRAY[rel]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; END; $$ LANGUAGE 'plpgsql'; -- --- 1. Create an ordinary table and add its oid to blackmap on seg0. +-- 1. Create an ordinary table and add its oid to rejectmap on seg0. -- Check that it's relfilenode is blocked on seg0 by various conditions. -- CREATE TABLE blocked_t1(i int) DISTRIBUTED BY (i); --- Insert an entry for blocked_t1 to blackmap on seg0. +-- Insert an entry for blocked_t1 to rejectmap on seg0. SELECT block_relation_on_seg0('blocked_t1'::regclass, 'NAMESPACE'::text); block_relation_on_seg0 ------------------------ @@ -88,14 +88,14 @@ SELECT block_relation_on_seg0('blocked_t1'::regclass, 'NAMESPACE'::text); -- Shows that the relfilenode of blocked_t1 is blocked on seg0 by its namespace. SELECT rel.relname, be.target_type, (be.target_oid=rel.relnamespace) AS namespace_matched FROM gp_dist_random('pg_class') AS rel, - gp_dist_random('diskquota.blackmap') AS be + gp_dist_random('diskquota.rejectmap') AS be WHERE rel.relfilenode=be.relnode AND be.relnode<>0 AND rel.gp_segment_id=be.segid; relname | target_type | namespace_matched ------------+-----------------+------------------- blocked_t1 | NAMESPACE_QUOTA | t (1 row) --- Insert an entry for blocked_t1 to blackmap on seg0. +-- Insert an entry for blocked_t1 to rejectmap on seg0. SELECT block_relation_on_seg0('blocked_t1'::regclass, 'ROLE'::text); block_relation_on_seg0 ------------------------ @@ -105,7 +105,7 @@ SELECT block_relation_on_seg0('blocked_t1'::regclass, 'ROLE'::text); -- Shows that the relfilenode of blocked_t1 is blocked on seg0 by its owner. SELECT rel.relname, be.target_type, (be.target_oid=rel.relowner) AS owner_matched FROM gp_dist_random('pg_class') AS rel, - gp_dist_random('diskquota.blackmap') AS be + gp_dist_random('diskquota.rejectmap') AS be WHERE rel.relfilenode=be.relnode AND be.relnode<>0 AND rel.gp_segment_id=be.segid; relname | target_type | owner_matched ------------+-------------+--------------- @@ -116,7 +116,7 @@ SELECT rel.relname, be.target_type, (be.target_oid=rel.relowner) AS owner_matche \! mkdir -p /tmp/blocked_space CREATE TABLESPACE blocked_space LOCATION '/tmp/blocked_space'; ALTER TABLE blocked_t1 SET TABLESPACE blocked_space; --- Insert an entry for blocked_t1 to blackmap on seg0. +-- Insert an entry for blocked_t1 to rejectmap on seg0. 
SELECT block_relation_on_seg0('blocked_t1'::regclass, 'NAMESPACE_TABLESPACE'::text); block_relation_on_seg0 ------------------------ @@ -128,14 +128,14 @@ SELECT rel.relname, be.target_type, (be.target_oid=rel.relnamespace) AS namespace_matched, (be.tablespace_oid=rel.reltablespace) AS tablespace_matched FROM gp_dist_random('pg_class') AS rel, - gp_dist_random('diskquota.blackmap') AS be + gp_dist_random('diskquota.rejectmap') AS be WHERE rel.relfilenode=be.relnode AND be.relnode<>0 AND rel.gp_segment_id=be.segid; relname | target_type | namespace_matched | tablespace_matched ------------+----------------------------+-------------------+-------------------- blocked_t1 | NAMESPACE_TABLESPACE_QUOTA | t | t (1 row) --- Insert an entry for blocked_t1 to blackmap on seg0. +-- Insert an entry for blocked_t1 to rejectmap on seg0. SELECT block_relation_on_seg0('blocked_t1'::regclass, 'ROLE_TABLESPACE'::text); block_relation_on_seg0 ------------------------ @@ -147,7 +147,7 @@ SELECT rel.relname, be.target_type, (be.target_oid=rel.relowner) AS owner_matched, (be.tablespace_oid=rel.reltablespace) AS tablespace_matched FROM gp_dist_random('pg_class') AS rel, - gp_dist_random('diskquota.blackmap') AS be + gp_dist_random('diskquota.rejectmap') AS be WHERE rel.relfilenode=be.relnode AND be.relnode<>0 AND rel.gp_segment_id=be.segid; relname | target_type | owner_matched | tablespace_matched ------------+-----------------------+---------------+-------------------- @@ -159,7 +159,7 @@ SELECT rel.relname, be.target_type, -- index are blocked on seg0. -- CREATE TABLE blocked_t2(i text) DISTRIBUTED BY (i); --- Insert an entry for blocked_t2 to blackmap on seg0. +-- Insert an entry for blocked_t2 to rejectmap on seg0. SELECT block_relation_on_seg0('blocked_t2'::regclass, 'NAMESPACE'::text); block_relation_on_seg0 ------------------------ @@ -172,7 +172,7 @@ SELECT replace_oid_with_relname(rel.relname), rel.relkind, be.target_type, (be.target_oid=rel.relnamespace) AS namespace_matched FROM gp_dist_random('pg_class') AS rel, - gp_dist_random('diskquota.blackmap') AS be + gp_dist_random('diskquota.rejectmap') AS be WHERE rel.relfilenode=be.relnode AND be.relnode<>0 AND rel.gp_segment_id=be.segid ORDER BY rel.relname DESC; replace_oid_with_relname | relkind | target_type | namespace_matched @@ -188,7 +188,7 @@ SELECT replace_oid_with_relname(rel.relname), -- CREATE TABLE blocked_t3(i int) WITH (appendonly=true) DISTRIBUTED BY (i); CREATE INDEX blocked_t3_index ON blocked_t3(i); --- Insert an entry for blocked_t3 to blackmap on seg0. +-- Insert an entry for blocked_t3 to rejectmap on seg0. SELECT block_relation_on_seg0('blocked_t3'::regclass, 'NAMESPACE'::text); block_relation_on_seg0 ------------------------ @@ -201,7 +201,7 @@ SELECT replace_oid_with_relname(rel.relname), rel.relkind, be.target_type, (be.target_oid=rel.relnamespace) AS namespace_matched FROM gp_dist_random('pg_class') AS rel, - gp_dist_random('diskquota.blackmap') AS be + gp_dist_random('diskquota.rejectmap') AS be WHERE rel.relfilenode=be.relnode AND be.relnode<>0 AND rel.gp_segment_id=be.segid ORDER BY rel.relname DESC; replace_oid_with_relname | relkind | target_type | namespace_matched @@ -220,7 +220,7 @@ SELECT replace_oid_with_relname(rel.relname), -- CREATE TABLE blocked_t4(i int) WITH (appendonly=true, orientation=column) DISTRIBUTED BY (i); CREATE INDEX blocked_t4_index ON blocked_t4(i); --- Insert an entry for blocked_t4 to blackmap on seg0. +-- Insert an entry for blocked_t4 to rejectmap on seg0. 
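-- The helper accepts four block types, and the target_type column in the expected
-- output shows how each is reported: 'NAMESPACE' is resolved from pg_class.relnamespace
-- and surfaces as NAMESPACE_QUOTA, 'ROLE' from pg_class.relowner as ROLE_QUOTA, while
-- 'NAMESPACE_TABLESPACE' and 'ROLE_TABLESPACE' also record the tablespace oid and
-- surface as NAMESPACE_TABLESPACE_QUOTA and ROLE_TABLESPACE_QUOTA. Blocking blocked_t4
-- by owner instead of schema, for instance, would read:
SELECT block_relation_on_seg0('blocked_t4'::regclass, 'ROLE'::text);
-- The test itself proceeds with the 'NAMESPACE' call below.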
SELECT block_relation_on_seg0('blocked_t4'::regclass, 'NAMESPACE'::text); block_relation_on_seg0 ------------------------ @@ -233,7 +233,7 @@ SELECT replace_oid_with_relname(rel.relname), rel.relkind, be.target_type, (be.target_oid=rel.relnamespace) AS namespace_matched FROM gp_dist_random('pg_class') AS rel, - gp_dist_random('diskquota.blackmap') AS be + gp_dist_random('diskquota.rejectmap') AS be WHERE rel.relfilenode=be.relnode AND be.relnode<>0 AND rel.gp_segment_id=be.segid ORDER BY rel.relname DESC; replace_oid_with_relname | relkind | target_type | namespace_matched @@ -252,7 +252,7 @@ SELECT replace_oid_with_relname(rel.relname), -- CREATE TABLE blocked_t5(i text) WITH (appendonly=true, orientation=column) DISTRIBUTED BY (i); CREATE INDEX blocked_t5_index ON blocked_t5(i); --- Insert an entry for blocked_t5 to blackmap on seg0. +-- Insert an entry for blocked_t5 to rejectmap on seg0. SELECT block_relation_on_seg0('blocked_t5'::regclass, 'NAMESPACE'::text); block_relation_on_seg0 ------------------------ @@ -265,7 +265,7 @@ SELECT replace_oid_with_relname(rel.relname), rel.relkind, be.target_type, (be.target_oid=rel.relnamespace) AS namespace_matched FROM gp_dist_random('pg_class') AS rel, - gp_dist_random('diskquota.blackmap') AS be + gp_dist_random('diskquota.rejectmap') AS be WHERE rel.relfilenode=be.relnode AND be.relnode<>0 AND rel.gp_segment_id=be.segid ORDER BY rel.relname DESC; replace_oid_with_relname | relkind | target_type | namespace_matched @@ -291,4 +291,4 @@ DROP TABLE blocked_t4; DROP TABLE blocked_t5; DROP TABLESPACE blocked_space; SET search_path TO DEFAULT; -DROP SCHEMA s_blackmap; +DROP SCHEMA s_rejectmap; diff --git a/tests/regress/expected/test_tablespace_diff_schema.out b/tests/regress/expected/test_tablespace_diff_schema.out index 65c0036c430..93da486b836 100644 --- a/tests/regress/expected/test_tablespace_diff_schema.out +++ b/tests/regress/expected/test_tablespace_diff_schema.out @@ -22,7 +22,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- with hardlimits off, expect to success INSERT INTO a SELECT generate_series(1,1000000); --- wait for next loop for bgworker to add it to blackmap +-- wait for next loop for bgworker to add it to rejectmap SELECT diskquota.wait_for_worker_new_epoch(); wait_for_worker_new_epoch --------------------------- diff --git a/tests/regress/sql/test_clean_blackmap_after_drop.sql b/tests/regress/sql/test_clean_rejectmap_after_drop.sql similarity index 80% rename from tests/regress/sql/test_clean_blackmap_after_drop.sql rename to tests/regress/sql/test_clean_rejectmap_after_drop.sql index debb2c5b909..2eebb09f5db 100644 --- a/tests/regress/sql/test_clean_blackmap_after_drop.sql +++ b/tests/regress/sql/test_clean_rejectmap_after_drop.sql @@ -1,6 +1,6 @@ -CREATE DATABASE test_clean_blackmap_after_drop; +CREATE DATABASE test_clean_rejectmap_after_drop; -\c test_clean_blackmap_after_drop +\c test_clean_rejectmap_after_drop CREATE EXTENSION diskquota; \! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null @@ -19,7 +19,7 @@ DROP EXTENSION diskquota; INSERT INTO b SELECT generate_series(1, 100); -- ok \c contrib_regression -DROP DATABASE test_clean_blackmap_after_drop; +DROP DATABASE test_clean_rejectmap_after_drop; DROP ROLE r; \! 
gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null diff --git a/tests/regress/sql/test_ctas_before_set_quota.sql b/tests/regress/sql/test_ctas_before_set_quota.sql index 3263731ecb4..8e3cb08ab4f 100644 --- a/tests/regress/sql/test_ctas_before_set_quota.sql +++ b/tests/regress/sql/test_ctas_before_set_quota.sql @@ -18,8 +18,8 @@ SELECT diskquota.set_role_quota(current_role, '1MB'); SELECT diskquota.wait_for_worker_new_epoch(); --- Expect that current role is in the blackmap -SELECT rolname FROM pg_authid, diskquota.blackmap WHERE oid = target_oid; +-- Expect that current role is in the rejectmap +SELECT rolname FROM pg_authid, diskquota.rejectmap WHERE oid = target_oid; SELECT diskquota.set_role_quota(current_role, '-1'); diff --git a/tests/regress/sql/test_ctas_no_preload_lib.sql b/tests/regress/sql/test_ctas_no_preload_lib.sql index 717189da5ce..38b4478df52 100644 --- a/tests/regress/sql/test_ctas_no_preload_lib.sql +++ b/tests/regress/sql/test_ctas_no_preload_lib.sql @@ -40,14 +40,14 @@ SELECT diskquota.set_role_quota(current_role, '1MB'); SELECT diskquota.wait_for_worker_new_epoch(); --- Expect that current role is in the blackmap -SELECT rolname FROM pg_authid, diskquota.blackmap WHERE oid = target_oid; +-- Expect that current role is in the rejectmap +SELECT rolname FROM pg_authid, diskquota.rejectmap WHERE oid = target_oid; SELECT diskquota.set_role_quota(current_role, '-1'); SELECT diskquota.wait_for_worker_new_epoch(); -SELECT rolname FROM pg_authid, diskquota.blackmap WHERE oid = target_oid; +SELECT rolname FROM pg_authid, diskquota.rejectmap WHERE oid = target_oid; DROP TABLE t_without_diskquota; diff --git a/tests/regress/sql/test_default_tablespace.sql b/tests/regress/sql/test_default_tablespace.sql index fb6e4ec63b3..ede1e48180c 100644 --- a/tests/regress/sql/test_default_tablespace.sql +++ b/tests/regress/sql/test_default_tablespace.sql @@ -28,7 +28,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); INSERT INTO t SELECT generate_series(1, 1000000); SELECT r.rolname, t.spcname, b.target_type -FROM diskquota.blackmap AS b, pg_tablespace AS t, pg_roles AS r +FROM diskquota.rejectmap AS b, pg_tablespace AS t, pg_roles AS r WHERE b.tablespace_oid = t.oid AND b.target_oid = r.oid AND r.rolname = 'role1' ORDER BY r.rolname, t.spcname, b.target_type; @@ -70,7 +70,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); INSERT INTO t_in_custom_tablespace SELECT generate_series(1, 1000000); SELECT r.rolname, t.spcname, b.target_type -FROM diskquota.blackmap AS b, pg_tablespace AS t, pg_roles AS r +FROM diskquota.rejectmap AS b, pg_tablespace AS t, pg_roles AS r WHERE b.tablespace_oid = t.oid AND b.target_oid = r.oid AND r.rolname = 'role1' ORDER BY r.rolname, t.spcname, b.target_type; diff --git a/tests/regress/sql/test_blackmap.sql b/tests/regress/sql/test_rejectmap.sql similarity index 87% rename from tests/regress/sql/test_blackmap.sql rename to tests/regress/sql/test_rejectmap.sql index 545c688d4ca..9cdb6f772ee 100644 --- a/tests/regress/sql/test_blackmap.sql +++ b/tests/regress/sql/test_rejectmap.sql @@ -1,9 +1,9 @@ -- --- This file contains tests for dispatching and quering blackmap. +-- This file contains tests for dispatching and querying rejectmap. -- -CREATE SCHEMA s_blackmap; +CREATE SCHEMA s_rejectmap; -SET search_path TO s_blackmap; +SET search_path TO s_rejectmap; -- This function replaces the oid appears in the auxiliary relation's name -- with the corresponding relname of that oid.
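-- The ctas tests above (test_ctas_before_set_quota, test_ctas_no_preload_lib) use the
-- same synchronization recipe: set a 1MB role quota, then call
-- diskquota.wait_for_worker_new_epoch(), which waits until the background worker has
-- entered a new refresh epoch, so the rejectmap is guaranteed to reflect the quota
-- change before the assertion runs. The membership check is an implicit cross join;
-- spelled as an explicit join it reads:
SELECT a.rolname
FROM pg_authid AS a
JOIN diskquota.rejectmap AS r ON a.oid = r.target_oid;
-- Setting the quota to '-1' removes it, and after one more epoch the row disappears.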
@@ -65,41 +65,41 @@ CREATE OR REPLACE FUNCTION block_relation_on_seg0(rel regclass, block_type text) SELECT relowner INTO targetoid FROM pg_class WHERE relname=rel::text; END CASE; - PERFORM diskquota.refresh_blackmap( + PERFORM diskquota.refresh_rejectmap( ARRAY[ ROW(targetoid, (SELECT oid FROM pg_database WHERE datname=current_database()), (SELECT get_real_tablespace_oid(block_type, tablespaceoid)), bt, false) - ]::diskquota.blackmap_entry[], + ]::diskquota.rejectmap_entry[], ARRAY[rel]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; END; $$ LANGUAGE 'plpgsql'; -- --- 1. Create an ordinary table and add its oid to blackmap on seg0. +-- 1. Create an ordinary table and add its oid to rejectmap on seg0. -- Check that it's relfilenode is blocked on seg0 by various conditions. -- CREATE TABLE blocked_t1(i int) DISTRIBUTED BY (i); --- Insert an entry for blocked_t1 to blackmap on seg0. +-- Insert an entry for blocked_t1 to rejectmap on seg0. SELECT block_relation_on_seg0('blocked_t1'::regclass, 'NAMESPACE'::text); -- Shows that the relfilenode of blocked_t1 is blocked on seg0 by its namespace. SELECT rel.relname, be.target_type, (be.target_oid=rel.relnamespace) AS namespace_matched FROM gp_dist_random('pg_class') AS rel, - gp_dist_random('diskquota.blackmap') AS be + gp_dist_random('diskquota.rejectmap') AS be WHERE rel.relfilenode=be.relnode AND be.relnode<>0 AND rel.gp_segment_id=be.segid; --- Insert an entry for blocked_t1 to blackmap on seg0. +-- Insert an entry for blocked_t1 to rejectmap on seg0. SELECT block_relation_on_seg0('blocked_t1'::regclass, 'ROLE'::text); -- Shows that the relfilenode of blocked_t1 is blocked on seg0 by its owner. SELECT rel.relname, be.target_type, (be.target_oid=rel.relowner) AS owner_matched FROM gp_dist_random('pg_class') AS rel, - gp_dist_random('diskquota.blackmap') AS be + gp_dist_random('diskquota.rejectmap') AS be WHERE rel.relfilenode=be.relnode AND be.relnode<>0 AND rel.gp_segment_id=be.segid; -- Create a tablespace to test the rest of blocking types. @@ -107,7 +107,7 @@ SELECT rel.relname, be.target_type, (be.target_oid=rel.relowner) AS owner_matche CREATE TABLESPACE blocked_space LOCATION '/tmp/blocked_space'; ALTER TABLE blocked_t1 SET TABLESPACE blocked_space; --- Insert an entry for blocked_t1 to blackmap on seg0. +-- Insert an entry for blocked_t1 to rejectmap on seg0. SELECT block_relation_on_seg0('blocked_t1'::regclass, 'NAMESPACE_TABLESPACE'::text); -- Shows that the relfilenode of blocked_t1 is blocked on seg0 by its namespace and tablespace. @@ -115,10 +115,10 @@ SELECT rel.relname, be.target_type, (be.target_oid=rel.relnamespace) AS namespace_matched, (be.tablespace_oid=rel.reltablespace) AS tablespace_matched FROM gp_dist_random('pg_class') AS rel, - gp_dist_random('diskquota.blackmap') AS be + gp_dist_random('diskquota.rejectmap') AS be WHERE rel.relfilenode=be.relnode AND be.relnode<>0 AND rel.gp_segment_id=be.segid; --- Insert an entry for blocked_t1 to blackmap on seg0. +-- Insert an entry for blocked_t1 to rejectmap on seg0. SELECT block_relation_on_seg0('blocked_t1'::regclass, 'ROLE_TABLESPACE'::text); -- Shows that the relfilenode of blocked_t1 is blocked on seg0 by its owner and tablespace. 
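-- The hunks above and below revolve around diskquota.refresh_rejectmap(entries, oids):
-- the first argument is an array of diskquota.rejectmap_entry rows built as
-- ROW(target_oid, database_oid, tablespace_oid, block_type, segexceeded), the second an
-- array of relation oids to block, and wrapping the call in
-- gp_dist_random('gp_id') ... gp_segment_id=0 executes it on seg0 only. Flattened out
-- of the plpgsql helper (targetoid, tablespaceoid and bt stand for its local variables):
SELECT diskquota.refresh_rejectmap(
    ARRAY[ROW(targetoid,
              (SELECT oid FROM pg_database WHERE datname = current_database()),
              tablespaceoid, bt, false)
         ]::diskquota.rejectmap_entry[],
    ARRAY['blocked_t1'::regclass]::oid[])
FROM gp_dist_random('gp_id') WHERE gp_segment_id = 0;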
@@ -126,7 +126,7 @@ SELECT rel.relname, be.target_type, (be.target_oid=rel.relowner) AS owner_matched, (be.tablespace_oid=rel.reltablespace) AS tablespace_matched FROM gp_dist_random('pg_class') AS rel, - gp_dist_random('diskquota.blackmap') AS be + gp_dist_random('diskquota.rejectmap') AS be WHERE rel.relfilenode=be.relnode AND be.relnode<>0 AND rel.gp_segment_id=be.segid; -- @@ -134,7 +134,7 @@ SELECT rel.relname, be.target_type, -- index are blocked on seg0. -- CREATE TABLE blocked_t2(i text) DISTRIBUTED BY (i); --- Insert an entry for blocked_t2 to blackmap on seg0. +-- Insert an entry for blocked_t2 to rejectmap on seg0. SELECT block_relation_on_seg0('blocked_t2'::regclass, 'NAMESPACE'::text); -- Shows that the relfilenodes of blocked_t2 together with its toast relation and toast @@ -143,7 +143,7 @@ SELECT replace_oid_with_relname(rel.relname), rel.relkind, be.target_type, (be.target_oid=rel.relnamespace) AS namespace_matched FROM gp_dist_random('pg_class') AS rel, - gp_dist_random('diskquota.blackmap') AS be + gp_dist_random('diskquota.rejectmap') AS be WHERE rel.relfilenode=be.relnode AND be.relnode<>0 AND rel.gp_segment_id=be.segid ORDER BY rel.relname DESC; @@ -153,7 +153,7 @@ SELECT replace_oid_with_relname(rel.relname), -- CREATE TABLE blocked_t3(i int) WITH (appendonly=true) DISTRIBUTED BY (i); CREATE INDEX blocked_t3_index ON blocked_t3(i); --- Insert an entry for blocked_t3 to blackmap on seg0. +-- Insert an entry for blocked_t3 to rejectmap on seg0. SELECT block_relation_on_seg0('blocked_t3'::regclass, 'NAMESPACE'::text); -- Shows that the relfilenodes of blocked_t3 together with its appendonly relation and appendonly @@ -162,7 +162,7 @@ SELECT replace_oid_with_relname(rel.relname), rel.relkind, be.target_type, (be.target_oid=rel.relnamespace) AS namespace_matched FROM gp_dist_random('pg_class') AS rel, - gp_dist_random('diskquota.blackmap') AS be + gp_dist_random('diskquota.rejectmap') AS be WHERE rel.relfilenode=be.relnode AND be.relnode<>0 AND rel.gp_segment_id=be.segid ORDER BY rel.relname DESC; @@ -172,7 +172,7 @@ SELECT replace_oid_with_relname(rel.relname), -- CREATE TABLE blocked_t4(i int) WITH (appendonly=true, orientation=column) DISTRIBUTED BY (i); CREATE INDEX blocked_t4_index ON blocked_t4(i); --- Insert an entry for blocked_t4 to blackmap on seg0. +-- Insert an entry for blocked_t4 to rejectmap on seg0. SELECT block_relation_on_seg0('blocked_t4'::regclass, 'NAMESPACE'::text); -- Shows that the relfilenodes of blocked_t4 together with its appendonly relation and appendonly @@ -181,7 +181,7 @@ SELECT replace_oid_with_relname(rel.relname), rel.relkind, be.target_type, (be.target_oid=rel.relnamespace) AS namespace_matched FROM gp_dist_random('pg_class') AS rel, - gp_dist_random('diskquota.blackmap') AS be + gp_dist_random('diskquota.rejectmap') AS be WHERE rel.relfilenode=be.relnode AND be.relnode<>0 AND rel.gp_segment_id=be.segid ORDER BY rel.relname DESC; @@ -191,7 +191,7 @@ SELECT replace_oid_with_relname(rel.relname), -- CREATE TABLE blocked_t5(i text) WITH (appendonly=true, orientation=column) DISTRIBUTED BY (i); CREATE INDEX blocked_t5_index ON blocked_t5(i); --- Insert an entry for blocked_t5 to blackmap on seg0. +-- Insert an entry for blocked_t5 to rejectmap on seg0. 
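-- One subtlety in the tablespace-scoped cases above: a relation kept in the database
-- default tablespace has pg_class.reltablespace = 0, so the helper routes the oid
-- through get_real_tablespace_oid(), whose definition lies outside this excerpt but
-- presumably resolves 0 to the database default, just as the quota views later in this
-- patch do:
SELECT CASE WHEN c.reltablespace = 0
            THEN (SELECT dattablespace FROM pg_database WHERE datname = current_database())
            ELSE c.reltablespace
       END AS effective_tablespace_oid
FROM pg_class AS c WHERE c.relname = 'blocked_t1';
-- The blocked_t5 call below, by contrast, blocks purely by namespace.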
SELECT block_relation_on_seg0('blocked_t5'::regclass, 'NAMESPACE'::text); -- Shows that the relfilenodes of blocked_t5 together with its toast relation, toast @@ -200,7 +200,7 @@ SELECT replace_oid_with_relname(rel.relname), rel.relkind, be.target_type, (be.target_oid=rel.relnamespace) AS namespace_matched FROM gp_dist_random('pg_class') AS rel, - gp_dist_random('diskquota.blackmap') AS be + gp_dist_random('diskquota.rejectmap') AS be WHERE rel.relfilenode=be.relnode AND be.relnode<>0 AND rel.gp_segment_id=be.segid ORDER BY rel.relname DESC; @@ -215,4 +215,4 @@ DROP TABLE blocked_t4; DROP TABLE blocked_t5; DROP TABLESPACE blocked_space; SET search_path TO DEFAULT; -DROP SCHEMA s_blackmap; +DROP SCHEMA s_rejectmap; diff --git a/tests/regress/sql/test_tablespace_diff_schema.sql b/tests/regress/sql/test_tablespace_diff_schema.sql index 4e432e99cbb..fadfb0d6f79 100644 --- a/tests/regress/sql/test_tablespace_diff_schema.sql +++ b/tests/regress/sql/test_tablespace_diff_schema.sql @@ -17,7 +17,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- with hardlimits off, expect to success INSERT INTO a SELECT generate_series(1,1000000); --- wait for next loop for bgworker to add it to blackmap +-- wait for next loop for bgworker to add it to rejectmap SELECT diskquota.wait_for_worker_new_epoch(); -- expect to fail INSERT INTO a SELECT generate_series(1,1000000); diff --git a/upgrade_test/expected/2.0_catalog.out b/upgrade_test/expected/2.0_catalog.out index 476936bc069..7d7aa740bd5 100644 --- a/upgrade_test/expected/2.0_catalog.out +++ b/upgrade_test/expected/2.0_catalog.out @@ -27,11 +27,11 @@ ORDER BY t1.typname; typname | typname ----------------------------------------+---------------------------------------------------------------------------------- - blackmap | {bool,int4,text,oid,oid,oid,oid,oid,oid} - blackmap_entry | {bool,int4,oid,oid,oid} - blackmap_entry_detail | {bool,int4,text,oid,oid,oid,oid,oid,oid} diskquota_active_table_type | {int8,int2,oid} quota_config | {int8,int4,int4,oid,oid,tid,xid,xid,cid,cid,float4} + rejectmap | {bool,int4,text,oid,oid,oid,oid,oid,oid} + rejectmap_entry | {bool,int4,oid,oid,oid} + rejectmap_entry_detail | {bool,int4,text,oid,oid,oid,oid,oid,oid} relation_cache_detail | {char,int4,int4,oid,oid,oid,oid,oid,oid,oid,_oid} show_fast_database_size_view | {numeric} show_fast_role_quota_view | {name,int8,oid,numeric} @@ -62,11 +62,11 @@ ORDER BY relname; relname | reltype | reloftype -----------------------------+-------------------------------+----------- - blackmap_entry | {blackmap_entry} | - blackmap_entry_detail | {blackmap_entry_detail} | diskquota_active_table_type | {diskquota_active_table_type} | quota_config | {quota_config} | quota_config_pkey | | + rejectmap_entry | {rejectmap_entry} | + rejectmap_entry_detail | {rejectmap_entry_detail} | relation_cache_detail | {relation_cache_detail} | state | {state} | state_pkey | | @@ -97,39 +97,39 @@ WHERE AND p.proname != 'update_diskquota_db_list' -- update_diskquota_db_list in 1.0 can not be dropd, this is acceptable ORDER BY proname; - proname | prorettype | proargtypes | proallargtypes | proargmodes | prosrc | probin | proacl ------------------------------+-------------------------------+------------------------+-----------------+-------------+----------------------------------------------------------------------------------------------------------------------+--------------------------+-------- - diskquota_fetch_table_stat | {diskquota_active_table_type} | {int4,_oid} | | | diskquota_fetch_table_stat | 
$libdir/diskquota-2.0.so | - init_table_size_table | {void} | | | | init_table_size_table | $libdir/diskquota-2.0.so | - pause | {void} | | | | diskquota_pause | $libdir/diskquota-2.0.so | - pull_all_table_size | {record} | | {oid,int8,int2} | {o,o,o} | pull_all_table_size | $libdir/diskquota-2.0.so | - refresh_blackmap | {void} | {_blackmap_entry,_oid} | | | refresh_blackmap | $libdir/diskquota-2.0.so | - relation_size | {int8} | {regclass} | | | +| | - | | | | | SELECT SUM(size)::bigint FROM ( +| | - | | | | | SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, relstorage) AS size+| | - | | | | | FROM gp_dist_random('pg_class') WHERE oid = relation +| | - | | | | | UNION ALL +| | - | | | | | SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, relstorage) AS size+| | - | | | | | FROM pg_class WHERE oid = relation +| | - | | | | | ) AS t | | - relation_size_local | {int8} | {oid,oid,char,char} | | | relation_size_local | $libdir/diskquota-2.0.so | - resume | {void} | | | | diskquota_resume | $libdir/diskquota-2.0.so | - set_per_segment_quota | {void} | {text,float4} | | | set_per_segment_quota | $libdir/diskquota-2.0.so | - set_role_quota | {void} | {text,text} | | | set_role_quota | $libdir/diskquota-2.0.so | - set_role_tablespace_quota | {void} | {text,text,text} | | | set_role_tablespace_quota | $libdir/diskquota-2.0.so | - set_schema_quota | {void} | {text,text} | | | set_schema_quota | $libdir/diskquota-2.0.so | - set_schema_tablespace_quota | {void} | {text,text,text} | | | set_schema_tablespace_quota | $libdir/diskquota-2.0.so | - show_blackmap | {blackmap_entry_detail} | | | | show_blackmap | $libdir/diskquota-2.0.so | - show_relation_cache | {relation_cache_detail} | | | | show_relation_cache | $libdir/diskquota-2.0.so | - show_relation_cache_all_seg | {relation_cache_detail} | | | | +| | - | | | | | WITH relation_cache AS ( +| | - | | | | | SELECT diskquota.show_relation_cache() AS a +| | - | | | | | FROM gp_dist_random('gp_id') +| | - | | | | | ) +| | - | | | | | SELECT (a).* FROM relation_cache; | | - show_worker_epoch | {int8} | | | | show_worker_epoch | $libdir/diskquota-2.0.so | - status | {record} | | {text,text} | {t,t} | diskquota_status | $libdir/diskquota-2.0.so | - wait_for_worker_new_epoch | {bool} | | | | wait_for_worker_new_epoch | $libdir/diskquota-2.0.so | + proname | prorettype | proargtypes | proallargtypes | proargmodes | prosrc | probin | proacl +-----------------------------+-------------------------------+-------------------------+-----------------+-------------+----------------------------------------------------------------------------------------------------------------------+--------------------------+-------- + diskquota_fetch_table_stat | {diskquota_active_table_type} | {int4,_oid} | | | diskquota_fetch_table_stat | $libdir/diskquota-2.0.so | + init_table_size_table | {void} | | | | init_table_size_table | $libdir/diskquota-2.0.so | + pause | {void} | | | | diskquota_pause | $libdir/diskquota-2.0.so | + pull_all_table_size | {record} | | {oid,int8,int2} | {o,o,o} | pull_all_table_size | $libdir/diskquota-2.0.so | + refresh_rejectmap | {void} | {_rejectmap_entry,_oid} | | | refresh_rejectmap | $libdir/diskquota-2.0.so | + relation_size | {int8} | {regclass} | | | +| | + | | | | | SELECT SUM(size)::bigint FROM ( +| | + | | | | | SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, relstorage) AS size+| | + | | | | | FROM gp_dist_random('pg_class') WHERE oid = relation 
+| | + | | | | | UNION ALL +| | + | | | | | SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, relstorage) AS size+| | + | | | | | FROM pg_class WHERE oid = relation +| | + | | | | | ) AS t | | + relation_size_local | {int8} | {oid,oid,char,char} | | | relation_size_local | $libdir/diskquota-2.0.so | + resume | {void} | | | | diskquota_resume | $libdir/diskquota-2.0.so | + set_per_segment_quota | {void} | {text,float4} | | | set_per_segment_quota | $libdir/diskquota-2.0.so | + set_role_quota | {void} | {text,text} | | | set_role_quota | $libdir/diskquota-2.0.so | + set_role_tablespace_quota | {void} | {text,text,text} | | | set_role_tablespace_quota | $libdir/diskquota-2.0.so | + set_schema_quota | {void} | {text,text} | | | set_schema_quota | $libdir/diskquota-2.0.so | + set_schema_tablespace_quota | {void} | {text,text,text} | | | set_schema_tablespace_quota | $libdir/diskquota-2.0.so | + show_rejectmap | {rejectmap_entry_detail} | | | | show_rejectmap | $libdir/diskquota-2.0.so | + show_relation_cache | {relation_cache_detail} | | | | show_relation_cache | $libdir/diskquota-2.0.so | + show_relation_cache_all_seg | {relation_cache_detail} | | | | +| | + | | | | | WITH relation_cache AS ( +| | + | | | | | SELECT diskquota.show_relation_cache() AS a +| | + | | | | | FROM gp_dist_random('gp_id') +| | + | | | | | ) +| | + | | | | | SELECT (a).* FROM relation_cache; | | + show_worker_epoch | {int8} | | | | show_worker_epoch | $libdir/diskquota-2.0.so | + status | {record} | | {text,text} | {t,t} | diskquota_status | $libdir/diskquota-2.0.so | + wait_for_worker_new_epoch | {bool} | | | | wait_for_worker_new_epoch | $libdir/diskquota-2.0.so | (19 rows) -- UDF end @@ -144,127 +144,127 @@ WHERE schemaname = 'diskquota' ORDER by schemaname, viewname; - schemaname | viewname | definition -------------+----------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------- - diskquota | blackmap | SELECT bm.target_type, + - | | bm.target_oid, + - | | bm.database_oid, + - | | bm.tablespace_oid, + - | | bm.seg_exceeded, + - | | bm.dbnode, + - | | bm.spcnode, + - | | bm.relnode, + - | | bm.segid + - | | FROM diskquota.show_blackmap() bm(target_type, target_oid, database_oid, tablespace_oid, seg_exceeded, dbnode, spcnode, relnode, segid); - diskquota | show_fast_database_size_view | SELECT (( SELECT sum(pg_relation_size((pg_class.oid)::regclass)) AS sum + - | | FROM pg_class + - | | WHERE (pg_class.oid <= (16384)::oid)) + ( SELECT sum(table_size.size) AS sum + - | | FROM diskquota.table_size + + schemaname | viewname | definition +------------+----------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------- + diskquota | rejectmap | SELECT bm.target_type, + + | | bm.target_oid, + + | | bm.database_oid, + + | | bm.tablespace_oid, + + | | bm.seg_exceeded, + + | | bm.dbnode, + + | | bm.spcnode, + + | | bm.relnode, + + | | bm.segid + + | | FROM diskquota.show_rejectmap() bm(target_type, target_oid, database_oid, tablespace_oid, seg_exceeded, dbnode, spcnode, relnode, segid); + diskquota | show_fast_database_size_view | SELECT (( SELECT sum(pg_relation_size((pg_class.oid)::regclass)) AS sum + + | | FROM pg_class + + | | WHERE (pg_class.oid <= (16384)::oid)) + ( SELECT sum(table_size.size) AS sum + + | | FROM diskquota.table_size + | | WHERE 
(table_size.segid = (-1)))) AS dbsize; - diskquota | show_fast_role_quota_view | WITH quota_usage AS ( + - | | SELECT pg_class.relowner, + - | | sum(table_size.size) AS total_size + - | | FROM diskquota.table_size, + - | | pg_class + - | | WHERE ((table_size.tableid = pg_class.oid) AND (table_size.segid = (-1))) + - | | GROUP BY pg_class.relowner + - | | ) + - | | SELECT pg_roles.rolname AS role_name, + - | | quota_config.targetoid AS role_oid, + - | | quota_config.quotalimitmb AS quota_in_mb, + - | | COALESCE(quota_usage.total_size, (0)::numeric) AS rolsize_in_bytes + - | | FROM ((diskquota.quota_config + - | | JOIN pg_roles ON ((quota_config.targetoid = pg_roles.oid))) + - | | LEFT JOIN quota_usage ON ((pg_roles.oid = quota_usage.relowner))) + + diskquota | show_fast_role_quota_view | WITH quota_usage AS ( + + | | SELECT pg_class.relowner, + + | | sum(table_size.size) AS total_size + + | | FROM diskquota.table_size, + + | | pg_class + + | | WHERE ((table_size.tableid = pg_class.oid) AND (table_size.segid = (-1))) + + | | GROUP BY pg_class.relowner + + | | ) + + | | SELECT pg_roles.rolname AS role_name, + + | | quota_config.targetoid AS role_oid, + + | | quota_config.quotalimitmb AS quota_in_mb, + + | | COALESCE(quota_usage.total_size, (0)::numeric) AS rolsize_in_bytes + + | | FROM ((diskquota.quota_config + + | | JOIN pg_roles ON ((quota_config.targetoid = pg_roles.oid))) + + | | LEFT JOIN quota_usage ON ((pg_roles.oid = quota_usage.relowner))) + | | WHERE (quota_config.quotatype = 1); - diskquota | show_fast_role_tablespace_quota_view | WITH default_tablespace AS ( + - | | SELECT pg_database.dattablespace + - | | FROM pg_database + - | | WHERE (pg_database.datname = current_database()) + - | | ), quota_usage AS ( + - | | SELECT pg_class.relowner, + - | | CASE + - | | WHEN (pg_class.reltablespace = (0)::oid) THEN default_tablespace.dattablespace + - | | ELSE pg_class.reltablespace + - | | END AS reltablespace, + - | | sum(table_size.size) AS total_size + - | | FROM diskquota.table_size, + - | | pg_class, + - | | default_tablespace + - | | WHERE ((table_size.tableid = pg_class.oid) AND (table_size.segid = (-1))) + - | | GROUP BY pg_class.relowner, pg_class.reltablespace, default_tablespace.dattablespace + - | | ), full_quota_config AS ( + - | | SELECT target.primaryoid, + - | | target.tablespaceoid, + - | | config.quotalimitmb + - | | FROM diskquota.quota_config config, + - | | diskquota.target target + - | | WHERE (((config.targetoid = (target.rowid)::oid) AND (config.quotatype = target.quotatype)) AND (config.quotatype = 3)) + - | | ) + - | | SELECT pg_roles.rolname AS role_name, + - | | full_quota_config.primaryoid AS role_oid, + - | | pg_tablespace.spcname AS tablespace_name, + - | | full_quota_config.tablespaceoid AS tablespace_oid, + - | | full_quota_config.quotalimitmb AS quota_in_mb, + - | | COALESCE(quota_usage.total_size, (0)::numeric) AS rolsize_tablespace_in_bytes + - | | FROM (((full_quota_config + - | | JOIN pg_roles ON ((full_quota_config.primaryoid = pg_roles.oid))) + - | | JOIN pg_tablespace ON ((full_quota_config.tablespaceoid = pg_tablespace.oid))) + + diskquota | show_fast_role_tablespace_quota_view | WITH default_tablespace AS ( + + | | SELECT pg_database.dattablespace + + | | FROM pg_database + + | | WHERE (pg_database.datname = current_database()) + + | | ), quota_usage AS ( + + | | SELECT pg_class.relowner, + + | | CASE + + | | WHEN (pg_class.reltablespace = (0)::oid) THEN default_tablespace.dattablespace + + | | ELSE pg_class.reltablespace + + | | END AS 
reltablespace, + + | | sum(table_size.size) AS total_size + + | | FROM diskquota.table_size, + + | | pg_class, + + | | default_tablespace + + | | WHERE ((table_size.tableid = pg_class.oid) AND (table_size.segid = (-1))) + + | | GROUP BY pg_class.relowner, pg_class.reltablespace, default_tablespace.dattablespace + + | | ), full_quota_config AS ( + + | | SELECT target.primaryoid, + + | | target.tablespaceoid, + + | | config.quotalimitmb + + | | FROM diskquota.quota_config config, + + | | diskquota.target target + + | | WHERE (((config.targetoid = (target.rowid)::oid) AND (config.quotatype = target.quotatype)) AND (config.quotatype = 3)) + + | | ) + + | | SELECT pg_roles.rolname AS role_name, + + | | full_quota_config.primaryoid AS role_oid, + + | | pg_tablespace.spcname AS tablespace_name, + + | | full_quota_config.tablespaceoid AS tablespace_oid, + + | | full_quota_config.quotalimitmb AS quota_in_mb, + + | | COALESCE(quota_usage.total_size, (0)::numeric) AS rolsize_tablespace_in_bytes + + | | FROM (((full_quota_config + + | | JOIN pg_roles ON ((full_quota_config.primaryoid = pg_roles.oid))) + + | | JOIN pg_tablespace ON ((full_quota_config.tablespaceoid = pg_tablespace.oid))) + | | LEFT JOIN quota_usage ON (((pg_roles.oid = quota_usage.relowner) AND (pg_tablespace.oid = quota_usage.reltablespace)))); - diskquota | show_fast_schema_quota_view | WITH quota_usage AS ( + - | | SELECT pg_class.relnamespace, + - | | sum(table_size.size) AS total_size + - | | FROM diskquota.table_size, + - | | pg_class + - | | WHERE ((table_size.tableid = pg_class.oid) AND (table_size.segid = (-1))) + - | | GROUP BY pg_class.relnamespace + - | | ) + - | | SELECT pg_namespace.nspname AS schema_name, + - | | quota_config.targetoid AS schema_oid, + - | | quota_config.quotalimitmb AS quota_in_mb, + - | | COALESCE(quota_usage.total_size, (0)::numeric) AS nspsize_in_bytes + - | | FROM ((diskquota.quota_config + - | | JOIN pg_namespace ON ((quota_config.targetoid = pg_namespace.oid))) + - | | LEFT JOIN quota_usage ON ((pg_namespace.oid = quota_usage.relnamespace))) + + diskquota | show_fast_schema_quota_view | WITH quota_usage AS ( + + | | SELECT pg_class.relnamespace, + + | | sum(table_size.size) AS total_size + + | | FROM diskquota.table_size, + + | | pg_class + + | | WHERE ((table_size.tableid = pg_class.oid) AND (table_size.segid = (-1))) + + | | GROUP BY pg_class.relnamespace + + | | ) + + | | SELECT pg_namespace.nspname AS schema_name, + + | | quota_config.targetoid AS schema_oid, + + | | quota_config.quotalimitmb AS quota_in_mb, + + | | COALESCE(quota_usage.total_size, (0)::numeric) AS nspsize_in_bytes + + | | FROM ((diskquota.quota_config + + | | JOIN pg_namespace ON ((quota_config.targetoid = pg_namespace.oid))) + + | | LEFT JOIN quota_usage ON ((pg_namespace.oid = quota_usage.relnamespace))) + | | WHERE (quota_config.quotatype = 0); - diskquota | show_fast_schema_tablespace_quota_view | WITH default_tablespace AS ( + - | | SELECT pg_database.dattablespace + - | | FROM pg_database + - | | WHERE (pg_database.datname = current_database()) + - | | ), quota_usage AS ( + - | | SELECT pg_class.relnamespace, + - | | CASE + - | | WHEN (pg_class.reltablespace = (0)::oid) THEN default_tablespace.dattablespace + - | | ELSE pg_class.reltablespace + - | | END AS reltablespace, + - | | sum(table_size.size) AS total_size + - | | FROM diskquota.table_size, + - | | pg_class, + - | | default_tablespace + - | | WHERE ((table_size.tableid = pg_class.oid) AND (table_size.segid = (-1))) + - | | GROUP BY pg_class.relnamespace, 
pg_class.reltablespace, default_tablespace.dattablespace + - | | ), full_quota_config AS ( + - | | SELECT target.primaryoid, + - | | target.tablespaceoid, + - | | config.quotalimitmb + - | | FROM diskquota.quota_config config, + - | | diskquota.target target + - | | WHERE (((config.targetoid = (target.rowid)::oid) AND (config.quotatype = target.quotatype)) AND (config.quotatype = 2)) + - | | ) + - | | SELECT pg_namespace.nspname AS schema_name, + - | | full_quota_config.primaryoid AS schema_oid, + - | | pg_tablespace.spcname AS tablespace_name, + - | | full_quota_config.tablespaceoid AS tablespace_oid, + - | | full_quota_config.quotalimitmb AS quota_in_mb, + - | | COALESCE(quota_usage.total_size, (0)::numeric) AS nspsize_tablespace_in_bytes + - | | FROM (((full_quota_config + - | | JOIN pg_namespace ON ((full_quota_config.primaryoid = pg_namespace.oid))) + - | | JOIN pg_tablespace ON ((full_quota_config.tablespaceoid = pg_tablespace.oid))) + + diskquota | show_fast_schema_tablespace_quota_view | WITH default_tablespace AS ( + + | | SELECT pg_database.dattablespace + + | | FROM pg_database + + | | WHERE (pg_database.datname = current_database()) + + | | ), quota_usage AS ( + + | | SELECT pg_class.relnamespace, + + | | CASE + + | | WHEN (pg_class.reltablespace = (0)::oid) THEN default_tablespace.dattablespace + + | | ELSE pg_class.reltablespace + + | | END AS reltablespace, + + | | sum(table_size.size) AS total_size + + | | FROM diskquota.table_size, + + | | pg_class, + + | | default_tablespace + + | | WHERE ((table_size.tableid = pg_class.oid) AND (table_size.segid = (-1))) + + | | GROUP BY pg_class.relnamespace, pg_class.reltablespace, default_tablespace.dattablespace + + | | ), full_quota_config AS ( + + | | SELECT target.primaryoid, + + | | target.tablespaceoid, + + | | config.quotalimitmb + + | | FROM diskquota.quota_config config, + + | | diskquota.target target + + | | WHERE (((config.targetoid = (target.rowid)::oid) AND (config.quotatype = target.quotatype)) AND (config.quotatype = 2)) + + | | ) + + | | SELECT pg_namespace.nspname AS schema_name, + + | | full_quota_config.primaryoid AS schema_oid, + + | | pg_tablespace.spcname AS tablespace_name, + + | | full_quota_config.tablespaceoid AS tablespace_oid, + + | | full_quota_config.quotalimitmb AS quota_in_mb, + + | | COALESCE(quota_usage.total_size, (0)::numeric) AS nspsize_tablespace_in_bytes + + | | FROM (((full_quota_config + + | | JOIN pg_namespace ON ((full_quota_config.primaryoid = pg_namespace.oid))) + + | | JOIN pg_tablespace ON ((full_quota_config.tablespaceoid = pg_tablespace.oid))) + | | LEFT JOIN quota_usage ON (((pg_namespace.oid = quota_usage.relnamespace) AND (pg_tablespace.oid = quota_usage.reltablespace)))); - diskquota | show_segment_ratio_quota_view | SELECT pg_tablespace.spcname AS tablespace_name, + - | | pg_tablespace.oid AS tablespace_oid, + - | | quota_config.segratio AS per_seg_quota_ratio + - | | FROM (diskquota.quota_config + + diskquota | show_segment_ratio_quota_view | SELECT pg_tablespace.spcname AS tablespace_name, + + | | pg_tablespace.oid AS tablespace_oid, + + | | quota_config.segratio AS per_seg_quota_ratio + + | | FROM (diskquota.quota_config + | | JOIN pg_tablespace ON (((quota_config.targetoid = pg_tablespace.oid) AND (quota_config.quotatype = 4)))); (7 rows) From 7d4d66d9b2efa3ac785d3d7fbb5547a69fbc08c2 Mon Sep 17 00:00:00 2001 From: Xiaoran Wang Date: Mon, 25 Apr 2022 11:06:10 +0800 Subject: [PATCH 194/330] Reject set quota for system owner(#215) --- diskquota_utility.c | 12 ++++++++++++ 
tests/init_file | 3 +++ tests/regress/expected/test_role.out | 12 ++++++++++++ tests/regress/expected/test_tablespace_role.out | 12 ++++++++++++ tests/regress/sql/test_role.sql | 6 ++++++ tests/regress/sql/test_tablespace_role.sql | 7 +++++++ 6 files changed, 52 insertions(+) diff --git a/diskquota_utility.c b/diskquota_utility.c index cb5065baa42..0e18a25382e 100644 --- a/diskquota_utility.c +++ b/diskquota_utility.c @@ -24,6 +24,7 @@ #include "access/xact.h" #include "catalog/namespace.h" #include "catalog/objectaccess.h" +#include "catalog/pg_authid.h" #include "catalog/pg_collation.h" #include "catalog/pg_extension.h" #include "catalog/pg_namespace.h" @@ -95,6 +96,7 @@ static void set_quota_config_internal(Oid targetoid, int64 quota_limit_mb, Quota static int set_target_internal(Oid primaryoid, Oid spcoid, int64 quota_limit_mb, QuotaType type); static float4 get_per_segment_ratio(Oid spcoid); static bool to_delete_quota(QuotaType type, int64 quota_limit_mb, float4 segratio); +static void check_role(Oid roleoid, char *rolname); List *get_rel_oid_list(void); @@ -707,6 +709,7 @@ set_role_quota(PG_FUNCTION_ARGS) rolname = text_to_cstring(PG_GETARG_TEXT_PP(0)); rolname = str_tolower(rolname, strlen(rolname), DEFAULT_COLLATION_OID); roleoid = get_role_oid(rolname, false); + check_role(roleoid, rolname); sizestr = text_to_cstring(PG_GETARG_TEXT_PP(1)); sizestr = str_tolower(sizestr, strlen(sizestr), DEFAULT_COLLATION_OID); @@ -782,6 +785,7 @@ set_role_tablespace_quota(PG_FUNCTION_ARGS) rolname = text_to_cstring(PG_GETARG_TEXT_PP(0)); rolname = str_tolower(rolname, strlen(rolname), DEFAULT_COLLATION_OID); roleoid = get_role_oid(rolname, false); + check_role(roleoid, rolname); spcname = text_to_cstring(PG_GETARG_TEXT_PP(1)); spcname = str_tolower(spcname, strlen(spcname), DEFAULT_COLLATION_OID); @@ -1685,3 +1689,11 @@ to_delete_quota(QuotaType type, int64 quota_limit_mb, float4 segratio) return true; return false; } + +static void +check_role(Oid roleoid, char *rolname) +{ + if (roleoid == BOOTSTRAP_SUPERUSERID) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("Can not set disk quota for system owner: %s", rolname))); +} diff --git a/tests/init_file b/tests/init_file index 498482f3a9a..21a00398e8c 100644 --- a/tests/init_file +++ b/tests/init_file @@ -32,4 +32,7 @@ s/ERROR: tablespace: \d+, role: \d+ diskquota exceeded.*/[hardlimit] tablespace m/ERROR: tablespace: \d+, schema: \d+ diskquota exceeded.*/ s/ERROR: tablespace: \d+, schema: \d+ diskquota exceeded.*/[hardlimit] tablespace-schema's disk space quota exceeded/ + +m/^ERROR: Can not set disk quota for system owner:.*/ +s/^ERROR: Can not set disk quota for system owner:.*/ERROR: Can not set disk quota for system owner:/ -- end_matchsubs diff --git a/tests/regress/expected/test_role.out b/tests/regress/expected/test_role.out index c15987506ac..20a4d1a6a4d 100644 --- a/tests/regress/expected/test_role.out +++ b/tests/regress/expected/test_role.out @@ -102,6 +102,18 @@ SELECT diskquota.wait_for_worker_new_epoch(); INSERT INTO b SELECT generate_series(1,100); -- expect insert succeed INSERT INTO b2 SELECT generate_series(1,100); +-- superuser is blocked to set quota +--start_ignore +SELECT rolname from pg_roles where rolsuper=true; + rolname +--------- + xx +(1 row) + +--end_ignore +\gset +select diskquota.set_role_quota(:'rolname', '1mb'); +ERROR: Can not set disk quota for system owner: xx DROP TABLE b, b2; DROP ROLE u1, u2; RESET search_path; diff --git a/tests/regress/expected/test_tablespace_role.out
b/tests/regress/expected/test_tablespace_role.out index ea8afc99d07..db2947e60e7 100644 --- a/tests/regress/expected/test_tablespace_role.out +++ b/tests/regress/expected/test_tablespace_role.out @@ -147,6 +147,18 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert success INSERT INTO b SELECT generate_series(1,100); +-- superuser is blocked to set quota +-- start_ignore +SELECT rolname from pg_roles where rolsuper=true; + rolname +--------- + xx +(1 row) + +-- end_ignore +\gset +select diskquota.set_role_tablespace_quota(:'rolname', 'rolespc', '1mb'); +ERROR: Can not set disk quota for system owner: xx DROP TABLE b, b2; DROP ROLE rolespcu1, rolespcu2; RESET search_path; diff --git a/tests/regress/sql/test_role.sql b/tests/regress/sql/test_role.sql index 8edaa545ac2..6836db09b99 100644 --- a/tests/regress/sql/test_role.sql +++ b/tests/regress/sql/test_role.sql @@ -50,6 +50,12 @@ INSERT INTO b SELECT generate_series(1,100); -- expect insert succeed INSERT INTO b2 SELECT generate_series(1,100); +-- superuser is blocked to set quota +--start_ignore +SELECT rolname from pg_roles where rolsuper=true; +--end_ignore +\gset +select diskquota.set_role_quota(:'rolname', '1mb'); DROP TABLE b, b2; DROP ROLE u1, u2; RESET search_path; DROP SCHEMA srole; diff --git a/tests/regress/sql/test_tablespace_role.sql b/tests/regress/sql/test_tablespace_role.sql index dd84118722b..2368c2d7004 100644 --- a/tests/regress/sql/test_tablespace_role.sql +++ b/tests/regress/sql/test_tablespace_role.sql @@ -77,6 +77,13 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert success INSERT INTO b SELECT generate_series(1,100); +-- superuser is blocked to set quota +-- start_ignore +SELECT rolname from pg_roles where rolsuper=true; +-- end_ignore +\gset +select diskquota.set_role_tablespace_quota(:'rolname', 'rolespc', '1mb'); + DROP TABLE b, b2; DROP ROLE rolespcu1, rolespcu2; RESET search_path; DROP SCHEMA rolespcrole; From 3384abc233275e4d52a8a4716e5ea6796e3aa139 Mon Sep 17 00:00:00 2001 From: Sasasu Date: Mon, 18 Apr 2022 18:16:13 +0800 Subject: [PATCH 195/330] fix set quota with upper case object name The old code transformed all object names to lower case on purpose: create schema "S1"; select * from diskquota.set_schema_quota("S1", '1MB'); ERROR: schema "s1" does not exist Now, if the object name is wrapped in double quotes, e.g. '"Foo"', the lookup searches 'Foo' verbatim; otherwise the lower-cased 'foo' is always searched.
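To illustrate the intended lookup rules, a minimal psql sketch in the spirit of the regression tests added below (names and quota values are illustrative; exact error wording may differ):

    CREATE SCHEMA "Tn1";
    SELECT diskquota.set_schema_quota('Tn1', '-1 MB');   -- unquoted: lowered to tn1, fails
    SELECT diskquota.set_schema_quota('"tn1"', '-1 MB'); -- quoted: searched verbatim as tn1, fails
    SELECT diskquota.set_schema_quota('"Tn1"', '-1 MB'); -- quoted: matches "Tn1", succeeds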
--- diskquota_utility.c | 47 +++++++++++++------ tests/regress/expected/test_role.out | 18 +++++-- tests/regress/expected/test_schema.out | 15 +++--- .../regress/expected/test_tablespace_role.out | 39 ++++++++++++--- .../expected/test_tablespace_role_perseg.out | 25 ++++++++-- .../expected/test_tablespace_schema.out | 18 +++++-- .../test_tablespace_schema_perseg.out | 28 +++++------ tests/regress/sql/test_role.sql | 8 +++- tests/regress/sql/test_schema.sql | 5 +- tests/regress/sql/test_tablespace_role.sql | 12 +++++ .../sql/test_tablespace_role_perseg.sql | 12 ++++- tests/regress/sql/test_tablespace_schema.sql | 10 +++- .../sql/test_tablespace_schema_perseg.sql | 24 +++++----- 13 files changed, 194 insertions(+), 67 deletions(-) diff --git a/diskquota_utility.c b/diskquota_utility.c index 0e18a25382e..4da17bdd638 100644 --- a/diskquota_utility.c +++ b/diskquota_utility.c @@ -690,6 +690,31 @@ ddl_err_code_to_err_message(MessageResult code) } } +static Datum +__get_oid_auto_case_convert(Oid (*f)(const char *name, bool missing_ok), const char *name) +{ + char *b = NULL; + int l = strlen(name); + Oid ret = InvalidOid; + + if (l > 2 && name[0] == '"' && name[l - 1] == '"') + { + // object name wrapped by '"'. eg: "foo" + b = palloc(l); + StrNCpy(b, name + 1, l - 1); // trim the '"'. unlike strncpy, StrNCpy will ensure b[l-1] = '\0' + } + else + { + // lower the object name if not wrapped by '"' + b = str_tolower(name, strlen(name), DEFAULT_COLLATION_OID); + } + + ret = f(b, false); + + pfree(b); + return ret; +} + /* * Set disk quota limit for role. */ @@ -707,8 +732,7 @@ set_role_quota(PG_FUNCTION_ARGS) } rolname = text_to_cstring(PG_GETARG_TEXT_PP(0)); - rolname = str_tolower(rolname, strlen(rolname), DEFAULT_COLLATION_OID); - roleoid = get_role_oid(rolname, false); + roleoid = __get_oid_auto_case_convert(get_role_oid, rolname); check_role(roleoid, rolname); sizestr = text_to_cstring(PG_GETARG_TEXT_PP(1)); @@ -742,8 +766,7 @@ set_schema_quota(PG_FUNCTION_ARGS) } nspname = text_to_cstring(PG_GETARG_TEXT_PP(0)); - nspname = str_tolower(nspname, strlen(nspname), DEFAULT_COLLATION_OID); - namespaceoid = get_namespace_oid(nspname, false); + namespaceoid = __get_oid_auto_case_convert(get_namespace_oid, nspname); sizestr = text_to_cstring(PG_GETARG_TEXT_PP(1)); sizestr = str_tolower(sizestr, strlen(sizestr), DEFAULT_COLLATION_OID); @@ -783,17 +806,16 @@ set_role_tablespace_quota(PG_FUNCTION_ARGS) } rolname = text_to_cstring(PG_GETARG_TEXT_PP(0)); - rolname = str_tolower(rolname, strlen(rolname), DEFAULT_COLLATION_OID); - roleoid = get_role_oid(rolname, false); + roleoid = __get_oid_auto_case_convert(get_role_oid, rolname); check_role(roleoid, rolname); spcname = text_to_cstring(PG_GETARG_TEXT_PP(1)); - spcname = str_tolower(spcname, strlen(spcname), DEFAULT_COLLATION_OID); - spcoid = get_tablespace_oid(spcname, false); + spcoid = __get_oid_auto_case_convert(get_tablespace_oid, spcname); sizestr = text_to_cstring(PG_GETARG_TEXT_PP(2)); sizestr = str_tolower(sizestr, strlen(sizestr), DEFAULT_COLLATION_OID); quota_limit_mb = get_size_in_mb(sizestr); + if (quota_limit_mb == 0) { ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("disk quota can not be set to 0 MB"))); @@ -830,12 +852,10 @@ set_schema_tablespace_quota(PG_FUNCTION_ARGS) } nspname = text_to_cstring(PG_GETARG_TEXT_PP(0)); - nspname = str_tolower(nspname, strlen(nspname), DEFAULT_COLLATION_OID); - namespaceoid = get_namespace_oid(nspname, false); + namespaceoid = __get_oid_auto_case_convert(get_namespace_oid, nspname); spcname 
= text_to_cstring(PG_GETARG_TEXT_PP(1)); - spcname = str_tolower(spcname, strlen(spcname), DEFAULT_COLLATION_OID); - spcoid = get_tablespace_oid(spcname, false); + spcoid = __get_oid_auto_case_convert(get_tablespace_oid, spcname); sizestr = text_to_cstring(PG_GETARG_TEXT_PP(2)); sizestr = str_tolower(sizestr, strlen(sizestr), DEFAULT_COLLATION_OID); @@ -1231,8 +1251,7 @@ set_per_segment_quota(PG_FUNCTION_ARGS) (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("must be superuser to set disk quota limit"))); spcname = text_to_cstring(PG_GETARG_TEXT_PP(0)); - spcname = str_tolower(spcname, strlen(spcname), DEFAULT_COLLATION_OID); - spcoid = get_tablespace_oid(spcname, false); + spcoid = __get_oid_auto_case_convert(get_tablespace_oid, spcname); ratio = PG_GETARG_FLOAT4(1); diff --git a/tests/regress/expected/test_role.out b/tests/regress/expected/test_role.out index 20a4d1a6a4d..2923632ac1d 100644 --- a/tests/regress/expected/test_role.out +++ b/tests/regress/expected/test_role.out @@ -107,14 +107,26 @@ INSERT INTO b2 SELECT generate_series(1,100); SELECT rolname from pg_roles where rolsuper=true; rolname --------- - xx + sa (1 row) --end_ignore \gset select diskquota.set_role_quota(:'rolname', '1mb'); -ERROR: Can not set disk quota for system owner: xx +ERROR: Can not set disk quota for system owner: sa +CREATE ROLE "Tn" NOLOGIN; +NOTICE: resource queue required -- using default resource queue "pg_default" +SELECT diskquota.set_role_quota('Tn', '-1 MB'); -- fail +ERROR: role "tn" does not exist +SELECT diskquota.set_role_quota('"tn"', '-1 MB'); -- fail +ERROR: role "tn" does not exist +SELECT diskquota.set_role_quota('"Tn"', '-1 MB'); + set_role_quota +---------------- + +(1 row) + DROP TABLE b, b2; -DROP ROLE u1, u2; +DROP ROLE u1, u2, "Tn"; RESET search_path; DROP SCHEMA srole; diff --git a/tests/regress/expected/test_schema.out b/tests/regress/expected/test_schema.out index e4116a0b448..866b4b3e127 100644 --- a/tests/regress/expected/test_schema.out +++ b/tests/regress/expected/test_schema.out @@ -2,8 +2,6 @@ CREATE SCHEMA s1; SET search_path TO s1; CREATE TABLE a(i int) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT INTO a SELECT generate_series(1,100); -- expect insert success INSERT INTO a SELECT generate_series(1,100000); @@ -23,8 +21,6 @@ SELECT diskquota.wait_for_worker_new_epoch(); INSERT INTO a SELECT generate_series(1,100); ERROR: schema's disk space quota exceeded with name: s1 CREATE TABLE a2(i int) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. -- expect insert fail INSERT INTO a2 SELECT generate_series(1,100); ERROR: schema's disk space quota exceeded with name: s1 @@ -48,8 +44,6 @@ NOTICE: role "testbody" does not exist, skipping CREATE ROLE testbody; NOTICE: resource queue required -- using default resource queue "pg_default" CREATE TABLE badquota.t1(i INT) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. 
-HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. ALTER TABLE badquota.t1 OWNER TO testbody; INSERT INTO badquota.t1 SELECT generate_series(0, 100000); SELECT diskquota.init_table_size_table(); @@ -100,9 +94,16 @@ SELECT schema_name, quota_in_mb FROM diskquota.show_fast_schema_quota_view WHERE s1 | 1 (1 row) +CREATE SCHEMA "Tn1"; +SELECT diskquota.set_schema_quota('"Tn1"', '-1 MB'); + set_schema_quota +------------------ + +(1 row) + RESET search_path; DROP TABLE s1.a2, badquota.a; -DROP SCHEMA s1, s2; +DROP SCHEMA s1, s2, "Tn1"; DROP TABLE badquota.t1; DROP ROLE testbody; DROP SCHEMA badquota; diff --git a/tests/regress/expected/test_tablespace_role.out b/tests/regress/expected/test_tablespace_role.out index db2947e60e7..beed1ae8a38 100644 --- a/tests/regress/expected/test_tablespace_role.out +++ b/tests/regress/expected/test_tablespace_role.out @@ -8,17 +8,15 @@ CREATE TABLESPACE rolespc LOCATION '/tmp/rolespc'; CREATE SCHEMA rolespcrole; SET search_path TO rolespcrole; DROP ROLE IF EXISTS rolespcu1; +NOTICE: role "rolespcu1" does not exist, skipping DROP ROLE IF EXISTS rolespcu2; +NOTICE: role "rolespcu2" does not exist, skipping CREATE ROLE rolespcu1 NOLOGIN; NOTICE: resource queue required -- using default resource queue "pg_default" CREATE ROLE rolespcu2 NOLOGIN; NOTICE: resource queue required -- using default resource queue "pg_default" CREATE TABLE b (t TEXT) TABLESPACE rolespc DISTRIBUTED BY (t); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 't' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. CREATE TABLE b2 (t TEXT) TABLESPACE rolespc DISTRIBUTED BY (t); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 't' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. ALTER TABLE b2 OWNER TO rolespcu1; INSERT INTO b SELECT generate_series(1,100); -- expect insert success @@ -152,16 +150,45 @@ INSERT INTO b SELECT generate_series(1,100); SELECT rolname from pg_roles where rolsuper=true; rolname --------- - xx + sa (1 row) -- end_ignore \gset select diskquota.set_role_tablespace_quota(:'rolname', 'rolespc', '1mb'); -ERROR: Can not set disk quota for system owner: xx +ERROR: Can not set disk quota for system owner: sa +-- start_ignore +\! 
mkdir -p /tmp/rolespc3 +-- end_ignore +DROP ROLE IF EXISTS "Rolespcu3"; +NOTICE: role "Rolespcu3" does not exist, skipping +CREATE ROLE "Rolespcu3" NOLOGIN; +NOTICE: resource queue required -- using default resource queue "pg_default" +DROP TABLESPACE IF EXISTS "Rolespc3"; +NOTICE: tablespace "Rolespc3" does not exist, skipping +CREATE TABLESPACE "Rolespc3" LOCATION '/tmp/rolespc3'; +SELECT diskquota.set_role_tablespace_quota('rolespcu1', '"Rolespc3"', '-1 MB'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +SELECT diskquota.set_role_tablespace_quota('"Rolespcu3"', 'rolespc', '-1 mB'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +SELECT diskquota.set_role_tablespace_quota('"Rolespcu3"', '"Rolespc3"', '-1 Mb'); + set_role_tablespace_quota +--------------------------- + +(1 row) + DROP TABLE b, b2; DROP ROLE rolespcu1, rolespcu2; RESET search_path; DROP SCHEMA rolespcrole; DROP TABLESPACE rolespc; DROP TABLESPACE rolespc2; +DROP TABLESPACE "Rolespc3"; diff --git a/tests/regress/expected/test_tablespace_role_perseg.out b/tests/regress/expected/test_tablespace_role_perseg.out index 9deaf41f8ec..c30030325d7 100644 --- a/tests/regress/expected/test_tablespace_role_perseg.out +++ b/tests/regress/expected/test_tablespace_role_perseg.out @@ -16,8 +16,6 @@ NOTICE: resource queue required -- using default resource queue "pg_default" CREATE ROLE rolespc_persegu2 NOLOGIN; NOTICE: resource queue required -- using default resource queue "pg_default" CREATE TABLE b (t TEXT) TABLESPACE rolespc_perseg DISTRIBUTED BY (t); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 't' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. ALTER TABLE b OWNER TO rolespc_persegu1; SELECT diskquota.set_role_tablespace_quota('rolespc_persegu1', 'rolespc_perseg', '1 MB'); set_role_tablespace_quota @@ -208,9 +206,30 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert success INSERT INTO b SELECT generate_series(1,100); +-- start_ignore +\! 
mkdir -p /tmp/rolespc_perseg3 +-- end_ignore +DROP TABLESPACE IF EXISTS "Rolespc_perseg3"; +NOTICE: tablespace "Rolespc_perseg3" does not exist, skipping +CREATE TABLESPACE "Rolespc_perseg3" LOCATION '/tmp/rolespc_perseg3'; +CREATE ROLE "Rolespc_persegu3" NOLOGIN; +NOTICE: resource queue required -- using default resource queue "pg_default" +SELECT diskquota.set_role_tablespace_quota('"Rolespc_persegu3"', '"Rolespc_perseg3"', '-1 MB'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +SELECT diskquota.set_per_segment_quota('"Rolespc_perseg3"', 0.11); + set_per_segment_quota +----------------------- + +(1 row) + DROP table b; -DROP ROLE rolespc_persegu1, rolespc_persegu2; +DROP ROLE rolespc_persegu1, rolespc_persegu2, "Rolespc_persegu3"; RESET search_path; DROP SCHEMA rolespc_persegrole; DROP TABLESPACE rolespc_perseg; DROP TABLESPACE rolespc_perseg2; +DROP TABLESPACE "Rolespc_perseg3"; diff --git a/tests/regress/expected/test_tablespace_schema.out b/tests/regress/expected/test_tablespace_schema.out index 2d5fb1bdf15..a7e57c594be 100644 --- a/tests/regress/expected/test_tablespace_schema.out +++ b/tests/regress/expected/test_tablespace_schema.out @@ -8,8 +8,6 @@ NOTICE: tablespace "schemaspc" does not exist, skipping CREATE TABLESPACE schemaspc LOCATION '/tmp/schemaspc'; SET search_path TO spcs1; CREATE TABLE a(i int) TABLESPACE schemaspc DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT INTO a SELECT generate_series(1,100); -- expect insert fail INSERT INTO a SELECT generate_series(1,100000); @@ -29,8 +27,6 @@ SELECT diskquota.wait_for_worker_new_epoch(); INSERT INTO a SELECT generate_series(1,100); ERROR: tablespace: schemaspc, schema: spcs1 diskquota exceeded CREATE TABLE a2(i int) TABLESPACE schemaspc DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. -- expect insert fail INSERT INTO a2 SELECT generate_series(1,100); ERROR: tablespace: schemaspc, schema: spcs1 diskquota exceeded @@ -130,8 +126,22 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert success INSERT INTO a SELECT generate_series(1,100); +-- start_ignore +\! 
mkdir -p /tmp/schemaspc3 +-- end_ignore +DROP TABLESPACE IF EXISTS "Schemaspc3"; +NOTICE: tablespace "Schemaspc3" does not exist, skipping +CREATE TABLESPACE "Schemaspc3" LOCATION '/tmp/schemaspc3'; +CREATE SCHEMA "Spcs2"; +SELECT diskquota.set_schema_tablespace_quota('"Spcs2"', '"Schemaspc3"', '-1 MB'); + set_schema_tablespace_quota +----------------------------- + +(1 row) + RESET search_path; DROP TABLE spcs1.a2, spcs1.a; DROP SCHEMA spcs1, spcs2; DROP TABLESPACE schemaspc; DROP TABLESPACE schemaspc2; +DROP TABLESPACE "Schemaspc3"; diff --git a/tests/regress/expected/test_tablespace_schema_perseg.out b/tests/regress/expected/test_tablespace_schema_perseg.out index ca99c82750a..c27f3e0ea9e 100644 --- a/tests/regress/expected/test_tablespace_schema_perseg.out +++ b/tests/regress/expected/test_tablespace_schema_perseg.out @@ -94,10 +94,10 @@ SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FR -- start_ignore \! mkdir -p /tmp/schemaspc_perseg2 -- end_ignore -DROP TABLESPACE IF EXISTS schemaspc_perseg2; -NOTICE: tablespace "schemaspc_perseg2" does not exist, skipping -CREATE TABLESPACE schemaspc_perseg2 LOCATION '/tmp/schemaspc_perseg2'; -ALTER TABLE a SET TABLESPACE schemaspc_perseg2; +DROP TABLESPACE IF EXISTS "Schemaspc_perseg2"; +NOTICE: tablespace "Schemaspc_perseg2" does not exist, skipping +CREATE TABLESPACE "Schemaspc_perseg2" LOCATION '/tmp/schemaspc_perseg2'; +ALTER TABLE a SET TABLESPACE "Schemaspc_perseg2"; SELECT diskquota.wait_for_worker_new_epoch(); wait_for_worker_new_epoch --------------------------- @@ -203,19 +203,19 @@ SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FR (0 rows) -- test config per segment quota -SELECT diskquota.set_per_segment_quota('schemaspc_perseg2','1'); +SELECT diskquota.set_per_segment_quota('"Schemaspc_perseg2"','1'); set_per_segment_quota ----------------------- (1 row) -SELECT distinct(segratio) from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'schemaspc_perseg2'; +SELECT distinct(segratio) from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'Schemaspc_perseg2'; segratio ---------- 1 (1 row) -SELECT diskquota.set_schema_tablespace_quota('spcs2_perseg', 'schemaspc_perseg2','1 MB'); +SELECT diskquota.set_schema_tablespace_quota('spcs2_perseg', '"Schemaspc_perseg2"','1 MB'); set_schema_tablespace_quota ----------------------------- @@ -229,13 +229,13 @@ SELECT distinct(segratio) FROM diskquota.quota_config, pg_namespace, diskquota.t 1 (1 row) -SELECT diskquota.set_per_segment_quota('schemaspc_perseg2','-2'); +SELECT diskquota.set_per_segment_quota('"Schemaspc_perseg2"','-2'); set_per_segment_quota ----------------------- (1 row) -SELECT distinct(segratio) from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'schemaspc_perseg2'; +SELECT distinct(segratio) from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'Schemaspc_perseg2'; segratio ---------- (0 rows) @@ -248,13 +248,13 @@ SELECT distinct(segratio) FROM diskquota.quota_config, pg_namespace, diskquota.t 0 (1 row) -SELECT diskquota.set_per_segment_quota('schemaspc_perseg2','3'); +SELECT diskquota.set_per_segment_quota('"Schemaspc_perseg2"','3'); set_per_segment_quota ----------------------- (1 row) -SELECT distinct(segratio) from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'schemaspc_perseg2'; +SELECT distinct(segratio) from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 
'Schemaspc_perseg2'; segratio ---------- 3 @@ -268,15 +268,15 @@ SELECT distinct(segratio) FROM diskquota.quota_config, pg_namespace, diskquota.t 3 (1 row) -SELECT tablespace_name, per_seg_quota_ratio FROM diskquota.show_segment_ratio_quota_view where tablespace_name in ('schemaspc_perseg2', 'schemaspc_perseg'); +SELECT tablespace_name, per_seg_quota_ratio FROM diskquota.show_segment_ratio_quota_view where tablespace_name in ('Schemaspc_perseg2', 'schemaspc_perseg'); tablespace_name | per_seg_quota_ratio -------------------+--------------------- schemaspc_perseg | 2 - schemaspc_perseg2 | 3 + Schemaspc_perseg2 | 3 (2 rows) RESET search_path; DROP TABLE spcs1_perseg.a; DROP SCHEMA spcs1_perseg; DROP TABLESPACE schemaspc_perseg; -DROP TABLESPACE schemaspc_perseg2; +DROP TABLESPACE "Schemaspc_perseg2"; diff --git a/tests/regress/sql/test_role.sql b/tests/regress/sql/test_role.sql index 6836db09b99..8def1a48b6b 100644 --- a/tests/regress/sql/test_role.sql +++ b/tests/regress/sql/test_role.sql @@ -56,7 +56,13 @@ SELECT rolname from pg_roles where rolsuper=true; --end_ignore \gset select diskquota.set_role_quota(:'rolname', '1mb'); + +CREATE ROLE "Tn" NOLOGIN; +SELECT diskquota.set_role_quota('Tn', '-1 MB'); -- fail +SELECT diskquota.set_role_quota('"tn"', '-1 MB'); -- fail +SELECT diskquota.set_role_quota('"Tn"', '-1 MB'); + DROP TABLE b, b2; -DROP ROLE u1, u2; +DROP ROLE u1, u2, "Tn"; RESET search_path; DROP SCHEMA srole; diff --git a/tests/regress/sql/test_schema.sql b/tests/regress/sql/test_schema.sql index ea70b270655..3478a8d84b9 100644 --- a/tests/regress/sql/test_schema.sql +++ b/tests/regress/sql/test_schema.sql @@ -47,9 +47,12 @@ INSERT INTO badquota.a SELECT generate_series(0, 100); SELECT diskquota.wait_for_worker_new_epoch(); SELECT schema_name, quota_in_mb FROM diskquota.show_fast_schema_quota_view WHERE schema_name = 's1'; +CREATE SCHEMA "Tn1"; +SELECT diskquota.set_schema_quota('"Tn1"', '-1 MB'); + RESET search_path; DROP TABLE s1.a2, badquota.a; -DROP SCHEMA s1, s2; +DROP SCHEMA s1, s2, "Tn1"; DROP TABLE badquota.t1; DROP ROLE testbody; diff --git a/tests/regress/sql/test_tablespace_role.sql b/tests/regress/sql/test_tablespace_role.sql index 2368c2d7004..a1a524b638b 100644 --- a/tests/regress/sql/test_tablespace_role.sql +++ b/tests/regress/sql/test_tablespace_role.sql @@ -84,9 +84,21 @@ SELECT rolname from pg_roles where rolsuper=true; \gset select diskquota.set_role_tablespace_quota(:'rolname', 'rolespc', '1mb'); +-- start_ignore +\! mkdir -p /tmp/rolespc3 +-- end_ignore +DROP ROLE IF EXISTS "Rolespcu3"; +CREATE ROLE "Rolespcu3" NOLOGIN; +DROP TABLESPACE IF EXISTS "Rolespc3"; +CREATE TABLESPACE "Rolespc3" LOCATION '/tmp/rolespc3'; +SELECT diskquota.set_role_tablespace_quota('rolespcu1', '"Rolespc3"', '-1 MB'); +SELECT diskquota.set_role_tablespace_quota('"Rolespcu3"', 'rolespc', '-1 mB'); +SELECT diskquota.set_role_tablespace_quota('"Rolespcu3"', '"Rolespc3"', '-1 Mb'); + DROP TABLE b, b2; DROP ROLE rolespcu1, rolespcu2; RESET search_path; DROP SCHEMA rolespcrole; DROP TABLESPACE rolespc; DROP TABLESPACE rolespc2; +DROP TABLESPACE "Rolespc3"; diff --git a/tests/regress/sql/test_tablespace_role_perseg.sql b/tests/regress/sql/test_tablespace_role_perseg.sql index 4c49f7bfbb3..4a71e1d2614 100644 --- a/tests/regress/sql/test_tablespace_role_perseg.sql +++ b/tests/regress/sql/test_tablespace_role_perseg.sql @@ -90,9 +90,19 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert success INSERT INTO b SELECT generate_series(1,100); +-- start_ignore +\! 
mkdir -p /tmp/rolespc_perseg3 +-- end_ignore +DROP TABLESPACE IF EXISTS "Rolespc_perseg3"; +CREATE TABLESPACE "Rolespc_perseg3" LOCATION '/tmp/rolespc_perseg3'; +CREATE ROLE "Rolespc_persegu3" NOLOGIN; +SELECT diskquota.set_role_tablespace_quota('"Rolespc_persegu3"', '"Rolespc_perseg3"', '-1 MB'); +SELECT diskquota.set_per_segment_quota('"Rolespc_perseg3"', 0.11); + DROP table b; -DROP ROLE rolespc_persegu1, rolespc_persegu2; +DROP ROLE rolespc_persegu1, rolespc_persegu2, "Rolespc_persegu3"; RESET search_path; DROP SCHEMA rolespc_persegrole; DROP TABLESPACE rolespc_perseg; DROP TABLESPACE rolespc_perseg2; +DROP TABLESPACE "Rolespc_perseg3"; diff --git a/tests/regress/sql/test_tablespace_schema.sql b/tests/regress/sql/test_tablespace_schema.sql index be3e6fe56fb..b9281da965a 100644 --- a/tests/regress/sql/test_tablespace_schema.sql +++ b/tests/regress/sql/test_tablespace_schema.sql @@ -66,9 +66,17 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert success INSERT INTO a SELECT generate_series(1,100); +-- start_ignore +\! mkdir -p /tmp/schemaspc3 +-- end_ignore +DROP TABLESPACE IF EXISTS "Schemaspc3"; +CREATE TABLESPACE "Schemaspc3" LOCATION '/tmp/schemaspc3'; +CREATE SCHEMA "Spcs2"; +SELECT diskquota.set_schema_tablespace_quota('"Spcs2"', '"Schemaspc3"', '-1 MB'); + RESET search_path; DROP TABLE spcs1.a2, spcs1.a; DROP SCHEMA spcs1, spcs2; DROP TABLESPACE schemaspc; DROP TABLESPACE schemaspc2; - +DROP TABLESPACE "Schemaspc3"; diff --git a/tests/regress/sql/test_tablespace_schema_perseg.sql b/tests/regress/sql/test_tablespace_schema_perseg.sql index 8fb1c33f3d1..3d1ffb4cf12 100644 --- a/tests/regress/sql/test_tablespace_schema_perseg.sql +++ b/tests/regress/sql/test_tablespace_schema_perseg.sql @@ -44,9 +44,9 @@ SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FR -- start_ignore \! 
mkdir -p /tmp/schemaspc_perseg2 -- end_ignore -DROP TABLESPACE IF EXISTS schemaspc_perseg2; -CREATE TABLESPACE schemaspc_perseg2 LOCATION '/tmp/schemaspc_perseg2'; -ALTER TABLE a SET TABLESPACE schemaspc_perseg2; +DROP TABLESPACE IF EXISTS "Schemaspc_perseg2"; +CREATE TABLESPACE "Schemaspc_perseg2" LOCATION '/tmp/schemaspc_perseg2'; +ALTER TABLE a SET TABLESPACE "Schemaspc_perseg2"; SELECT diskquota.wait_for_worker_new_epoch(); -- expect insert succeed INSERT INTO a SELECT generate_series(1,200); @@ -84,35 +84,35 @@ INSERT INTO a SELECT generate_series(1,100); SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name = 'spcs1_perseg' and tablespace_name ='schemaspc_perseg'; -- test config per segment quota -SELECT diskquota.set_per_segment_quota('schemaspc_perseg2','1'); -SELECT distinct(segratio) from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'schemaspc_perseg2'; +SELECT diskquota.set_per_segment_quota('"Schemaspc_perseg2"','1'); +SELECT distinct(segratio) from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'Schemaspc_perseg2'; -SELECT diskquota.set_schema_tablespace_quota('spcs2_perseg', 'schemaspc_perseg2','1 MB'); +SELECT diskquota.set_schema_tablespace_quota('spcs2_perseg', '"Schemaspc_perseg2"','1 MB'); SELECT distinct(segratio) FROM diskquota.quota_config, pg_namespace, diskquota.target WHERE diskquota.quota_config.targetoid = diskquota.target.rowId AND diskquota.target.primaryOid = pg_namespace.oid AND nspname = 'spcs2_perseg'; -SELECT diskquota.set_per_segment_quota('schemaspc_perseg2','-2'); +SELECT diskquota.set_per_segment_quota('"Schemaspc_perseg2"','-2'); -SELECT distinct(segratio) from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'schemaspc_perseg2'; +SELECT distinct(segratio) from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'Schemaspc_perseg2'; SELECT distinct(segratio) FROM diskquota.quota_config, pg_namespace, diskquota.target WHERE diskquota.quota_config.targetoid = diskquota.target.rowId AND diskquota.target.primaryOid = pg_namespace.oid AND nspname = 'spcs2_perseg'; -SELECT diskquota.set_per_segment_quota('schemaspc_perseg2','3'); +SELECT diskquota.set_per_segment_quota('"Schemaspc_perseg2"','3'); -SELECT distinct(segratio) from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'schemaspc_perseg2'; +SELECT distinct(segratio) from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'Schemaspc_perseg2'; SELECT distinct(segratio) FROM diskquota.quota_config, pg_namespace, diskquota.target WHERE diskquota.quota_config.targetoid = diskquota.target.rowId AND diskquota.target.primaryOid = pg_namespace.oid AND nspname = 'spcs2_perseg'; -SELECT tablespace_name, per_seg_quota_ratio FROM diskquota.show_segment_ratio_quota_view where tablespace_name in ('schemaspc_perseg2', 'schemaspc_perseg'); +SELECT tablespace_name, per_seg_quota_ratio FROM diskquota.show_segment_ratio_quota_view where tablespace_name in ('Schemaspc_perseg2', 'schemaspc_perseg'); RESET search_path; DROP TABLE spcs1_perseg.a; DROP SCHEMA spcs1_perseg; DROP TABLESPACE schemaspc_perseg; -DROP TABLESPACE schemaspc_perseg2; +DROP TABLESPACE "Schemaspc_perseg2"; From ab7b65de33bb85a3ee861e285109c9f0bedce2f7 Mon Sep 17 00:00:00 2001 From: Chen Mulong Date: Thu, 28 Apr 2022 09:47:13 +0800 Subject: [PATCH 196/330] Hint message when launcher cannot be connected (#216) 
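As a sketch of the user-visible effect this patch aims for (message and hint wording taken from the code below; exact formatting may vary):

    CREATE EXTENSION diskquota;
    ERROR:  [diskquota] failed to create diskquota extension: no response from diskquota launcher, check whether launcher process exists
    HINT:  Create "diskquota" database and restart the cluster.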
--- diskquota_utility.c | 63 +++++++++++++++++++++++++++++++-------------- 1 file changed, 44 insertions(+), 19 deletions(-) diff --git a/diskquota_utility.c b/diskquota_utility.c index 4da17bdd638..e6047440037 100644 --- a/diskquota_utility.c +++ b/diskquota_utility.c @@ -87,11 +87,22 @@ PG_FUNCTION_INFO_V1(pull_all_table_size); #define INVALID_SEGRATIO 0.0 #define INVALID_QUOTA 0 +#define report_ddl_err(ddl_msg, prefix) \ + do \ + { \ + MessageResult ddl_result_ = (MessageResult)ddl_msg->result; \ + const char *ddl_err_; \ + const char *ddl_hint_; \ + ddl_err_code_to_err_message(ddl_result_, &ddl_err_, &ddl_hint_); \ + ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), errmsg("%s: %s", prefix, ddl_err_), \ + ddl_hint_ ? errhint("%s", ddl_hint_) : 0)); \ + } while (0) + static object_access_hook_type next_object_access_hook; static bool is_database_empty(void); -static void dq_object_access_hook(ObjectAccessType access, Oid classId, Oid objectId, int subId, void *arg); -static const char *ddl_err_code_to_err_message(MessageResult code); -static int64 get_size_in_mb(char *str); +static void dq_object_access_hook(ObjectAccessType access, Oid classId, Oid objectId, int subId, void *arg); +static void ddl_err_code_to_err_message(MessageResult code, const char **err_msg, const char **hint_msg); +static int64 get_size_in_mb(char *str); static void set_quota_config_internal(Oid targetoid, int64 quota_limit_mb, QuotaType type, float4 segratio, Oid spcoid); static int set_target_internal(Oid primaryoid, Oid spcoid, int64 quota_limit_mb, QuotaType type); static float4 get_per_segment_ratio(Oid spcoid); @@ -356,7 +367,10 @@ diskquota_start_worker(PG_FUNCTION_ARGS) ResetLatch(&MyProc->procLatch); ereportif(kill(launcher_pid, 0) == -1 && errno == ESRCH, // do existence check - ERROR, (errmsg("[diskquota] diskquota launcher pid = %d no longer exists", launcher_pid))); + ERROR, + (errmsg("[diskquota] diskquota launcher pid = %d no longer exists", launcher_pid), + errhint("The diskquota launcher process has been terminated for some reason. Consider " + "restarting the cluster to start it."))); LWLockAcquire(diskquota_locks.extension_ddl_message_lock, LW_SHARED); if (extension_ddl_message->result != ERR_PENDING) @@ -372,8 +386,7 @@ diskquota_start_worker(PG_FUNCTION_ARGS) { LWLockRelease(diskquota_locks.extension_ddl_message_lock); LWLockRelease(diskquota_locks.extension_ddl_lock); - elog(ERROR, "[diskquota] failed to create diskquota extension: %s", - ddl_err_code_to_err_message((MessageResult)extension_ddl_message->result)); + report_ddl_err(extension_ddl_message, "[diskquota] failed to create diskquota extension"); } LWLockRelease(diskquota_locks.extension_ddl_message_lock); LWLockRelease(diskquota_locks.extension_ddl_lock); @@ -611,7 +624,10 @@ dq_object_access_hook_on_drop(void) ResetLatch(&MyProc->procLatch); ereportif(kill(launcher_pid, 0) == -1 && errno == ESRCH, // do existence check - ERROR, (errmsg("[diskquota] diskquota launcher pid = %d no longer exists", launcher_pid))); + ERROR, + (errmsg("[diskquota] diskquota launcher pid = %d no longer exists", launcher_pid), + errhint("The diskquota launcher process has been terminated for some reason.
Consider " + "restarting the cluster to start it."))); LWLockAcquire(diskquota_locks.extension_ddl_message_lock, LW_SHARED); if (extension_ddl_message->result != ERR_PENDING) @@ -627,8 +643,7 @@ dq_object_access_hook_on_drop(void) { LWLockRelease(diskquota_locks.extension_ddl_message_lock); LWLockRelease(diskquota_locks.extension_ddl_lock); - elog(ERROR, "[diskquota launcher] failed to drop diskquota extension: %s", - ddl_err_code_to_err_message((MessageResult)extension_ddl_message->result)); + report_ddl_err(extension_ddl_message, "[diskquota] failed to drop diskquota extension"); } LWLockRelease(diskquota_locks.extension_ddl_message_lock); LWLockRelease(diskquota_locks.extension_ddl_lock); @@ -666,27 +681,37 @@ dq_object_access_hook(ObjectAccessType access, Oid classId, Oid objectId, int su * Using this function to convert error code from diskquota * launcher to error message and return it to client. */ -static const char * -ddl_err_code_to_err_message(MessageResult code) +static void +ddl_err_code_to_err_message(MessageResult code, const char **err_msg, const char **hint_msg) { + *hint_msg = NULL; switch (code) { case ERR_PENDING: - return "no response from diskquota launcher, check whether launcher process exists"; + *err_msg = "no response from diskquota launcher, check whether launcher process exists"; + *hint_msg = "Create \"diskquota\" database and restart the cluster."; + break; case ERR_OK: - return "succeeded"; + *err_msg = "succeeded"; + break; case ERR_EXCEED: - return "too many databases to monitor"; + *err_msg = "too many databases to monitor"; + break; case ERR_ADD_TO_DB: - return "add dbid to database_list failed"; + *err_msg = "add dbid to database_list failed"; + break; case ERR_DEL_FROM_DB: - return "delete dbid from database_list failed"; + *err_msg = "delete dbid from database_list failed"; + break; case ERR_START_WORKER: - return "start diskquota worker failed"; + *err_msg = "start diskquota worker failed"; + break; case ERR_INVALID_DBID: - return "invalid dbid"; + *err_msg = "invalid dbid"; + break; default: - return "unknown error"; + *err_msg = "unknown error"; + break; } } From 75adaecb317118dc67dd0285e56b62d040ba11d0 Mon Sep 17 00:00:00 2001 From: Chen Mulong Date: Thu, 28 Apr 2022 17:36:51 +0800 Subject: [PATCH 197/330] SPI & entry clear in refresh_blackmap (#211) - refresh_blackmap doesn't need SPI calls. - Clearing should happen within the same lock as refreshing the entries; otherwise there would be a short window during which other processes see an empty rejectmap while checking the hard limit. --- quotamodel.c | 24 ++++++++---------------- 1 file changed, 8 insertions(+), 16 deletions(-) diff --git a/quotamodel.c b/quotamodel.c index 3cb714c8933..f8533da1cf4 100644 --- a/quotamodel.c +++ b/quotamodel.c @@ -1620,7 +1620,6 @@ refresh_rejectmap(PG_FUNCTION_ARGS) HASH_SEQ_STATUS hash_seq; HTAB *local_rejectmap; HASHCTL hashctl; - int ret_code; if (!superuser()) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("must be superuser to update rejectmap"))); @@ -1630,20 +1629,8 @@ refresh_rejectmap(PG_FUNCTION_ARGS) if (ARR_NDIM(rejectmap_array_type) > 1 || ARR_NDIM(active_oid_array_type) > 1) ereport(ERROR, (errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR), errmsg("1-dimensional array needed"))); - /* Firstly, clear the rejectmap entries.
*/ - LWLockAcquire(diskquota_locks.reject_map_lock, LW_EXCLUSIVE); - hash_seq_init(&hash_seq, disk_quota_reject_map); - while ((rejectmapentry = hash_seq_search(&hash_seq)) != NULL) - hash_search(disk_quota_reject_map, &rejectmapentry->keyitem, HASH_REMOVE, NULL); - LWLockRelease(diskquota_locks.reject_map_lock); - - ret_code = SPI_connect(); - if (ret_code != SPI_OK_CONNECT) - ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("unable to connect to execute internal query, return code: %d", ret_code))); - /* - * Secondly, iterate over rejectmap entries and add these entries to the local reject map + * Iterate over rejectmap entries and add these entries to the local reject map * on segment servers so that we are able to check whether the given relation (by oid) * should be rejected in O(1) time complexity in third step. */ @@ -1859,8 +1846,14 @@ refresh_rejectmap(PG_FUNCTION_ARGS) } } - /* Flush the content of local_rejectmap to the global rejectmap. */ LWLockAcquire(diskquota_locks.reject_map_lock, LW_EXCLUSIVE); + + /* Clear rejectmap entries. */ + hash_seq_init(&hash_seq, disk_quota_reject_map); + while ((rejectmapentry = hash_seq_search(&hash_seq)) != NULL) + hash_search(disk_quota_reject_map, &rejectmapentry->keyitem, HASH_REMOVE, NULL); + + /* Flush the content of local_rejectmap to the global rejectmap. */ hash_seq_init(&hash_seq, local_rejectmap); while ((rejectmapentry = hash_seq_search(&hash_seq)) != NULL) { @@ -1877,7 +1870,6 @@ refresh_rejectmap(PG_FUNCTION_ARGS) } LWLockRelease(diskquota_locks.reject_map_lock); - SPI_finish(); PG_RETURN_VOID(); } From dc21e40344952af94d6bc241fa583d512d6c00b3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Xuebin=20Su=20=28=E8=8B=8F=E5=AD=A6=E6=96=8C=29?= <12034000+xuebinsu@users.noreply.github.com> Date: Fri, 29 Apr 2022 11:30:52 +0800 Subject: [PATCH 198/330] Minimize the risk of stale relfilenodes (#217) Since we do NOT take any lock when accessing info of a relation, cache invalidation messages may not be delivered in time each time we SearchSysCache() for the relfilenode. As a result, the relfilenode may be stale and the table size may be incorrect. This patch fixes this issue by - Checking for invalidation messages each time before SearchSysCache(RELOID). - Not removing a relation from the active table map if the relfilenode is stale. Besides, since we do not remove the relfilenode if it maps to no valid relation oid, we need to remove those dangling relfilenodes on unlink() to avoid a memory leak.
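The essence of the staleness check, condensed into a short C sketch (see is_cached_relfilenode_stale() in the gp_activetable.c diff below for the full version; relOid and rnode come from the active table map entry):

    /* Deliver any pending invalidation messages before consulting the syscache. */
    AcceptInvalidationMessages();
    HeapTuple tp = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(relOid));
    if (!HeapTupleIsValid(tp)) return false; /* uncommitted or already dropped */
    /* The cached relfilenode is stale if it no longer matches the one in the active table map. */
    bool is_stale = ((Form_pg_class) GETSTRUCT(tp))->relfilenode != rnode.relNode;
    heap_freetuple(tp);
    return is_stale;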
--- gp_activetable.c | 73 +++++++++++++++++++++++++++++++++++++++++++++++- quotamodel.c | 19 +++++++++++++ relation_cache.c | 13 +++++++++ 3 files changed, 104 insertions(+), 1 deletion(-) diff --git a/gp_activetable.c b/gp_activetable.c index 4da2f7e9db8..f15bdbe4371 100644 --- a/gp_activetable.c +++ b/gp_activetable.c @@ -31,6 +31,8 @@ #include "libpq-fe.h" #include "utils/faultinjector.h" #include "utils/lsyscache.h" +#include "utils/syscache.h" +#include "utils/inval.h" #include "gp_activetable.h" #include "diskquota.h" @@ -69,6 +71,7 @@ static void pull_active_table_size_from_seg(HTAB *local_table_stats_ma static StringInfoData convert_map_to_string(HTAB *active_list); static void load_table_size(HTAB *local_table_stats_map); static void report_active_table_helper(const RelFileNodeBackend *relFileNode); +static void remove_from_active_table_map(const RelFileNodeBackend *relFileNode); static void report_relation_cache_helper(Oid relid); static void report_altered_reloid(Oid reloid); @@ -164,6 +167,11 @@ active_table_hook_smgrunlink(RelFileNodeBackend rnode) { if (prev_file_unlink_hook) (*prev_file_unlink_hook)(rnode); + /* + * Since we do not remove the relfilenode if it does not map to any valid + * relation oid, we need to do the cleaning here to avoid a memory leak + */ + remove_from_active_table_map(&rnode); remove_cache_entry(InvalidOid, rnode.node.relNode); } @@ -296,6 +304,23 @@ report_active_table_helper(const RelFileNodeBackend *relFileNode) LWLockRelease(diskquota_locks.active_table_lock); } +/* + * Remove relfilenode from the active table map if it exists. + */ +static void +remove_from_active_table_map(const RelFileNodeBackend *relFileNode) +{ + DiskQuotaActiveTableFileEntry item = {0}; + + item.dbid = relFileNode->node.dbNode; + item.relfilenode = relFileNode->node.relNode; + item.tablespaceoid = relFileNode->node.spcNode; + + LWLockAcquire(diskquota_locks.active_table_lock, LW_EXCLUSIVE); + hash_search(active_tables_map, &item, HASH_REMOVE, NULL); + LWLockRelease(diskquota_locks.active_table_lock); +} + /* * Interface of activetable module * This function is called by quotamodel module. @@ -333,6 +358,9 @@ gp_fetch_active_tables(bool is_init) local_active_table_oid_maps = pull_active_list_from_seg(); active_oid_list = convert_map_to_string(local_active_table_oid_maps); + ereport(DEBUG1, + (errcode(ERRCODE_INTERNAL_ERROR), errmsg("[diskquota] active_oid_list = %s", active_oid_list.data))); + /* step 2: fetch active table sizes based on active oids */ pull_active_table_size_from_seg(local_table_stats_map, active_oid_list.data); @@ -599,6 +627,42 @@ is_relation_being_altered(Oid relid) return being_altered; } +/* + * Check whether the cached relfilenode is stale compared to the given one + * due to delayed cache invalidation messages. + * + * NOTE: It will return false if the relation is currently uncommitted. + */ +static bool +is_cached_relfilenode_stale(Oid relOid, RelFileNode rnode) +{ + /* + * Since we don't take any lock on relation, need to check for cache + * invalidation messages manually. + */ + AcceptInvalidationMessages(); + HeapTuple tp = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(relOid)); + + /* + * Tuple is not valid if + * - The relation has not been committed yet, or + * - The relation has been deleted + */ + if (!HeapTupleIsValid(tp)) return false; + Form_pg_class reltup = (Form_pg_class)GETSTRUCT(tp); + + /* + * If cache invalidation messages are not delivered in time, the + * relfilenode in the tuple of the relation is stale.
In that case, + * the relfilenode in the relation tuple is not equal to the one in + * the active table map. + */ + Oid cached_relfilenode = reltup->relfilenode; + bool is_stale = cached_relfilenode != rnode.relNode; + heap_freetuple(tp); + return is_stale; +} + /* * Get local active table with table oid and table size info. * This function first copies active table map from shared memory @@ -700,8 +764,15 @@ get_active_tables_oid(void) active_table_entry->tablesize = 0; active_table_entry->segid = -1; } - if (!is_relation_being_altered(relOid)) + /* + * Do NOT remove relation from the active table map if it is being + * altered or its cached relfilenode is stale so that we can check it + * again in the next epoch. + */ + if (!is_relation_being_altered(relOid) && !is_cached_relfilenode_stale(relOid, rnode)) + { hash_search(local_active_table_file_map, active_table_file_entry, HASH_REMOVE, NULL); + } } } diff --git a/quotamodel.c b/quotamodel.c index f8533da1cf4..1c533a84c84 100644 --- a/quotamodel.c +++ b/quotamodel.c @@ -31,6 +31,7 @@ #include "utils/builtins.h" #include "utils/guc.h" #include "utils/faultinjector.h" +#include "utils/inval.h" #include "utils/lsyscache.h" #include "utils/snapmgr.h" #include "utils/syscache.h" @@ -1322,6 +1323,12 @@ get_rel_owner_schema_tablespace(Oid relid, Oid *ownerOid, Oid *nsOid, Oid *table { HeapTuple tp; + /* + * Since we don't take any lock on relation, check for cache + * invalidation messages manually to minimize risk of cache + * inconsistency. + */ + AcceptInvalidationMessages(); tp = SearchSysCache1(RELOID, ObjectIdGetDatum(relid)); bool found = HeapTupleIsValid(tp); if (HeapTupleIsValid(tp)) @@ -1351,6 +1358,12 @@ get_rel_name_namespace(Oid relid, Oid *nsOid, char *relname) { HeapTuple tp; + /* + * Since we don't take any lock on relation, check for cache + * invalidation messages manually to minimize risk of cache + * inconsistency. + */ + AcceptInvalidationMessages(); tp = SearchSysCache1(RELOID, ObjectIdGetDatum(relid)); bool found = HeapTupleIsValid(tp); if (found) @@ -1692,6 +1705,12 @@ refresh_rejectmap(PG_FUNCTION_ARGS) active_oid = DatumGetObjectId(datums[i]); if (!OidIsValid(active_oid)) continue; + /* + * Since we don't take any lock on relation, check for cache + * invalidation messages manually to minimize risk of cache + * inconsistency. + */ + AcceptInvalidationMessages(); tuple = SearchSysCacheCopy1(RELOID, active_oid); if (HeapTupleIsValid(tuple)) { diff --git a/relation_cache.c b/relation_cache.c index 5ab2693d442..d7ae8c9390f 100644 --- a/relation_cache.c +++ b/relation_cache.c @@ -19,6 +19,7 @@ #include "utils/relfilenodemap.h" #include "utils/syscache.h" #include "utils/array.h" +#include "utils/inval.h" #include "funcapi.h" #include "relation_cache.h" @@ -449,6 +450,12 @@ get_relation_entry_from_pg_class(Oid relid, DiskQuotaRelationCacheEntry *relatio Oid visimaprelid = InvalidOid; bool is_ao = false; + /* + * Since we don't take any lock on relation, check for cache + * invalidation messages manually to minimize risk of cache + * inconsistency. + */ + AcceptInvalidationMessages(); classTup = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(relid)); if (!HeapTupleIsValid(classTup) || relation_entry == NULL) { @@ -549,6 +556,12 @@ get_relfilenode_by_relid(Oid relid, RelFileNodeBackend *rnode, char *relstorage) Form_pg_class classForm; memset(rnode, 0, sizeof(RelFileNodeBackend)); + /* + * Since we don't take any lock on relation, check for cache + * invalidation messages manually to minimize risk of cache + * inconsistency. 
+ */ + AcceptInvalidationMessages(); classTup = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(relid)); if (HeapTupleIsValid(classTup)) { From 46d44efb855e6d301775b17b446aecd90c9964cb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Xuebin=20Su=20=28=E8=8B=8F=E5=AD=A6=E6=96=8C=29?= <12034000+xuebinsu@users.noreply.github.com> Date: Thu, 5 May 2022 10:59:56 +0800 Subject: [PATCH 199/330] Report error if diskquota is not ready (#218) Previously, setting quotas while diskquota was not ready was allowed, but the quotas set would not take effect, which could confuse the user. This patch fixes this issue by reporting an error when setting quotas if diskquota is not ready. This is done by checking diskquota.state. This patch also fixes possible deadlocks in the existing test cases. --- diskquota.h | 1 + diskquota_utility.c | 8 +- quotamodel.c | 97 +++++++++---------- tests/regress/diskquota_schedule | 1 + .../test_clean_rejectmap_after_drop.out | 14 ++- tests/regress/expected/test_extension.out | 7 +- .../regress/expected/test_update_db_cache.out | 3 +- .../expected/test_worker_not_ready.out | 25 +++++ .../sql/test_clean_rejectmap_after_drop.sql | 2 + tests/regress/sql/test_worker_not_ready.sql | 20 ++++ 10 files changed, 118 insertions(+), 60 deletions(-) create mode 100644 tests/regress/expected/test_worker_not_ready.out create mode 100644 tests/regress/sql/test_worker_not_ready.sql diff --git a/diskquota.h b/diskquota.h index a308de59e77..b86e7e72f42 100644 --- a/diskquota.h +++ b/diskquota.h @@ -182,5 +182,6 @@ extern Oid diskquota_parse_primary_table_oid(Oid namespace, char *relname); extern bool worker_increase_epoch(Oid database_oid); extern unsigned int worker_get_epoch(Oid database_oid); extern bool diskquota_is_paused(void); +extern void do_check_diskquota_state_is_ready(void); #endif diff --git a/diskquota_utility.c b/diskquota_utility.c index e6047440037..312472cfc6f 100644 --- a/diskquota_utility.c +++ b/diskquota_utility.c @@ -394,9 +394,8 @@ diskquota_start_worker(PG_FUNCTION_ARGS) /* notify DBA to run init_table_size_table() when db is not empty */ if (!is_database_empty()) { - ereport(WARNING, (errmsg("database is not empty, please run `select diskquota.init_table_size_table()` to " - "initialize table_size information for diskquota extension. Note that for large " - "database, this function may take a long time."))); + ereport(WARNING, (errmsg("[diskquota] diskquota is not ready because current database is not empty"), + errhint("please run 'SELECT diskquota.init_table_size_table();' to initialize diskquota"))); } PG_RETURN_VOID(); } @@ -910,6 +909,9 @@ set_quota_config_internal(Oid targetoid, int64 quota_limit_mb, QuotaType type, f { int ret; + /* Report error if diskquota is not ready. */ + do_check_diskquota_state_is_ready(); + /* * If error happens in set_quota_config_internal, just return error messages to * the client side. So there is no need to catch the error.
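A sketch of the new user-visible behavior (error and hint text taken from the quotamodel.c change that follows; the exact output may vary):

    SELECT diskquota.set_schema_quota('s1', '1 MB');
    ERROR:  [diskquota] diskquota is not ready
    HINT:  please run 'SELECT diskquota.init_table_size_table();' to initialize diskquota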
diff --git a/quotamodel.c b/quotamodel.c index 1c533a84c84..74f748e6a85 100644 --- a/quotamodel.c +++ b/quotamodel.c @@ -181,7 +181,6 @@ static void flush_local_reject_map(void); static void dispatch_rejectmap(HTAB *local_active_table_stat_map); static bool load_quotas(void); static void do_load_quotas(void); -static bool do_check_diskquota_state_is_ready(void); static Size DiskQuotaShmemSize(void); static void disk_quota_shmem_startup(void); @@ -514,6 +513,28 @@ init_disk_quota_model(void) &hash_ctl, HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); } +static void +dispatch_my_db_to_all_segments(void) +{ + /* Add current database to the monitored db cache on all segments */ + int ret = SPI_execute_with_args( + "SELECT diskquota.diskquota_fetch_table_stat($1, ARRAY[]::oid[]) FROM gp_dist_random('gp_id')", 1, + (Oid[]){ + INT4OID, + }, + (Datum[]){ + Int32GetDatum(ADD_DB_TO_MONITOR), + }, + NULL, true, 0); + + ereportif(ret != SPI_OK_SELECT, ERROR, + (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("[diskquota] check diskquota state SPI_execute failed: error code %d", ret))); + + /* Add current database to the monitored db cache on coordinator */ + update_diskquota_db_list(MyDatabaseId, HASH_ENTER); +} + /* * Check whether the diskquota state is ready */ @@ -542,7 +563,9 @@ check_diskquota_state_is_ready(void) connected = true; PushActiveSnapshot(GetTransactionSnapshot()); pushed_active_snap = true; - is_ready = do_check_diskquota_state_is_ready(); + dispatch_my_db_to_all_segments(); + do_check_diskquota_state_is_ready(); + is_ready = true; } PG_CATCH(); { @@ -566,76 +589,46 @@ check_diskquota_state_is_ready(void) } /* - * Check whether the diskquota state is ready - * For empty database, the diskquota state would - * be ready after 'create extension diskquota' and - * it's ready to use. But for non-empty database, + * Check whether the diskquota state is ready. Throw an error if it is not. + * + * For empty database, table diskquota.state would be ready after + * 'CREATE EXTENSION diskquota;'. But for non-empty database, * user need to run UDF diskquota.init_table_size_table() * manually to get all the table size information and * store them into table diskquota.table_size */ -static bool +void do_check_diskquota_state_is_ready(void) { int ret; TupleDesc tupdesc; - int i; - - /* Add current database to the monitored db cache on all segments */ - ret = SPI_execute_with_args( - "SELECT diskquota.diskquota_fetch_table_stat($1, ARRAY[]::oid[]) FROM gp_dist_random('gp_id')", 1, - (Oid[]){ - INT4OID, - }, - (Datum[]){ - Int32GetDatum(ADD_DB_TO_MONITOR), - }, - NULL, true, 0); - - ereportif(ret != SPI_OK_SELECT, ERROR, - (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("[diskquota] check diskquota state SPI_execute failed: error code %d", ret))); - - /* Add current database to the monitored db cache on coordinator */ - update_diskquota_db_list(MyDatabaseId, HASH_ENTER); - /* - * check diskquota state from table diskquota.state errors will be catch - * at upper level function. 
- */ ret = SPI_execute("select state from diskquota.state", true, 0); ereportif(ret != SPI_OK_SELECT, ERROR, (errcode(ERRCODE_INTERNAL_ERROR), errmsg("[diskquota] check diskquota state SPI_execute failed: error code %d", ret))); tupdesc = SPI_tuptable->tupdesc; - if (tupdesc->natts != 1 || ((tupdesc)->attrs[0])->atttypid != INT4OID) + if (SPI_processed != 1 || tupdesc->natts != 1 || ((tupdesc)->attrs[0])->atttypid != INT4OID) { - ereport(ERROR, - (errcode(ERRCODE_INTERNAL_ERROR), errmsg("[diskquota] table \"state\" is corrupted in database \"%s\"," - " please recreate diskquota extension", - get_database_name(MyDatabaseId)))); + ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("[diskquota] \"diskquota.state\" is corrupted in database \"%s\"," + " please recreate diskquota extension", + get_database_name(MyDatabaseId)))); } - for (i = 0; i < SPI_processed; i++) - { - HeapTuple tup = SPI_tuptable->vals[i]; - Datum dat; - int state; - bool isnull; + HeapTuple tup = SPI_tuptable->vals[0]; + Datum dat; + int state; + bool isnull; - dat = SPI_getbinval(tup, tupdesc, 1, &isnull); - if (isnull) continue; - state = DatumGetInt64(dat); + dat = SPI_getbinval(tup, tupdesc, 1, &isnull); + state = isnull ? DISKQUOTA_UNKNOWN_STATE : DatumGetInt32(dat); - if (state == DISKQUOTA_READY_STATE) - { - return true; - } + if (state != DISKQUOTA_READY_STATE) + { + ereport(ERROR, (errmsg("[diskquota] diskquota is not ready"), + errhint("please run 'SELECT diskquota.init_table_size_table();' to initialize diskquota"))); } - ereport(WARNING, (errmsg("Diskquota is not in ready state. " - "please run UDF init_table_size_table()"))); - - return false; } /* diff --git a/tests/regress/diskquota_schedule b/tests/regress/diskquota_schedule index 4b1e691b6b7..899d60f6d25 100644 --- a/tests/regress/diskquota_schedule +++ b/tests/regress/diskquota_schedule @@ -13,6 +13,7 @@ test: test_quota_view_no_table # disable this test due to GPDB behavior change # test: test_table_size test: test_fast_disk_check +test: test_worker_not_ready #test: test_insert_after_drop test: test_role test_schema test_drop_table test_column test_copy test_update test_toast test_truncate test_reschema test_temp_role test_rename test_delete_quota test_mistake test_tablespace_role test_tablespace_schema test_tablespace_role_perseg test_tablespace_schema_perseg test_index test_recreate test: test_ctas_no_preload_lib diff --git a/tests/regress/expected/test_clean_rejectmap_after_drop.out b/tests/regress/expected/test_clean_rejectmap_after_drop.out index 3e182106ad1..2c25b6b81fb 100644 --- a/tests/regress/expected/test_clean_rejectmap_after_drop.out +++ b/tests/regress/expected/test_clean_rejectmap_after_drop.out @@ -20,7 +20,19 @@ SELECT diskquota.wait_for_worker_new_epoch(); (1 row) INSERT INTO b SELECT generate_series(1, 100000000); -- fail -ERROR: role's disk space quota exceeded with name: 16574 (seg0 127.0.0.1:6002 pid=356116) +ERROR: role's disk space quota exceeded with name: 34523 (seg0 127.0.0.1:6002 pid=23690) +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + DROP EXTENSION diskquota; INSERT INTO b SELECT generate_series(1, 100); -- ok \c contrib_regression diff --git a/tests/regress/expected/test_extension.out b/tests/regress/expected/test_extension.out index 9a2fd427abd..794b1bd854b 100644 --- a/tests/regress/expected/test_extension.out +++ b/tests/regress/expected/test_extension.out @@ -54,7 +54,8 @@ CREATE 
SCHEMA SX; CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); INSERT INTO SX.a values(generate_series(0, 100000)); CREATE EXTENSION diskquota; -WARNING: database is not empty, please run `select diskquota.init_table_size_table()` to initialize table_size information for diskquota extension. Note that for large database, this function may take a long time. +WARNING: [diskquota] diskquota is not ready because current database is not empty +HINT: please run 'SELECT diskquota.init_table_size_table();' to initialize diskquota SELECT diskquota.init_table_size_table(); init_table_size_table ----------------------- @@ -282,14 +283,14 @@ ERROR: schema's disk space quota exceeded with name: sx DROP TABLE SX.a; \c dbx9 CREATE EXTENSION diskquota; -ERROR: [diskquota] failed to create diskquota extension: too many databases to monitor (diskquota_utility.c:286) +ERROR: [diskquota] failed to create diskquota extension: too many databases to monitor (diskquota_utility.c:376) \! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 11 SELECT diskquota.wait_for_worker_new_epoch(); ERROR: schema "diskquota" does not exist \c dbx10 CREATE EXTENSION diskquota; -ERROR: [diskquota] failed to create diskquota extension: too many databases to monitor (diskquota_utility.c:286) +ERROR: [diskquota] failed to create diskquota extension: too many databases to monitor (diskquota_utility.c:376) \! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 11 SELECT diskquota.wait_for_worker_new_epoch(); diff --git a/tests/regress/expected/test_update_db_cache.out b/tests/regress/expected/test_update_db_cache.out index 398d9c5f006..6d45170a645 100644 --- a/tests/regress/expected/test_update_db_cache.out +++ b/tests/regress/expected/test_update_db_cache.out @@ -35,7 +35,8 @@ DROP EXTENSION diskquota; CREATE TABLE t_no_extension(i) AS SELECT generate_series(1, 100000) DISTRIBUTED BY (i); CREATE EXTENSION diskquota; -WARNING: database is not empty, please run `select diskquota.init_table_size_table()` to initialize table_size information for diskquota extension. Note that for large database, this function may take a long time. +WARNING: [diskquota] diskquota is not ready because current database is not empty +HINT: please run 'SELECT diskquota.init_table_size_table();' to initialize diskquota -- Sleep until the worker adds the current db to cache so that it can be found -- when DROP EXTENSION. -- FIXME: We cannot use wait_for_worker_new_epoch() here because diff --git a/tests/regress/expected/test_worker_not_ready.out b/tests/regress/expected/test_worker_not_ready.out new file mode 100644 index 00000000000..6e916b4791d --- /dev/null +++ b/tests/regress/expected/test_worker_not_ready.out @@ -0,0 +1,25 @@ +CREATE DATABASE db_not_ready; +\c db_not_ready; +CREATE TABLE t (i int) DISTRIBUTED BY (i); +CREATE EXTENSION diskquota; +WARNING: [diskquota] diskquota is not ready because current database is not empty +HINT: please run 'SELECT diskquota.init_table_size_table();' to initialize diskquota +SELECT diskquota.set_role_quota(CURRENT_ROLE, '1 MB'); +ERROR: Can not set disk quota for system owner: gpadmin +SELECT diskquota.pause(); + pause +------- + +(1 row) + +-- diskquota.wait_for_worker_new_epoch() cannot be used here because +-- diskquota.state is not clean. 
+SELECT pg_sleep(5); + pg_sleep +---------- + +(1 row) + +DROP EXTENSION diskquota; +\c contrib_regression +DROP DATABASE db_not_ready; diff --git a/tests/regress/sql/test_clean_rejectmap_after_drop.sql b/tests/regress/sql/test_clean_rejectmap_after_drop.sql index 2eebb09f5db..10a5f9618c0 100644 --- a/tests/regress/sql/test_clean_rejectmap_after_drop.sql +++ b/tests/regress/sql/test_clean_rejectmap_after_drop.sql @@ -14,6 +14,8 @@ SELECT diskquota.wait_for_worker_new_epoch(); INSERT INTO b SELECT generate_series(1, 100000000); -- fail +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); DROP EXTENSION diskquota; INSERT INTO b SELECT generate_series(1, 100); -- ok diff --git a/tests/regress/sql/test_worker_not_ready.sql b/tests/regress/sql/test_worker_not_ready.sql new file mode 100644 index 00000000000..e095e4b8a82 --- /dev/null +++ b/tests/regress/sql/test_worker_not_ready.sql @@ -0,0 +1,20 @@ +CREATE DATABASE db_not_ready; +\c db_not_ready; + +CREATE TABLE t (i int) DISTRIBUTED BY (i); + +CREATE EXTENSION diskquota; + +SELECT diskquota.set_role_quota(CURRENT_ROLE, '1 MB'); + +SELECT diskquota.pause(); + +-- diskquota.wait_for_worker_new_epoch() cannot be used here because +-- diskquota.state is not clean. +SELECT pg_sleep(5); + +DROP EXTENSION diskquota; + +\c contrib_regression + +DROP DATABASE db_not_ready; From 2cde5d8cb5be71fef867f48c8ba90ca4e821eff0 Mon Sep 17 00:00:00 2001 From: Xiaoran Wang Date: Fri, 6 May 2022 16:51:01 +0800 Subject: [PATCH 200/330] Allow deleting quota for super user (#219) --- diskquota_utility.c | 12 +++++++----- tests/regress/expected/test_role.out | 6 ++++++ tests/regress/sql/test_role.sql | 1 + 3 files changed, 14 insertions(+), 5 deletions(-) diff --git a/diskquota_utility.c b/diskquota_utility.c index 312472cfc6f..801a6e2f32e 100644 --- a/diskquota_utility.c +++ b/diskquota_utility.c @@ -107,7 +107,7 @@ static void set_quota_config_internal(Oid targetoid, int64 quota_limit_mb, Quota static int set_target_internal(Oid primaryoid, Oid spcoid, int64 quota_limit_mb, QuotaType type); static float4 get_per_segment_ratio(Oid spcoid); static bool to_delete_quota(QuotaType type, int64 quota_limit_mb, float4 segratio); -static void check_role(Oid roleoid, char *rolname); +static void check_role(Oid roleoid, char *rolname, int64 quota_limit_mb); List *get_rel_oid_list(void); @@ -757,7 +757,6 @@ set_role_quota(PG_FUNCTION_ARGS) rolname = text_to_cstring(PG_GETARG_TEXT_PP(0)); roleoid = __get_oid_auto_case_convert(get_role_oid, rolname); - check_role(roleoid, rolname); sizestr = text_to_cstring(PG_GETARG_TEXT_PP(1)); sizestr = str_tolower(sizestr, strlen(sizestr), DEFAULT_COLLATION_OID); @@ -767,6 +766,8 @@ set_role_quota(PG_FUNCTION_ARGS) { ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("disk quota can not be set to 0 MB"))); } + check_role(roleoid, rolname, quota_limit_mb); + SPI_connect(); set_quota_config_internal(roleoid, quota_limit_mb, ROLE_QUOTA, INVALID_SEGRATIO, InvalidOid); SPI_finish(); @@ -831,7 +832,6 @@ set_role_tablespace_quota(PG_FUNCTION_ARGS) rolname = text_to_cstring(PG_GETARG_TEXT_PP(0)); roleoid = __get_oid_auto_case_convert(get_role_oid, rolname); - check_role(roleoid, rolname); spcname = text_to_cstring(PG_GETARG_TEXT_PP(1)); spcoid = __get_oid_auto_case_convert(get_tablespace_oid, spcname); @@ -844,6 +844,7 @@ set_role_tablespace_quota(PG_FUNCTION_ARGS) { ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("disk quota can not be set to 0 MB"))); } + check_role(roleoid, rolname, 
quota_limit_mb); SPI_connect(); row_id = set_target_internal(roleoid, spcoid, quota_limit_mb, ROLE_TABLESPACE_QUOTA); @@ -1737,9 +1738,10 @@ to_delete_quota(QuotaType type, int64 quota_limit_mb, float4 segratio) } static void -check_role(Oid roleoid, char *rolname) +check_role(Oid roleoid, char *rolname, int64 quota_limit_mb) { - if (roleoid == BOOTSTRAP_SUPERUSERID) + /* reject setting quota for super user, but deletion is allowed */ + if (roleoid == BOOTSTRAP_SUPERUSERID && quota_limit_mb >= 0) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("Can not set disk quota for system owner: %s", rolname))); } diff --git a/tests/regress/expected/test_role.out b/tests/regress/expected/test_role.out index 2923632ac1d..f4d6690c736 100644 --- a/tests/regress/expected/test_role.out +++ b/tests/regress/expected/test_role.out @@ -114,6 +114,12 @@ SELECT rolname from pg_roles where rolsuper=true; \gset select diskquota.set_role_quota(:'rolname', '1mb'); ERROR: Can not set disk quota for system owner: sa +select diskquota.set_role_quota(:'rolname', '-1mb'); + set_role_quota +---------------- + +(1 row) + CREATE ROLE "Tn" NOLOGIN; NOTICE: resource queue required -- using default resource queue "pg_default" SELECT diskquota.set_role_quota('Tn', '-1 MB'); -- fail diff --git a/tests/regress/sql/test_role.sql b/tests/regress/sql/test_role.sql index 8def1a48b6b..6472c4c5e7b 100644 --- a/tests/regress/sql/test_role.sql +++ b/tests/regress/sql/test_role.sql @@ -56,6 +56,7 @@ SELECT rolname from pg_roles where rolsuper=true; --end_ignore \gset select diskquota.set_role_quota(:'rolname', '1mb'); +select diskquota.set_role_quota(:'rolname', '-1mb'); CREATE ROLE "Tn" NOLOGIN; SELECT diskquota.set_role_quota('Tn', '-1 MB'); -- fail From d99c93258f0f87612fe26b1db2861adba4fc3a5d Mon Sep 17 00:00:00 2001 From: Chen Mulong Date: Mon, 30 May 2022 14:52:42 +0800 Subject: [PATCH 201/330] Release pipeline is publishing wrong intermediates (#221) "get" without "passed" will retrieve the latest content from a resource, which may not be the expected release, especially since the release and dev pipelines share the intermediates bucket. - Move release intermediates to their own bucket - Add "passed" to the "get" step in the "exit_release" job, to make sure the resource version generated from the previous job will be consumed in this job. --- concourse/pipeline/job_def.lib.yml | 21 ++++++++++++++++----- concourse/pipeline/res_def.yml | 30 ++++++++++++++++++++++++++++++ 2 files changed, 46 insertions(+), 5 deletions(-) diff --git a/concourse/pipeline/job_def.lib.yml b/concourse/pipeline/job_def.lib.yml index 76798f09125..c3d372da1aa 100644 --- a/concourse/pipeline/job_def.lib.yml +++ b/concourse/pipeline/job_def.lib.yml @@ -1,6 +1,14 @@ #@ load("base.lib.yml", "add_res_by_conf", "add_res_by_name") #@ load("@ytt:template", "template") +#@ def inter_bin_name(base_name, release_build): +#@ if release_build: +#@ return base_name + "_rel" +#@ end +#@ +#@ return base_name +#@ end + #! Job config for centos6 #!
Use bin_gpdb_postfix="" to use a release version of gpdb binary #@ def centos6_gpdb6_conf(release_build=False): @@ -8,7 +16,7 @@ res_build_image: centos6-gpdb6-image-build res_test_image: centos6-gpdb6-image-test res_gpdb_bin: #@ "bin_gpdb6_centos6" + ("" if release_build else "_debug") res_diskquota_bin: bin_diskquota_gpdb6_rhel6 -res_intermediates_bin: bin_diskquota_gpdb6_rhel6_intermediates +res_intermediates_bin: #@ inter_bin_name("bin_diskquota_gpdb6_rhel6_intermediates", release_build) release_bin: bin_diskquota_gpdb6_rhel6_release os: rhel6 build_type: #@ "Release" if release_build else "Debug" @@ -20,7 +28,7 @@ res_build_image: centos7-gpdb6-image-build res_test_image: centos7-gpdb6-image-test res_gpdb_bin: #@ "bin_gpdb6_centos7" + ("" if release_build else "_debug") res_diskquota_bin: bin_diskquota_gpdb6_rhel7 -res_intermediates_bin: bin_diskquota_gpdb6_rhel7_intermediates +res_intermediates_bin: #@ inter_bin_name("bin_diskquota_gpdb6_rhel7_intermediates", release_build) release_bin: bin_diskquota_gpdb6_rhel7_release os: rhel7 build_type: #@ "Release" if release_build else "Debug" @@ -32,7 +40,7 @@ res_build_image: rhel8-gpdb6-image-build res_test_image: rhel8-gpdb6-image-test res_gpdb_bin: #@ "bin_gpdb6_rhel8" + ("" if release_build else "_debug") res_diskquota_bin: bin_diskquota_gpdb6_rhel8 -res_intermediates_bin: bin_diskquota_gpdb6_rhel8_intermediates +res_intermediates_bin: #@ inter_bin_name("bin_diskquota_gpdb6_rhel8_intermediates", release_build) release_bin: bin_diskquota_gpdb6_rhel8_release os: rhel8 build_type: #@ "Release" if release_build else "Debug" @@ -44,7 +52,7 @@ res_build_image: ubuntu18-gpdb6-image-build res_test_image: ubuntu18-gpdb6-image-test res_gpdb_bin: #@ "bin_gpdb6_ubuntu18" + ("" if release_build else "_debug") res_diskquota_bin: bin_diskquota_gpdb6_ubuntu18 -res_intermediates_bin: bin_diskquota_gpdb6_ubuntu18_intermediates +res_intermediates_bin: #@ inter_bin_name("bin_diskquota_gpdb6_ubuntu18_intermediates", release_build) release_bin: bin_diskquota_gpdb6_ubuntu18_release os: ubuntu18.04 build_type: #@ "Release" if release_build else "Debug" @@ -153,9 +161,12 @@ plan: #@ end - in_parallel: steps: -#@ for conf in confs: +#@ for i in range(len(confs)): +#@ conf = confs[i] - do: - get: #@ conf["res_intermediates_bin"] + passed: + - #@ passed_jobs[i] params: unpack: true - put: #@ conf["release_bin"] diff --git a/concourse/pipeline/res_def.yml b/concourse/pipeline/res_def.yml index 55411888b3c..b7f7d3d3feb 100644 --- a/concourse/pipeline/res_def.yml +++ b/concourse/pipeline/res_def.yml @@ -199,6 +199,7 @@ resources: regexp: diskquota/released/gpdb6/diskquota-(.*)-ubuntu18.04_x86_64.tar.gz # For uploading every build to gcs +# Dev - name: bin_diskquota_gpdb6_rhel6_intermediates type: gcs source: @@ -227,6 +228,35 @@ resources: json_key: ((extensions-gcs-service-account-key)) versioned_file: intermediates/diskquota/diskquota_ubuntu18_gpdb6.tar.gz +# Rel +- name: bin_diskquota_gpdb6_rhel6_intermediates_rel + type: gcs + source: + bucket: gpdb-extensions-concourse-resources + json_key: ((extensions-gcs-service-account-key)) + versioned_file: intermediates_release/diskquota/diskquota_rhel6_gpdb6.tar.gz + +- name: bin_diskquota_gpdb6_rhel7_intermediates_rel + type: gcs + source: + bucket: gpdb-extensions-concourse-resources + json_key: ((extensions-gcs-service-account-key)) + versioned_file: intermediates_release/diskquota/diskquota_rhel7_gpdb6.tar.gz + +- name: bin_diskquota_gpdb6_rhel8_intermediates_rel + type: gcs + source: + bucket: 
gpdb-extensions-concourse-resources + json_key: ((extensions-gcs-service-account-key)) + versioned_file: intermediates_release/diskquota/diskquota_rhel8_gpdb6.tar.gz + +- name: bin_diskquota_gpdb6_ubuntu18_intermediates_rel + type: gcs + source: + bucket: gpdb-extensions-concourse-resources + json_key: ((extensions-gcs-service-account-key)) + versioned_file: intermediates_release/diskquota/diskquota_ubuntu18_gpdb6.tar.gz + # For uploading to the release bucket - name: bin_diskquota_gpdb6_rhel6_release type: gcs From d6d47b8a9768099d1b27d1356753953deae1330c Mon Sep 17 00:00:00 2001 From: Chen Mulong Date: Mon, 30 May 2022 17:39:35 +0800 Subject: [PATCH 202/330] Bump version to 2.0.1 (#222) --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index 227cea21564..38f77a65b30 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.0.0 +2.0.1 From 37a4caf6474c708aca5aeb5475212eb83fd3b782 Mon Sep 17 00:00:00 2001 From: Sasasu Date: Tue, 31 May 2022 20:24:08 +0800 Subject: [PATCH 203/330] Compatible with ancient git (#223) git 1.7.1 does not support `git diff /absolute/path/to/current/project`. Make our CI happy. --- upgrade_test/CMakeLists.txt | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/upgrade_test/CMakeLists.txt b/upgrade_test/CMakeLists.txt index be07de9f678..79af27813d5 100644 --- a/upgrade_test/CMakeLists.txt +++ b/upgrade_test/CMakeLists.txt @@ -16,19 +16,26 @@ regresstarget_add( REGRESS_OPTS --dbname=contrib_regression) +exec_program( + git ${CMAKE_SOURCE_DIR} ARGS + tag | sort --version-sort -r | head -n 1 + OUTPUT_VARIABLE latest_tag +) + # check whether DDL file (*.sql) is modified file(GLOB ddl_files ${CMAKE_SOURCE_DIR}/*.sql) foreach(ddl IN LISTS ddl_files) + cmake_path(GET ddl FILENAME ddl) exec_program( - git ARGS - diff --exit-code ${ddl} + git ${CMAKE_SOURCE_DIR} ARGS + diff ${latest_tag} --exit-code ${ddl} OUTPUT_VARIABLE NULL RETURN_VALUE "${ddl}_modified") if("${${ddl}_modified}") message( NOTICE - "detected DDL file ${ddl} is modified, checking if upgrade test is needed." + "compared to ${latest_tag}, the DDL file ${ddl} is modified, checking if upgrade test is needed." ) set(DISKQUOTA_DDL_MODIFIED TRUE) endif() From 76bbce478caa1a04b554931e1fa264bd834e3e8e Mon Sep 17 00:00:00 2001 From: Chen Mulong Date: Mon, 25 Jul 2022 22:24:34 +0800 Subject: [PATCH 204/330] Fix compiler version on concourse (#225) cmake will look up the C compiler by checking /usr/bin/cc. On CentOS7, that is a symbolic link to the gcc in the same directory. Although a newer gcc is installed, cmake will not use the newer one. We always export the CC and CXX, then the gcc/g++ in the $PATH will be used.
--- concourse/scripts/build_diskquota.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/concourse/scripts/build_diskquota.sh b/concourse/scripts/build_diskquota.sh index 312e49896bd..ed87ced6dde 100755 --- a/concourse/scripts/build_diskquota.sh +++ b/concourse/scripts/build_diskquota.sh @@ -6,9 +6,9 @@ function pkg() { [ -f /opt/gcc_env.sh ] && source /opt/gcc_env.sh source /usr/local/greenplum-db-devel/greenplum_path.sh - if [ "${DISKQUOTA_OS}" = "rhel6" ]; then - export CC="$(which gcc)" - fi + # Always use the gcc from $PATH, to avoid using a lower version compiler by /usr/bin/cc + export CC="$(which gcc)" + export CXX="$(which g++)" pushd /home/gpadmin/diskquota_artifacts local last_release_path From 959e09fb9766c604cfbb5a414790b3a2d7bde9ac Mon Sep 17 00:00:00 2001 From: Xing Guo Date: Thu, 4 Aug 2022 15:57:22 +0800 Subject: [PATCH 205/330] [CI] Fetch secrets from vault. (#226) This patch teaches concourse to fetch secrets from vault. --- concourse/fly.sh | 3 -- concourse/pipeline/res_def.yml | 68 +++++++++++++++++----------------- 2 files changed, 34 insertions(+), 37 deletions(-) diff --git a/concourse/fly.sh b/concourse/fly.sh index d63cb58e853..7ee7eeb1b33 100755 --- a/concourse/fly.sh +++ b/concourse/fly.sh @@ -108,9 +108,6 @@ set -v sp \ -p "${pipeline_name}" \ -c "${yml_path}" \ - -l "${workspace}/gp-continuous-integration/secrets/gpdb_common-ci-secrets.yml" \ - -l "${workspace}/gp-continuous-integration/secrets/gp-extensions-common.yml" \ - -l "${workspace}/gp-continuous-integration/secrets/gpdb_6X_STABLE-ci-secrets.prod.yml" \ -v "diskquota-branch=${branch}" set +v diff --git a/concourse/pipeline/res_def.yml b/concourse/pipeline/res_def.yml index b7f7d3d3feb..7f9ccfe02f3 100644 --- a/concourse/pipeline/res_def.yml +++ b/concourse/pipeline/res_def.yml @@ -21,23 +21,23 @@ resources: - name: diskquota_pr type: pull-request # We should rely on the webhook. See README if webhook doesn't work - webhook_token: ((diskquota-webhook-token)) + webhook_token: ((extension/diskquota-webhook-token)) check_every: 24h source: disable_forks: false repository: greenplum-db/diskquota - access_token: ((github-access-token)) + access_token: ((extension/github-access-token)) base_branch: gpdb # Commit trigger - name: diskquota_commit type: git # We should rely on the webhook. See README if webhook doesn't work - webhook_token: ((diskquota-webhook-token)) + webhook_token: ((extension/diskquota-webhook-token)) check_every: 1h source: branch: ((diskquota-branch)) uri: https://github.com/greenplum-db/diskquota.git - username: ((github-access-token)) + username: ((extension/github-access-token)) password: x-oauth-basic # Commit dev trigger. 
Not using webhook - name: diskquota_commit_dev @@ -46,7 +46,7 @@ resources: source: branch: ((diskquota-branch)) uri: https://github.com/greenplum-db/diskquota.git - username: ((github-access-token)) + username: ((extension/github-access-token)) password: x-oauth-basic @@ -87,14 +87,14 @@ resources: repository: gcr.io/data-gpdb-private-images/gpdb6-rhel8-build tag: latest username: _json_key - password: ((container-registry-readonly-service-account-key)) + password: ((data-gpdb-private-images-container-registry-readonly-service-account-key)) - name: rhel8-gpdb6-image-test type: registry-image source: repository: gcr.io/data-gpdb-private-images/gpdb6-rhel8-test tag: latest username: _json_key - password: ((container-registry-readonly-service-account-key)) + password: ((data-gpdb-private-images-container-registry-readonly-service-account-key)) # Ubuntu18 - name: ubuntu18-gpdb6-image-build type: registry-image @@ -113,7 +113,7 @@ resources: repository: gcr.io/data-gpdb-extensions/common/clang-format tag: 13 username: _json_key - password: ((extensions-gcs-service-account-key)) + password: ((extension/extensions-gcs-service-account-key)) # gpdb binary on gcs is located as different folder for different version # Latest build with assertion enabled: @@ -121,25 +121,25 @@ resources: - name: bin_gpdb6_centos6_debug type: gcs source: - bucket: ((gcs-bucket)) + bucket: pivotal-gpdb-concourse-resources-prod json_key: ((concourse-gcs-resources-service-account-key)) regexp: server/published/gpdb6/server-rc-(.*)-rhel6_x86_64.debug.tar.gz - name: bin_gpdb6_centos7_debug type: gcs source: - bucket: ((gcs-bucket)) + bucket: pivotal-gpdb-concourse-resources-prod json_key: ((concourse-gcs-resources-service-account-key)) regexp: server/published/gpdb6/server-rc-(.*)-rhel7_x86_64.debug.tar.gz - name: bin_gpdb6_rhel8_debug type: gcs source: - bucket: ((gcs-bucket)) + bucket: pivotal-gpdb-concourse-resources-prod json_key: ((concourse-gcs-resources-service-account-key)) regexp: server/published/gpdb6/server-rc-(.*)-rhel8_x86_64.debug.tar.gz - name: bin_gpdb6_ubuntu18_debug type: gcs source: - bucket: ((gcs-bucket)) + bucket: pivotal-gpdb-concourse-resources-prod json_key: ((concourse-gcs-resources-service-account-key)) regexp: server/published/gpdb6/server-rc-(.*)-ubuntu18.04_x86_64.debug.tar.gz # Latest release candidates, no fault-injector, no assertion: @@ -147,25 +147,25 @@ resources: - name: bin_gpdb6_centos6 type: gcs source: - bucket: ((gcs-bucket)) + bucket: pivotal-gpdb-concourse-resources-prod json_key: ((concourse-gcs-resources-service-account-key)) regexp: server/published/gpdb6/server-rc-(.*)-rhel6_x86_64.tar.gz - name: bin_gpdb6_centos7 type: gcs source: - bucket: ((gcs-bucket)) + bucket: pivotal-gpdb-concourse-resources-prod json_key: ((concourse-gcs-resources-service-account-key)) regexp: server/published/gpdb6/server-rc-(.*)-rhel7_x86_64.tar.gz - name: bin_gpdb6_rhel8 type: gcs source: - bucket: ((gcs-bucket)) + bucket: pivotal-gpdb-concourse-resources-prod json_key: ((concourse-gcs-resources-service-account-key)) regexp: server/published/gpdb6/server-rc-(.*)-rhel8_x86_64.tar.gz - name: bin_gpdb6_ubuntu18 type: gcs source: - bucket: ((gcs-bucket)) + bucket: pivotal-gpdb-concourse-resources-prod json_key: ((concourse-gcs-resources-service-account-key)) regexp: server/published/gpdb6/server-rc-(.*)-ubuntu18.04_x86_64.tar.gz @@ -173,28 +173,28 @@ resources: - name: bin_diskquota_gpdb6_rhel6 type: gcs source: - bucket: ((gcs-bucket)) + bucket: pivotal-gpdb-concourse-resources-prod json_key: 
((concourse-gcs-resources-service-account-key)) regexp: diskquota/released/gpdb6/diskquota-(.*)-rhel6_x86_64.tar.gz - name: bin_diskquota_gpdb6_rhel7 type: gcs source: - bucket: ((gcs-bucket)) + bucket: pivotal-gpdb-concourse-resources-prod json_key: ((concourse-gcs-resources-service-account-key)) regexp: diskquota/released/gpdb6/diskquota-(.*)-rhel7_x86_64.tar.gz - name: bin_diskquota_gpdb6_rhel8 type: gcs source: - bucket: ((gcs-bucket)) + bucket: pivotal-gpdb-concourse-resources-prod json_key: ((concourse-gcs-resources-service-account-key)) regexp: diskquota/released/gpdb6/diskquota-(.*)-rhel8_x86_64.tar.gz - name: bin_diskquota_gpdb6_ubuntu18 type: gcs source: - bucket: ((gcs-bucket)) + bucket: pivotal-gpdb-concourse-resources-prod json_key: ((concourse-gcs-resources-service-account-key)) regexp: diskquota/released/gpdb6/diskquota-(.*)-ubuntu18.04_x86_64.tar.gz @@ -204,28 +204,28 @@ resources: type: gcs source: bucket: gpdb-extensions-concourse-resources - json_key: ((extensions-gcs-service-account-key)) + json_key: ((extension/extensions-gcs-service-account-key)) versioned_file: intermediates/diskquota/diskquota_rhel6_gpdb6.tar.gz - name: bin_diskquota_gpdb6_rhel7_intermediates type: gcs source: bucket: gpdb-extensions-concourse-resources - json_key: ((extensions-gcs-service-account-key)) + json_key: ((extension/extensions-gcs-service-account-key)) versioned_file: intermediates/diskquota/diskquota_rhel7_gpdb6.tar.gz - name: bin_diskquota_gpdb6_rhel8_intermediates type: gcs source: bucket: gpdb-extensions-concourse-resources - json_key: ((extensions-gcs-service-account-key)) + json_key: ((extension/extensions-gcs-service-account-key)) versioned_file: intermediates/diskquota/diskquota_rhel8_gpdb6.tar.gz - name: bin_diskquota_gpdb6_ubuntu18_intermediates type: gcs source: bucket: gpdb-extensions-concourse-resources - json_key: ((extensions-gcs-service-account-key)) + json_key: ((extension/extensions-gcs-service-account-key)) versioned_file: intermediates/diskquota/diskquota_ubuntu18_gpdb6.tar.gz # Rel @@ -233,56 +233,56 @@ resources: type: gcs source: bucket: gpdb-extensions-concourse-resources - json_key: ((extensions-gcs-service-account-key)) + json_key: ((extension/extensions-gcs-service-account-key)) versioned_file: intermediates_release/diskquota/diskquota_rhel6_gpdb6.tar.gz - name: bin_diskquota_gpdb6_rhel7_intermediates_rel type: gcs source: bucket: gpdb-extensions-concourse-resources - json_key: ((extensions-gcs-service-account-key)) + json_key: ((extension/extensions-gcs-service-account-key)) versioned_file: intermediates_release/diskquota/diskquota_rhel7_gpdb6.tar.gz - name: bin_diskquota_gpdb6_rhel8_intermediates_rel type: gcs source: bucket: gpdb-extensions-concourse-resources - json_key: ((extensions-gcs-service-account-key)) + json_key: ((extension/extensions-gcs-service-account-key)) versioned_file: intermediates_release/diskquota/diskquota_rhel8_gpdb6.tar.gz - name: bin_diskquota_gpdb6_ubuntu18_intermediates_rel type: gcs source: bucket: gpdb-extensions-concourse-resources - json_key: ((extensions-gcs-service-account-key)) + json_key: ((extension/extensions-gcs-service-account-key)) versioned_file: intermediates_release/diskquota/diskquota_ubuntu18_gpdb6.tar.gz # For uploading to the release bucket - name: bin_diskquota_gpdb6_rhel6_release type: gcs source: - bucket: ((gcs-bucket)) + bucket: pivotal-gpdb-concourse-resources-prod json_key: ((concourse-gcs-resources-service-account-key)) regexp: diskquota/released/gpdb6/diskquota-(.*).tar.gz - name: 
bin_diskquota_gpdb6_rhel7_release type: gcs source: - bucket: ((gcs-bucket)) + bucket: pivotal-gpdb-concourse-resources-prod json_key: ((concourse-gcs-resources-service-account-key)) regexp: diskquota/released/gpdb6/diskquota-(.*).tar.gz - name: bin_diskquota_gpdb6_rhel8_release type: gcs source: - bucket: ((gcs-bucket)) + bucket: pivotal-gpdb-concourse-resources-prod json_key: ((concourse-gcs-resources-service-account-key)) regexp: diskquota/released/gpdb6/diskquota-(.*).tar.gz - name: bin_diskquota_gpdb6_ubuntu18_release type: gcs source: - bucket: ((gcs-bucket)) + bucket: pivotal-gpdb-concourse-resources-prod json_key: ((concourse-gcs-resources-service-account-key)) regexp: diskquota/released/gpdb6/diskquota-(.*).tar.gz @@ -291,10 +291,10 @@ resources: type: gcs source: bucket: gpdb-extensions-concourse-resources - json_key: ((extensions-gcs-service-account-key)) + json_key: ((extension/extensions-gcs-service-account-key)) regexp: dependencies/cmake-(.*)-linux-x86_64.sh - name: slack_notify type: slack-alert source: - url: ((extensions-slack-webhook)) + url: ((extension/extensions-slack-webhook)) From 338f716f3be1f66c194b19e39eb6ef79c971b5cf Mon Sep 17 00:00:00 2001 From: Xing Guo Date: Tue, 23 Aug 2022 10:04:06 +0800 Subject: [PATCH 206/330] [cmake] Use C compiler to link the target. (#230) Since we didn't implement anything in C++, let's invoke the C compiler to link the target. See: https://cmake.org/cmake/help/latest/prop_tgt/LINKER_LANGUAGE.html#prop_tgt:LINKER_LANGUAGE --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index cb1c80e73e9..583f7ddb328 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -96,7 +96,7 @@ set_target_properties( PROPERTIES OUTPUT_NAME ${DISKQUOTA_BINARY_NAME} PREFIX "" C_STANDARD 99 - LINKER_LANGUAGE "CXX") + LINKER_LANGUAGE "C") # packing part, move to a separate file if this part is too large include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/Distro.cmake) From 872b3fbc3210dd6ca40ff6edd72a1d4c6acd9a6f Mon Sep 17 00:00:00 2001 From: Chen Mulong Date: Wed, 24 Aug 2022 13:03:16 +0800 Subject: [PATCH 207/330] Scripts for new pipeline naming rules (#231) Concourse is deprecating some special characters in the pipeline name. See details at https://concourse-ci.org/config-basics.html#schema.identifier The pipelines will be named as: <pipeline_type>[_test].<proj_name>[.branch_name][.user_defined_postfix] - Pipeline type 'release' is renamed to 'rel' - Pipeline type 'commit' is renamed to 'merge' - '_test' suffix will be added to pipeline type for pipeline debugging - Instead of setting the whole name of the pipeline, the user now only sets the postfix. Patch from https://github.com/pivotal/ip4r/pull/2 Co-authored-by: Chen Mulong Co-authored-by: Xing Guo --- concourse/README.md | 27 +++++++++-------- concourse/fly.sh | 72 +++++++++++++++++++++++++++++---------------- 2 files changed, 60 insertions(+), 39 deletions(-) diff --git a/concourse/README.md b/concourse/README.md index 4106ce396ad..14492679bac 100644 --- a/concourse/README.md +++ b/concourse/README.md @@ -2,20 +2,21 @@ ## Naming Prefix Rule -- `PR:<proj_name>` for pull-request pipelines -- `COMMIT:<proj_name>:<branch_name>` for branch pipelines. It will be executed when a commit is committed/merged into the branch. -- `DEV:<your_name>_[any_other_info]` for personal development usage. Put your name into the pipeline name so others can know who owns it. +- `pr.<proj_name>` for pull-request pipelines +- `merge.<proj_name>.<branch_name>` for branch pipelines. It will be executed when a commit is committed/merged into the branch. +- `dev.<proj_name>.<branch_name>.<postfix>` for personal development usage.
Put your name into the pipeline name so others can know who owns it. +- `<pipeline_type>_test.<proj_name>.<branch_name>` for pipeline debugging. ## Pipelines for daily work ### PR Pipeline -https://extensions.ci.gpdb.pivotal.io/teams/main/pipelines/PR:diskquota +https://extensions.ci.gpdb.pivotal.io/teams/main/pipelines/pr.diskquota ### Main Branch Pipeline -The development happens on the `gpdb` branch. The commit pipeline for the `gpdb` -https://extensions.ci.gpdb.pivotal.io/teams/main/pipelines/COMMIT:diskquota:gpdb +The development happens on the `gpdb` branch. The merge pipeline for the `gpdb` branch is +https://extensions.ci.gpdb.pivotal.io/teams/main/pipelines/merge.diskquota.gpdb # Fly a pipeline @@ -38,10 +39,10 @@ https://extensions.ci.gpdb.pivotal.io/teams/main/pipelines/COMMIT:diskquota:gpdb ``` ./fly.sh -t extension -c pr ``` -## Fly the commit pipeline +## Fly the merge pipeline ``` -./fly.sh -t extension -c commit +./fly.sh -t extension -c merge ``` ## Fly the release pipeline @@ -54,24 +55,24 @@ The release pipeline should be located in https://prod.ci.gpdb.pivotal.io # Login to prod fly -t prod login -c https://prod.ci.gpdb.pivotal.io # Fly the release pipeline -./fly.sh -t prod -c release +./fly.sh -t prod -c rel ``` To fly a release pipeline from a specific branch: ``` -./fly.sh -t <target> -c release -b release/<major>.<minor> +./fly.sh -t <target> -c rel -b release/<major>.<minor> ``` ## Fly the dev pipeline ``` -./fly.sh -t extension -c dev -p <your_name>_diskquota -b <branch> +./fly.sh -t extension -c dev -p <postfix> -b <branch> ``` ## Webhook -By default, the PR and commit pipelines are using webhook instead of polling to trigger a build. The webhook URL will be printed when flying such a pipeline by `fly.sh`. The webhook needs to be set in the `github repository` -> `Settings` -> `Webhooks` with push notification enabled. +By default, the PR and merge pipelines are using webhook instead of polling to trigger a build. The webhook URL will be printed when flying such a pipeline by `fly.sh`. The webhook needs to be set in the `github repository` -> `Settings` -> `Webhooks` with push notification enabled. To test if the webhook works, use `curl` to send a `POST` request to the hook URL with some random data. If it is the right URL, the relevant resource will be refreshed on the Concourse UI. The command line looks like: ``` curl --data-raw "foo" <webhook_url> ``` ## PR pipeline is not triggered. -The PR pipeline relies on the webhook to detect the new PR. However, due to the limitation of the webhook implementation of concourse, we rely on the push hook for this. It means if the PR is from a forked repo, the PR pipeline won't be triggered immediately. To manually trigger the pipeline, go to https://extensions.ci.gpdb.pivotal.io/teams/main/pipelines/PR:diskquota/resources/diskquota_pr and click the ⟳ button there. +The PR pipeline relies on the webhook to detect the new PR. However, due to the limitation of the webhook implementation of concourse, we rely on the push hook for this. It means if the PR is from a forked repo, the PR pipeline won't be triggered immediately. To manually trigger the pipeline, go to https://extensions.ci.gpdb.pivotal.io/teams/main/pipelines/pr.diskquota/resources/diskquota_pr and click the ⟳ button there. TIPS: Just don't fork, name your branch as `<your_name>/<branch_name>` and push it here to create PR.
diff --git a/concourse/fly.sh b/concourse/fly.sh index 7ee7eeb1b33..37c3bbe6a45 100755 --- a/concourse/fly.sh +++ b/concourse/fly.sh @@ -2,25 +2,28 @@ set -e -workspace=${WORKSPACE:-"$HOME/workspace"} fly=${FLY:-"fly"} -echo "'workspace' location: ${workspace}" echo "'fly' command: ${fly}" echo "" +proj_name="diskquota" usage() { - echo "Usage: $0 -t <target> -c <pipeline_config> [-p <pipeline_name>] [-b branch]" 1>&2 if [ -n "$1" ]; then - echo "$1" + echo "$1" 1>&2 + echo "" 1>&2 fi + + echo "Usage: $0 -t <target> -c <pipeline_config> [-p <postfix>] [-b branch] [-T]" + echo "Options:" + echo " '-T' adds '_test' suffix to the pipeline type. Useful for pipeline debugging." exit 1 } # Parse command line options -while getopts ":c:t:p:b:" o; do +while getopts ":c:t:p:b:T" o; do case "${o}" in c) - # pipeline type/config. pr/commit/dev/release + # pipeline type/config. pr/merge/dev/rel pipeline_config=${OPTARG} ;; t) @@ -29,12 +32,15 @@ while getopts ":c:t:p:b:" o; do ;; p) # pipeline name - pipeline_name=${OPTARG} + postfix=${OPTARG} ;; b) # branch name branch=${OPTARG} ;; + T) + test_suffix="_test" + ;; *) usage "" ;; @@ -46,52 +52,66 @@ if [ -z "${target}" ] || [ -z "${pipeline_config}" ]; then usage "" fi +pipeline_type="" # Decide ytt options to generate pipeline case ${pipeline_config} in pr) - if [ -z "${pipeline_name}" ]; then - pipeline_name="PR:diskquota" - fi + pipeline_type="pr" config_file="pr.yml" - hook_res="diskquota_pr" + hook_res="${proj_name}_pr" ;; - commit) + merge|commit) # Default branch is 'gpdb' as it is our main branch if [ -z "${branch}" ]; then branch="gpdb" fi + pipeline_type="merge" config_file="commit.yml" - hook_res="diskquota_commit" + hook_res="${proj_name}_commit" ;; dev) - if [ -z "${pipeline_name}" ]; then - usage "'-p' needs to be supplied to specify the pipeline name for flying a 'dev' pipeline." + if [ -z "${postfix}" ]; then + usage "'-p' needs to be supplied to specify the pipeline name postfix for flying a 'dev' pipeline." + fi + if [ -z "${branch}" ]; then + usage "'-b' needs to be supplied to specify the branch for flying a 'dev' pipeline."
fi - pipeline_name="DEV:${pipeline_name}" + pipeline_type="dev" config_file="dev.yml" ;; - release) + release|rel) # Default branch is 'gpdb' as it is our main branch if [ -z "${branch}" ]; then branch="gpdb" fi - if [ -z "${pipeline_name}" ]; then - pipeline_name="RELEASE:diskquota:${branch}" - fi + pipeline_type="rel" config_file="release.yml" - hook_res="diskquota_commit" + hook_res="${proj_name}_commit" ;; *) usage "" ;; esac -yml_path="/tmp/diskquota_pipeline.yml" +yml_path="/tmp/${proj_name}.yml" my_path=$(realpath "${BASH_SOURCE[0]}") ytt_base=$(dirname "${my_path}")/pipeline +# pipeline cannot contain '/' +pipeline_name=${pipeline_name/\//"_"} + +# Generate pipeline name +if [ -n "${test_suffix}" ]; then + pipeline_type="${pipeline_type}_test" +fi +pipeline_name="${pipeline_type}.${proj_name}" +if [ -n "${branch}" ]; then + pipeline_name="${pipeline_name}.${branch}" +fi +if [ -n "${postfix}" ]; then + pipeline_name="${pipeline_name}.${postfix}" +fi +# pipeline cannot contain '/' +pipeline_name=${pipeline_name/\//"_"} ytt --data-values-file "${ytt_base}/res_def.yml" \ -f "${ytt_base}/base.lib.yml" \ @@ -108,7 +128,7 @@ set -v sp \ -p "${pipeline_name}" \ -c "${yml_path}" \ - -v "diskquota-branch=${branch}" + -v "${proj_name}-branch=${branch}" set +v if [ "${pipeline_config}" == "dev" ]; then From 1f6a2e0d860ba37f65a3989ab87dbeda7762804a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Xuebin=20Su=20=28=E8=8B=8F=E5=AD=A6=E6=96=8C=29?= <12034000+xuebinsu@users.noreply.github.com> Date: Thu, 25 Aug 2022 11:25:32 +0800 Subject: [PATCH 208/330] Log readiness message only once (#229) Currently a diskquota worker will log an error message once for each iteration when the database is not ready to be monitored by Diskquota. When the naptime is small, the log will be flooded by this error message. This patch adds a flag in the worker map in the shared memory so that the error message will only be logged once per startup. --- diskquota.c | 54 ++++++++++++++++++- diskquota.h | 9 ++-- quotamodel.c | 17 +++--- tests/regress/diskquota_schedule | 1 + .../expected/test_readiness_logged.out | 37 +++++++++++++ tests/regress/sql/test_readiness_logged.sql | 22 ++++++++ 6 files changed, 129 insertions(+), 11 deletions(-) create mode 100644 tests/regress/expected/test_readiness_logged.out create mode 100644 tests/regress/sql/test_readiness_logged.sql diff --git a/diskquota.c b/diskquota.c index c552acc84f6..db5fb4a952e 100644 --- a/diskquota.c +++ b/diskquota.c @@ -25,6 +25,8 @@ #include "cdb/cdbvars.h" #include "commands/dbcommands.h" #include "executor/spi.h" +#include "libpq/libpq-be.h" +#include "miscadmin.h" #include "port/atomics.h" #include "storage/ipc.h" #include "storage/proc.h" @@ -32,6 +34,7 @@ #include "tcop/utility.h" #include "utils/builtins.h" #include "utils/faultinjector.h" +#include "utils/guc.h" #include "utils/ps_status.h" #include "utils/snapmgr.h" #include "utils/syscache.h" @@ -93,6 +96,48 @@ diskquota_is_paused() return paused; } +bool +diskquota_is_readiness_logged() +{ + Assert(MyDatabaseId != InvalidOid); + bool is_readiness_logged; + + LWLockAcquire(diskquota_locks.worker_map_lock, LW_SHARED); + { + DiskQuotaWorkerEntry *hash_entry; + bool found; + + hash_entry = + (DiskQuotaWorkerEntry *)hash_search(disk_quota_worker_map, (void *)&MyDatabaseId, HASH_FIND, &found); + is_readiness_logged = found ?
hash_entry->is_readiness_logged : false; + } + LWLockRelease(diskquota_locks.worker_map_lock); + + return is_readiness_logged; +} + +void +diskquota_set_readiness_logged() +{ + Assert(MyDatabaseId != InvalidOid); + + /* + * We actually need ROW EXCLUSIVE lock here. Given that the current worker + * is the only process that modifies the entry, it is safe to only take + * the shared lock. + */ + LWLockAcquire(diskquota_locks.worker_map_lock, LW_SHARED); + { + DiskQuotaWorkerEntry *hash_entry; + bool found; + + hash_entry = + (DiskQuotaWorkerEntry *)hash_search(disk_quota_worker_map, (void *)&MyDatabaseId, HASH_FIND, &found); + hash_entry->is_readiness_logged = true; + } + LWLockRelease(diskquota_locks.worker_map_lock); +} + /* functions of disk quota*/ void _PG_init(void); void _PG_fini(void); @@ -257,6 +302,12 @@ disk_quota_worker_main(Datum main_arg) { char *dbname = MyBgworkerEntry->bgw_name; + MyProcPort = (Port *)calloc(1, sizeof(Port)); + MyProcPort->database_name = dbname; /* To show the database in the log */ + + /* Disable ORCA to avoid fallback */ + optimizer = false; + ereport(LOG, (errmsg("[diskquota] start disk quota worker process to monitor database:%s", dbname))); /* Establish signal handlers before unblocking signals. */ @@ -1010,7 +1061,8 @@ worker_create_entry(Oid dbid) { workerentry->handle = NULL; pg_atomic_write_u32(&(workerentry->epoch), 0); - workerentry->is_paused = false; + workerentry->is_paused = false; + workerentry->is_readiness_logged = false; } LWLockRelease(diskquota_locks.worker_map_lock); diff --git a/diskquota.h b/diskquota.h index b86e7e72f42..9f6a1e30b1e 100644 --- a/diskquota.h +++ b/diskquota.h @@ -137,8 +137,9 @@ typedef struct DiskQuotaWorkerEntry DiskQuotaWorkerEntry; struct DiskQuotaWorkerEntry { Oid dbid; - pg_atomic_uint32 epoch; /* this counter will be increased after each worker loop */ - bool is_paused; /* true if this worker is paused */ + pg_atomic_uint32 epoch; /* this counter will be increased after each worker loop */ + bool is_paused; /* true if this worker is paused */ + bool is_readiness_logged; /* true if we have logged the error message for not ready */ // NOTE: this field only can access in diskquota launcher, in other process it is dangling pointer BackgroundWorkerHandle *handle; @@ -182,6 +183,8 @@ extern Oid diskquota_parse_primary_table_oid(Oid namespace, char *relname); extern bool worker_increase_epoch(Oid database_oid); extern unsigned int worker_get_epoch(Oid database_oid); extern bool diskquota_is_paused(void); -extern void do_check_diskquota_state_is_ready(void); +extern bool do_check_diskquota_state_is_ready(void); +extern bool diskquota_is_readiness_logged(void); +extern void diskquota_set_readiness_logged(void); #endif diff --git a/quotamodel.c b/quotamodel.c index 74f748e6a85..8e21af20574 100644 --- a/quotamodel.c +++ b/quotamodel.c @@ -564,8 +564,7 @@ check_diskquota_state_is_ready(void) PushActiveSnapshot(GetTransactionSnapshot()); pushed_active_snap = true; dispatch_my_db_to_all_segments(); - do_check_diskquota_state_is_ready(); - is_ready = true; + is_ready = do_check_diskquota_state_is_ready(); } PG_CATCH(); { @@ -589,7 +588,8 @@ check_diskquota_state_is_ready(void) } /* - * Check whether the diskquota state is ready. + * Check whether the diskquota state is ready. + * Throw an error or return false if it is not. * * For empty database, table diskquota.state would be ready after * 'CREATE EXTENSION diskquota;'.
But for non-empty database, @@ -597,7 +597,7 @@ check_diskquota_state_is_ready(void) * manually to get all the table size information and * store them into table diskquota.table_size */ -void +bool do_check_diskquota_state_is_ready(void) { int ret; @@ -621,14 +621,17 @@ do_check_diskquota_state_is_ready(void) int state; bool isnull; - dat = SPI_getbinval(tup, tupdesc, 1, &isnull); - state = isnull ? DISKQUOTA_UNKNOWN_STATE : DatumGetInt32(dat); + dat = SPI_getbinval(tup, tupdesc, 1, &isnull); + state = isnull ? DISKQUOTA_UNKNOWN_STATE : DatumGetInt32(dat); + bool is_ready = state == DISKQUOTA_READY_STATE; - if (state != DISKQUOTA_READY_STATE) + if (!is_ready && !diskquota_is_readiness_logged()) { + diskquota_set_readiness_logged(); ereport(ERROR, (errmsg("[diskquota] diskquota is not ready"), errhint("please run 'SELECT diskquota.init_table_size_table();' to initialize diskquota"))); } + return is_ready; } /* diff --git a/tests/regress/diskquota_schedule b/tests/regress/diskquota_schedule index 899d60f6d25..61ccc9337cd 100644 --- a/tests/regress/diskquota_schedule +++ b/tests/regress/diskquota_schedule @@ -1,5 +1,6 @@ test: config test: test_create_extension +test: test_readiness_logged test: test_init_table_size_table test: test_relation_size test: test_relation_cache diff --git a/tests/regress/expected/test_readiness_logged.out b/tests/regress/expected/test_readiness_logged.out new file mode 100644 index 00000000000..116d2612ffa --- /dev/null +++ b/tests/regress/expected/test_readiness_logged.out @@ -0,0 +1,37 @@ +CREATE DATABASE test_log_readiness; +\c test_log_readiness +CREATE TABLE t (i int) DISTRIBUTED BY (i); +CREATE EXTENSION diskquota; +WARNING: [diskquota] diskquota is not ready because current database is not empty +HINT: please run 'SELECT diskquota.init_table_size_table();' to initialize diskquota +SELECT pg_sleep(1); --Wait for the check completes + pg_sleep +---------- + +(1 row) + +SELECT count(*) FROM gp_toolkit.gp_log_database +WHERE logmessage = '[diskquota] diskquota is not ready'; + count +------- + 1 +(1 row) + +\! gpstop -raf > /dev/null +\c +SELECT pg_sleep(1); --Wait for the check completes + pg_sleep +---------- + +(1 row) + +SELECT count(*) FROM gp_toolkit.gp_log_database +WHERE logmessage = '[diskquota] diskquota is not ready'; + count +------- + 2 +(1 row) + +DROP EXTENSION diskquota; +\c contrib_regression +DROP DATABASE test_log_readiness; diff --git a/tests/regress/sql/test_readiness_logged.sql b/tests/regress/sql/test_readiness_logged.sql new file mode 100644 index 00000000000..d6cadc9ce46 --- /dev/null +++ b/tests/regress/sql/test_readiness_logged.sql @@ -0,0 +1,22 @@ +CREATE DATABASE test_log_readiness; +\c test_log_readiness + +CREATE TABLE t (i int) DISTRIBUTED BY (i); + +CREATE EXTENSION diskquota; +SELECT pg_sleep(1); --Wait for the check completes + +SELECT count(*) FROM gp_toolkit.gp_log_database +WHERE logmessage = '[diskquota] diskquota is not ready'; + +\! 
gpstop -raf > /dev/null +\c +SELECT pg_sleep(1); --Wait for the check completes + +SELECT count(*) FROM gp_toolkit.gp_log_database +WHERE logmessage = '[diskquota] diskquota is not ready'; + +DROP EXTENSION diskquota; + +\c contrib_regression + +DROP DATABASE test_log_readiness; From c28180c985e5b27dc0d2f5e0aa1bc7a32eb49521 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Xuebin=20Su=20=28=E8=8B=8F=E5=AD=A6=E6=96=8C=29?= <12034000+xuebinsu@users.noreply.github.com> Date: Thu, 25 Aug 2022 16:17:31 +0800 Subject: [PATCH 209/330] Fix flaky test due to slow worker startup (#232) When creating the extension, a diskquota worker will be started to monitor the db. However, it might take longer than expected until it checks whether it is ready to monitor, which will make some tests flaky. This patch increases the time to wait for worker startup to avoid flaky tests. --- tests/regress/expected/test_readiness_logged.out | 8 ++++---- tests/regress/sql/test_readiness_logged.sql | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/tests/regress/expected/test_readiness_logged.out b/tests/regress/expected/test_readiness_logged.out index 116d2612ffa..bd2bfae966e 100644 --- a/tests/regress/expected/test_readiness_logged.out +++ b/tests/regress/expected/test_readiness_logged.out @@ -1,10 +1,10 @@ -CREATE DATABASE test_log_readiness; -\c test_log_readiness +CREATE DATABASE test_readiness_logged; +\c test_readiness_logged CREATE TABLE t (i int) DISTRIBUTED BY (i); CREATE EXTENSION diskquota; WARNING: [diskquota] diskquota is not ready because current database is not empty HINT: please run 'SELECT diskquota.init_table_size_table();' to initialize diskquota -SELECT pg_sleep(1); --Wait for the check completes +SELECT pg_sleep(5); --Wait for the check completes pg_sleep ---------- @@ -34,4 +34,4 @@ WHERE logmessage = '[diskquota] diskquota is not ready'; DROP EXTENSION diskquota; \c contrib_regression -DROP DATABASE test_log_readiness; +DROP DATABASE test_readiness_logged; diff --git a/tests/regress/sql/test_readiness_logged.sql b/tests/regress/sql/test_readiness_logged.sql index d6cadc9ce46..84f75b55b7d 100644 --- a/tests/regress/sql/test_readiness_logged.sql +++ b/tests/regress/sql/test_readiness_logged.sql @@ -1,10 +1,10 @@ -CREATE DATABASE test_log_readiness; -\c test_log_readiness +CREATE DATABASE test_readiness_logged; +\c test_readiness_logged CREATE TABLE t (i int) DISTRIBUTED BY (i); CREATE EXTENSION diskquota; -SELECT pg_sleep(1); --Wait for the check completes +SELECT pg_sleep(5); --Wait for the check completes SELECT count(*) FROM gp_toolkit.gp_log_database WHERE logmessage = '[diskquota] diskquota is not ready'; @@ -19,4 +19,4 @@ WHERE logmessage = '[diskquota] diskquota is not ready'; DROP EXTENSION diskquota; \c contrib_regression -DROP DATABASE test_log_readiness; +DROP DATABASE test_readiness_logged; From 2ab5f1f79cfbfddf1fbd943d8f40faa8ff02a679 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Xuebin=20Su=20=28=E8=8B=8F=E5=AD=A6=E6=96=8C=29?= <12034000+xuebinsu@users.noreply.github.com> Date: Wed, 31 Aug 2022 11:40:11 +0800 Subject: [PATCH 210/330] Downgrade severity of the readiness issue (#235) Downgrade the severity of the readiness check failure from `ERROR` to `WARNING` so that the error stack will not be logged.
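For context, the severity level passed to ereport() decides the control flow as well as the logging: ERROR performs a non-local exit to the nearest error handler and gets an error stack written to the server log, while WARNING only emits the message and lets execution continue, so do_check_diskquota_state_is_ready() can simply fall through and return false. A minimal C sketch of the downgraded report below (assuming the standard ereport/errmsg/errhint API; the surrounding checks are elided):

/*
 * Downgraded readiness report: WARNING emits the message and returns
 * to the caller, whereas the previous ERROR would longjmp out of the
 * function and leave an error stack in the log.
 */
ereport(WARNING,
        (errmsg("[diskquota] diskquota is not ready"),
         errhint("please run 'SELECT diskquota.init_table_size_table();' to initialize diskquota")));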
--- quotamodel.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/quotamodel.c b/quotamodel.c index 8e21af20574..62d4c9d23b6 100644 --- a/quotamodel.c +++ b/quotamodel.c @@ -628,8 +628,8 @@ do_check_diskquota_state_is_ready(void) if (!is_ready && !diskquota_is_readiness_logged()) { diskquota_set_readiness_logged(); - ereport(ERROR, (errmsg("[diskquota] diskquota is not ready"), - errhint("please run 'SELECT diskquota.init_table_size_table();' to initialize diskquota"))); + ereport(WARNING, (errmsg("[diskquota] diskquota is not ready"), + errhint("please run 'SELECT diskquota.init_table_size_table();' to initialize diskquota"))); } return is_ready; } From ef56ddab1eaeda0ea0e50166e1ddaf34c349d814 Mon Sep 17 00:00:00 2001 From: Chen Mulong Date: Wed, 31 Aug 2022 12:05:09 +0800 Subject: [PATCH 211/330] Fix creating artifact (#234) The artifact was tarred without "z", so it is a plain tar, not gzipped. Untarring it with "xzvf" will report an error: " tar xzvf diskquota_rhel7_gpdb6.tar.gz gzip: stdin: not in gzip format tar: Child returned status 1 tar: Error is not recoverable: exiting now " --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 583f7ddb328..cefc9b42f84 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -145,7 +145,7 @@ add_custom_target(create_artifact COMMAND ${CMAKE_COMMAND} --build . --target package COMMAND - ${CMAKE_COMMAND} -E tar cvf ${artifact_NAME} "${tgz_NAME}.tar.gz") + ${CMAKE_COMMAND} -E tar czvf ${artifact_NAME} "${tgz_NAME}.tar.gz") # packing end # Create build-info From 082ac9a000f2b3ebd803fb69ea8517432aa2148b Mon Sep 17 00:00:00 2001 From: Xiaoran Wang Date: Thu, 15 Sep 2022 11:21:55 +0800 Subject: [PATCH 212/330] Dynamic bgworker for diskquota extension Since the number of bgworkers in gpdb is limited, supporting an unlimited number of databases running the diskquota extension requires allocating bgworkers dynamically. Each bgworker runs "refresh_disk_quota_model" only once, then exits. This PR adds the GUC diskquota.max_workers to set the maximum number of bgworkers running in parallel. The number of databases is still limited by the constant MAX_NUM_MONITORED_DB, as we store each database's status in shared memory. When there are fewer databases running the diskquota extension than diskquota.max_workers, one bgworker is started statically for each database. Otherwise, bgworkers are started dynamically. Launcher: * workers: freeWorkers and runningWorkers are two doubly linked lists in shared memory. A worker is moved from freeWorkers to runningWorkers when it is picked up to serve a database, and from runningWorkers back to freeWorkers when the process exits. The length of freeWorkers plus runningWorkers is diskquota.max_workers, and each worker has a workerId, referenced by DiskquotaDBEntry. * dbArray: an array in shared memory as well; its length is MAX_NUM_MONITORED_DB and it stores the DiskquotaDBEntry info. Related to dbArray: - curDB: an iterator pointer over dbArray. When curDB is NULL, diskquota has finished one loop and is ready for the next one. When curDB is dbArrayTail, it has finished the loop but needs to sleep; otherwise, it points to a DB that needs to run. - next_db(): function to pick the next database to run. * DiskquotaDBEntry: - in_use: whether it holds a monitored database's info. - workerId: if the workerId is not Invalid, the db is running; this field is used to check whether a db is running.
    If the naptime is short and the database is still running after one
    loop, it will not be picked up to run again.
  - inited: whether the structures for the diskquota extension,
    especially the items in shared memory, have been initialized.

* Diskquota worker: each diskquota worker owns some state in shared
  memory, initialized by init_disk_quota_model().

* Schedule: if it is OK to start workers, pick databases in dbArray from
  beginning to end and run them. Otherwise, either it is not yet time to
  start workers or there are no free workers available. In the first
  case, just sleep until the timeout; in the second, wait for the
  SIGUSR1 signal, which means some diskquota workers have exited and
  been freed.
---
 diskquota.c | 960 +++++++++++++-----
 diskquota.h | 80 +-
 diskquota_utility.c | 137 +--
 gp_activetable.c | 60 +-
 gp_activetable.h | 3 +-
 quotamodel.c | 224 ++--
 tests/regress/diskquota_schedule | 3 +-
 tests/regress/expected/test_extension.out | 148 ++-
 .../regress/expected/test_update_db_cache.out | 6 +
 .../regress/expected/test_worker_schedule.out | 648 ++++++++++++
 tests/regress/sql/config.sql | 1 +
 tests/regress/sql/test_extension.sql | 12 +
 tests/regress/sql/test_update_db_cache.sql | 1 +
 tests/regress/sql/test_worker_schedule.sql | 225 ++++
 14 files changed, 2025 insertions(+), 483 deletions(-)
 create mode 100644 tests/regress/expected/test_worker_schedule.out
 create mode 100644 tests/regress/sql/test_worker_schedule.sql
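The two lists above drive the whole scheduler. A minimal, self-contained sketch of the pool (illustrative only; it uses PostgreSQL's lib/ilist.h API but leaves out the LWLock protection and the shared-memory placement the real patch needs):

#include "postgres.h"
#include "lib/ilist.h"

typedef struct WorkerSlot
{
	dlist_node node; /* links the slot into exactly one of the two lists */
	int        id;
} WorkerSlot;

static dlist_head freeWorkers;
static dlist_head runningWorkers;

/* Move one slot from freeWorkers to runningWorkers; NULL if none free. */
static WorkerSlot *
acquire_worker(void)
{
	WorkerSlot *slot;

	if (dlist_is_empty(&freeWorkers))
		return NULL;
	slot = dlist_container(WorkerSlot, node, dlist_pop_head_node(&freeWorkers));
	dlist_push_head(&runningWorkers, &slot->node);
	return slot;
}

/* Return a slot to the free list when its worker process exits. */
static void
release_worker(WorkerSlot *slot)
{
	dlist_delete(&slot->node);
	dlist_push_head(&freeWorkers, &slot->node);
}

Because the pool is fixed at diskquota.max_workers slots, scheduling is just moving a dlist_node between the two list heads under a lock; nothing is allocated while the launcher runs.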
diff --git a/diskquota.c b/diskquota.c index db5fb4a952e..a8d14fe28d7 100644 --- a/diskquota.c +++ b/diskquota.c @@ -30,11 +30,13 @@ #include "port/atomics.h" #include "storage/ipc.h" #include "storage/proc.h" +#include "storage/sinval.h" #include "tcop/idle_resource_cleaner.h" #include "tcop/utility.h" #include "utils/builtins.h" #include "utils/faultinjector.h" #include "utils/guc.h" +#include "utils/memutils.h" #include "utils/ps_status.h" #include "utils/snapmgr.h" #include "utils/syscache.h" @@ -62,39 +64,54 @@ extern int usleep(useconds_t usec); // in static volatile sig_atomic_t got_sighup = false; static volatile sig_atomic_t got_sigterm = false; static volatile sig_atomic_t got_sigusr1 = false; +static volatile sig_atomic_t got_sigusr2 = false; /* GUC variables */ int diskquota_naptime = 0; int diskquota_max_active_tables = 0; int diskquota_worker_timeout = 60; /* default timeout is 60 seconds */ bool diskquota_hardlimit = false; +int diskquota_max_workers = 10; DiskQuotaLocks diskquota_locks; ExtensionDDLMessage *extension_ddl_message = NULL; -/* using hash table to support incremental update the table size entry.*/ -HTAB *disk_quota_worker_map = NULL; -static int num_db = 0; +// Only accessed in diskquota workers; different for each worker. +// A pointer to DiskquotaLauncherShmem->workerEntries in shared memory +static DiskQuotaWorkerEntry *volatile MyWorkerInfo = NULL; -bool -diskquota_is_paused() -{ - Assert(MyDatabaseId != InvalidOid); - bool paused; +// how many databases diskquota is monitoring +static int num_db = 0; - LWLockAcquire(diskquota_locks.worker_map_lock, LW_SHARED); - { - DiskQuotaWorkerEntry *hash_entry; - bool found; +// in shared memory, only for the launcher process +static DiskquotaLauncherShmemStruct *DiskquotaLauncherShmem; - hash_entry = - (DiskQuotaWorkerEntry *)hash_search(disk_quota_worker_map, (void *)&MyDatabaseId, HASH_FIND, &found); - paused = found ? hash_entry->is_paused : false; - } - LWLockRelease(diskquota_locks.worker_map_lock); +/* + * the current db to be run or running. + * an in-process static value, pointer to shared memory + * + * curDB has 3 different kinds of values: + * 1) when curDB is NULL, it means we can start workers + * for the first databases in DiskquotaLauncherShmem->dbArray + * + * 2) when curDB is DiskquotaLauncherShmem->dbArrayTail, + * it means it has just finished one loop and should + * sleep for ${diskquota.naptime} seconds. + * + * 3) when curDB is pointing to any db entry in + * DiskquotaLauncherShmem->dbArray[], it means it is in + * one loop to start each worker for each database. + */ +static DiskquotaDBEntry *curDB = NULL; - return paused; -} /* + * bgworker handles, in launcher local memory, + * bgworker_handles[i] is the handle of DiskquotaLauncherShmem->[i] + * the actually usable references are DiskquotaLauncherShmem->{freeWorkers, runningWorkers} + * + * size: GUC diskquota_max_workers + */ +BackgroundWorkerHandle **bgworker_handles; bool diskquota_is_readiness_logged() @@ -102,16 +119,15 @@ Assert(MyDatabaseId != InvalidOid); bool is_readiness_logged; - LWLockAcquire(diskquota_locks.worker_map_lock, LW_SHARED); + LWLockAcquire(diskquota_locks.monitored_dbid_cache_lock, LW_SHARED); { - DiskQuotaWorkerEntry *hash_entry; - bool found; + MonitorDBEntry hash_entry; + bool found; - hash_entry = - (DiskQuotaWorkerEntry *)hash_search(disk_quota_worker_map, (void *)&MyDatabaseId, HASH_FIND, &found); + hash_entry = (MonitorDBEntry)hash_search(monitored_dbid_cache, (void *)&MyDatabaseId, HASH_FIND, &found); is_readiness_logged = found ? hash_entry->is_readiness_logged : false; } - LWLockRelease(diskquota_locks.worker_map_lock); + LWLockRelease(diskquota_locks.monitored_dbid_cache_lock); return is_readiness_logged; } @@ -126,16 +142,15 @@ * is the only process that modifies the entry, it is safe to only take * the shared lock.
*/ - LWLockAcquire(diskquota_locks.worker_map_lock, LW_SHARED); + LWLockAcquire(diskquota_locks.monitored_dbid_cache_lock, LW_SHARED); { - DiskQuotaWorkerEntry *hash_entry; - bool found; + MonitorDBEntry hash_entry; + bool found; - hash_entry = - (DiskQuotaWorkerEntry *)hash_search(disk_quota_worker_map, (void *)&MyDatabaseId, HASH_FIND, &found); + hash_entry = (MonitorDBEntry)hash_search(monitored_dbid_cache, (void *)&MyDatabaseId, HASH_FIND, &found); hash_entry->is_readiness_logged = true; } - LWLockRelease(diskquota_locks.worker_map_lock); + LWLockRelease(diskquota_locks.monitored_dbid_cache_lock); } /* functions of disk quota*/ @@ -147,20 +162,65 @@ void disk_quota_launcher_main(Datum); static void disk_quota_sigterm(SIGNAL_ARGS); static void disk_quota_sighup(SIGNAL_ARGS); static void define_guc_variables(void); -static bool start_worker_by_dboid(Oid dbid); -static void start_workers_from_dblist(void); +static bool start_worker(void); static void create_monitor_db_table(void); static void add_dbid_to_database_list(Oid dbid); static void del_dbid_from_database_list(Oid dbid); static void process_extension_ddl_message(void); static void do_process_extension_ddl_message(MessageResult *code, ExtensionDDLMessage local_extension_ddl_message); -static void try_kill_db_worker(Oid dbid); static void terminate_all_workers(void); static void on_add_db(Oid dbid, MessageResult *code); static void on_del_db(Oid dbid, MessageResult *code); static bool is_valid_dbid(Oid dbid); extern void invalidate_database_rejectmap(Oid dbid); +static void FreeWorkerOnExit(int code, Datum arg); +static void FreeWorker(DiskQuotaWorkerEntry *worker); +static void init_database_list(void); +static bool CanLaunchWorker(void); +static DiskquotaDBEntry *next_db(void); +static DiskQuotaWorkerEntry *next_worker(void); +static DiskquotaDBEntry *add_db_entry(Oid dbid); +static void release_db_entry(Oid dbid); +static char *get_db_name(Oid dbid); +static void reset_worker(DiskQuotaWorkerEntry *dq_worker); +static void vacuum_db_entry(DiskquotaDBEntry *db); +static void init_bgworker_handles(void); +static BackgroundWorkerHandle *get_bgworker_handle(uint32 worker_id); +static void free_bgworker_handle(uint32 worker_id); + +bool +diskquota_is_paused() +{ + Assert(MyDatabaseId != InvalidOid); + bool paused = false; + bool found; + MonitorDBEntry entry; + LWLockAcquire(diskquota_locks.monitored_dbid_cache_lock, LW_SHARED); + entry = hash_search(monitored_dbid_cache, &MyDatabaseId, HASH_FIND, &found); + if (found) + { + paused = entry->paused; + } + LWLockRelease(diskquota_locks.monitored_dbid_cache_lock); + return paused; +} + +/* + * diskquota_launcher_shmem_size + * Compute space needed for diskquota launcher related shared memory + */ +Size +diskquota_launcher_shmem_size(void) +{ + Size size; + + size = MAXALIGN(sizeof(DiskquotaLauncherShmemStruct)); + size = add_size(size, mul_size(diskquota_max_workers, + sizeof(struct DiskQuotaWorkerEntry))); // hidden memory for DiskQuotaWorkerEntry + size = add_size(size, mul_size(MAX_NUM_MONITORED_DB, sizeof(struct DiskquotaDBEntry))); // hidden memory for dbArray + return size; +} /* * Entrypoint of diskquota module. 
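A note on diskquota_launcher_shmem_size() just above: add_size() and mul_size() are PostgreSQL's checked size arithmetic; they ereport(ERROR) on overflow rather than silently wrapping, so an absurd GUC value cannot undersize the shared-memory segment. The shape of the computation, with hypothetical names (LauncherHeader and WorkerSlot are ours):

#include "postgres.h"
#include "storage/shmem.h"

typedef struct LauncherHeader { int nworkers; } LauncherHeader;
typedef struct WorkerSlot { int id; } WorkerSlot;

/* header plus a trailing array of per-worker slots, overflow-checked */
static Size
launcher_size(int max_workers)
{
	Size size = MAXALIGN(sizeof(LauncherHeader));

	size = add_size(size, mul_size(max_workers, sizeof(WorkerSlot)));
	return size;
}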
* @@ -267,6 +327,21 @@ disk_quota_sigusr1(SIGNAL_ARGS) errno = save_errno; } +/* + * Signal handler for SIGUSR2 + * Set a flag to tell the launcher to handle extension ddl message + */ +static void +disk_quota_sigusr2(SIGNAL_ARGS) +{ + int save_errno = errno; + + got_sigusr2 = true; + + if (MyProc) SetLatch(&MyProc->procLatch); + + errno = save_errno; +} /* * Define GUC variables used by diskquota */ @@ -289,6 +364,10 @@ define_guc_variables(void) &diskquota_worker_timeout, 60, 1, INT_MAX, PGC_SIGHUP, 0, NULL, NULL, NULL); DefineCustomBoolVariable("diskquota.hard_limit", "Set this to 'on' to enable disk-quota hardlimit.", NULL, &diskquota_hardlimit, false, PGC_SIGHUP, 0, NULL, NULL, NULL); + DefineCustomIntVariable( + "diskquota.max_workers", + "Max number of backgroud workers to run diskquota extension, should be less than max_worker_processes.", + NULL, &diskquota_max_workers, 10, 1, max_worker_processes, PGC_POSTMASTER, 0, NULL, NULL, NULL); } /* ---- Functions for disk quota worker process ---- */ @@ -309,12 +388,24 @@ disk_quota_worker_main(Datum main_arg) optimizer = false; ereport(LOG, (errmsg("[diskquota] start disk quota worker process to monitor database:%s", dbname))); - /* Establish signal handlers before unblocking signals. */ pqsignal(SIGHUP, disk_quota_sighup); pqsignal(SIGTERM, disk_quota_sigterm); pqsignal(SIGUSR1, disk_quota_sigusr1); + MyWorkerInfo = (DiskQuotaWorkerEntry *)DatumGetPointer(MyBgworkerEntry->bgw_main_arg); + Assert(MyWorkerInfo != NULL); + if (!MyWorkerInfo->dbEntry->inited) + ereport(LOG, (errmsg("[diskquota] start disk quota worker process to monitor database:%s", dbname))); + /* + * The shmem exit hook is registered after registering disk_quota_sigterm. + * So if the SIGTERM arrives before this statement, the shmem exit hook + * won't be called. + * + * TODO: launcher to free the unused worker? + */ + on_shmem_exit(FreeWorkerOnExit, 0); + /* We're now ready to receive signals */ BackgroundWorkerUnblockSignals(); @@ -331,9 +422,10 @@ disk_quota_worker_main(Datum main_arg) * Initialize diskquota related local hash map and refresh model * immediately */ - init_disk_quota_model(); + init_disk_quota_model(MyWorkerInfo->dbEntry->id); - // check current binary version and SQL DLL version are matched + // FIXME: version check should be run for each starting bgworker? + // check current binary version and SQL DLL version are matched int times = 0; while (!got_sigterm) { @@ -389,6 +481,8 @@ disk_quota_worker_main(Datum main_arg) while (!got_sigterm) { int rc; + /* If the database has been inited before, no need to check the ready state again */ + if (MyWorkerInfo->dbEntry->inited) break; CHECK_FOR_INTERRUPTS(); @@ -398,9 +492,12 @@ disk_quota_worker_main(Datum main_arg) * After running UDF init_table_size_table() The state will changed to * be ready. 
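The wait loops in this worker (and in the launcher below) all follow the standard 9.4-era background-worker pattern; distilled into a sketch (not diskquota's exact code):

#include "postgres.h"
#include "miscadmin.h"
#include "storage/ipc.h"
#include "storage/latch.h"
#include "storage/proc.h"

static void
bgworker_loop(long naptime_secs)
{
	for (;;)
	{
		int rc;

		CHECK_FOR_INTERRUPTS();
		/* ... do one round of periodic work here ... */
		rc = WaitLatch(&MyProc->procLatch,
		               WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH,
		               naptime_secs * 1000L);
		ResetLatch(&MyProc->procLatch);
		if (rc & WL_POSTMASTER_DEATH)
			proc_exit(1); /* exit fast if the postmaster dies */
	}
}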
*/ - if (check_diskquota_state_is_ready()) + if (!diskquota_is_paused()) { - break; + if (check_diskquota_state_is_ready()) + { + break; + } } rc = WaitLatch(&MyProc->procLatch, WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, diskquota_naptime * 1000L); ResetLatch(&MyProc->procLatch); @@ -423,26 +520,28 @@ disk_quota_worker_main(Datum main_arg) } } - /* if received sigterm, just exit the worker process */ - if (got_sigterm) - { - ereport(LOG, (errmsg("[diskquota] bgworker for \"%s\" is being terminated by SIGTERM.", dbname))); - /* clear the out-of-quota rejectmap in shared memory */ - invalidate_database_rejectmap(MyDatabaseId); - proc_exit(0); - } - - /* Refresh quota model with init mode */ - refresh_disk_quota_model(true); - - ereport(LOG, (errmsg("[diskquota] start bgworker loop for database: \"%s\"", dbname))); - /* - * Main loop: do this until the SIGTERM handler tells us to terminate - */ while (!got_sigterm) { int rc; + SIMPLE_FAULT_INJECTOR("diskquota_worker_main"); + if (!diskquota_is_paused()) + { + /* Refresh quota model with init mode */ + refresh_disk_quota_model(!MyWorkerInfo->dbEntry->inited); + MyWorkerInfo->dbEntry->inited = true; + } + worker_increase_epoch(MyWorkerInfo->dbEntry->dbid); + + // GPDB6 opend a MemoryAccount for us without asking us. + // and GPDB6 did not release the MemoryAccount after SPI finish. + // Reset the MemoryAccount although we never create it. + MemoryAccounting_Reset(); + + if (DiskquotaLauncherShmem->isDynamicWorker) + { + break; + } CHECK_FOR_INTERRUPTS(); /* @@ -470,20 +569,10 @@ disk_quota_worker_main(Datum main_arg) got_sighup = false; ProcessConfigFile(PGC_SIGHUP); } - - SIMPLE_FAULT_INJECTOR("diskquota_worker_main"); - - /* Do the work */ - if (!diskquota_is_paused()) refresh_disk_quota_model(false); - - /* Reset memory account to fix memory leak */ - MemoryAccounting_Reset(); - worker_increase_epoch(MyDatabaseId); } - ereport(LOG, (errmsg("[diskquota] bgworker for \"%s\" is being terminated by SIGTERM.", dbname))); - /* clear the out-of-quota rejectmap in shared memory */ - invalidate_database_rejectmap(MyDatabaseId); + if (got_sigterm) + ereport(LOG, (errmsg("[diskquota] stop disk quota worker process to monitor database:%s", dbname))); proc_exit(0); } @@ -507,19 +596,20 @@ void disk_quota_launcher_main(Datum main_arg) { time_t loop_begin, loop_end; + MemoryContextSwitchTo(TopMemoryContext); + init_bgworker_handles(); /* establish signal handlers before unblocking signals. */ pqsignal(SIGHUP, disk_quota_sighup); pqsignal(SIGTERM, disk_quota_sigterm); pqsignal(SIGUSR1, disk_quota_sigusr1); - + pqsignal(SIGUSR2, disk_quota_sigusr2); /* we're now ready to receive signals */ BackgroundWorkerUnblockSignals(); LWLockAcquire(diskquota_locks.extension_ddl_message_lock, LW_EXCLUSIVE); extension_ddl_message->launcher_pid = MyProcPid; LWLockRelease(diskquota_locks.extension_ddl_message_lock); - /* * connect to our database 'diskquota'. launcher process will exit if * 'diskquota' database is not existed. @@ -538,20 +628,24 @@ disk_quota_launcher_main(Datum main_arg) */ create_monitor_db_table(); - /* - * firstly start worker processes for each databases with diskquota - * enabled. - */ - start_workers_from_dblist(); - - ereport(LOG, (errmsg("[diskquota launcher] start main loop"))); - /* main loop: do this until the SIGTERM handler tells us to terminate. 
*/ + init_database_list(); EnableClientWaitTimeoutInterrupt(); StartIdleResourceCleanupTimers(); loop_end = time(NULL); + + struct timeval nap; + nap.tv_sec = diskquota_naptime; + nap.tv_usec = 0; + TimestampTz loop_start_time = GetCurrentTimestamp(); + /* main loop: do this until the SIGTERM handler tells us to terminate. */ + ereport(LOG, (errmsg("[diskquota launcher] start main loop"))); + curDB = NULL; + while (!got_sigterm) { - int rc; + int rc; + bool sigusr1 = false; + bool sigusr2 = false; CHECK_FOR_INTERRUPTS(); @@ -561,26 +655,31 @@ disk_quota_launcher_main(Datum main_arg) * necessary, but is awakened if postmaster dies. That way the * background process goes away immediately in an emergency. */ - rc = WaitLatch(&MyProc->procLatch, WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, diskquota_naptime * 1000L); - ResetLatch(&MyProc->procLatch); - - // wait at least one time slice, avoid 100% CPU usage - if (!diskquota_naptime) usleep(1); - /* Emergency bailout if postmaster has died */ - if (rc & WL_POSTMASTER_DEATH) + if (nap.tv_sec != 0 || nap.tv_usec != 0) { - ereport(LOG, (errmsg("[diskquota launcher] launcher is being terminated by postmaster death."))); - proc_exit(1); + rc = WaitLatch(&MyProc->procLatch, WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, + (nap.tv_sec * 1000L) + (nap.tv_usec / 1000L)); + ResetLatch(&MyProc->procLatch); + + // wait at least one time slice, avoid 100% CPU usage + if (!diskquota_naptime) usleep(1); + + /* Emergency bailout if postmaster has died */ + if (rc & WL_POSTMASTER_DEATH) + { + ereport(LOG, (errmsg("[diskquota launcher] launcher is being terminated by postmaster death."))); + proc_exit(1); + } } - /* process extension ddl message */ - if (got_sigusr1) + if (got_sigusr2) { - got_sigusr1 = false; + got_sigusr2 = false; CancelIdleResourceCleanupTimers(); process_extension_ddl_message(); StartIdleResourceCleanupTimers(); + sigusr2 = true; } /* in case of a SIGHUP, just reload the configuration. */ @@ -591,6 +690,56 @@ disk_quota_launcher_main(Datum main_arg) ProcessConfigFile(PGC_SIGHUP); StartIdleResourceCleanupTimers(); } + + /* + * When the bgworker for diskquota worker starts or stops, + * postmsater prosess will send sigusr1 to launcher as + * worker.bgw_notify_pid has been set to launcher pid. + */ + if (got_sigusr1) + { + got_sigusr1 = false; + sigusr1 = true; + } + + /* + * modify wait time + */ + long secs; + int microsecs; + TimestampDifference(GetCurrentTimestamp(), + TimestampTzPlusMilliseconds(loop_start_time, diskquota_naptime * 1000L), &secs, µsecs); + nap.tv_sec = secs; + nap.tv_usec = microsecs; + + if (curDB == DiskquotaLauncherShmem->dbArrayTail) + { + /* Have sleep enough time, should start another loop */ + if (nap.tv_sec == 0 && nap.tv_usec == 0) + { + loop_start_time = GetCurrentTimestamp(); + /* set the curDB pointing to the head of the db list */ + curDB = NULL; + } + /* do nothing, just to sleep untill the nap time is 0 */ + else + { + continue; + } + } + + /* If there are no enough workers to run db, we can firstly sleep to wait workers */ + if (nap.tv_sec == 0 && nap.tv_usec == 0) + { + nap.tv_sec = diskquota_naptime > 0 ? 
diskquota_naptime : 1; + nap.tv_usec = 0; + } + + while (curDB != DiskquotaLauncherShmem->dbArrayTail && CanLaunchWorker()) + { + start_worker(); + } + loop_begin = loop_end; loop_end = time(NULL); if (isAbnormalLoopTime(loop_end - loop_begin)) @@ -622,8 +771,23 @@ create_monitor_db_table(void) bool pushed_active_snap = false; bool ret = true; + /* + * Create function diskquota.diskquota_fetch_table_stat in launcher + * We need this function to distribute dbid to segments when creating + * a diskquota extension. + */ sql = "create schema if not exists diskquota_namespace;" - "create table if not exists diskquota_namespace.database_list(dbid oid not null unique);"; + "create table if not exists diskquota_namespace.database_list(dbid oid not null unique);" + "DROP SCHEMA IF EXISTS " LAUNCHER_SCHEMA + " CASCADE;" + "CREATE SCHEMA " LAUNCHER_SCHEMA + ";" + "CREATE TYPE " LAUNCHER_SCHEMA + ".diskquota_active_table_type AS (TABLE_OID oid, TABLE_SIZE int8, GP_SEGMENT_ID " + "smallint);" + "CREATE FUNCTION " LAUNCHER_SCHEMA ".diskquota_fetch_table_stat(int4, oid[]) RETURNS setof " LAUNCHER_SCHEMA + ".diskquota_active_table_type AS '$libdir/" DISKQUOTA_BINARY_NAME + ".so', 'diskquota_fetch_table_stat' LANGUAGE C VOLATILE;"; StartTransactionCommand(); @@ -682,7 +846,7 @@ create_monitor_db_table(void) * diskquota-enabled databases from diskquota_namespace.database_list */ static void -start_workers_from_dblist(void) +init_database_list(void) { TupleDesc tupdesc; int num = 0; @@ -696,6 +860,7 @@ start_workers_from_dblist(void) */ StartTransactionCommand(); PushActiveSnapshot(GetTransactionSnapshot()); + ret = SPI_connect(); if (ret != SPI_OK_CONNECT) ereport(ERROR, @@ -710,15 +875,16 @@ start_workers_from_dblist(void) { ereport(LOG, (errmsg("[diskquota launcher], natts/atttypid: %d.", tupdesc->natts != 1 ? tupdesc->natts : tupdesc->attrs[0]->atttypid))); - ereport(ERROR, (errmsg("[diskquota launcher] table database_list corrupt, laucher will exit. natts: "))); + ereport(ERROR, (errmsg("[diskquota launcher] table database_list corrupt, launcher will exit. 
natts: "))); } for (i = 0; i < SPI_processed; i++) { - HeapTuple tup; - Oid dbid; - Datum dat; - bool isnull; + HeapTuple tup; + Oid dbid; + Datum dat; + bool isnull; + DiskquotaDBEntry *dbEntry; tup = SPI_tuptable->vals[i]; dat = SPI_getbinval(tup, tupdesc, 1, &isnull); @@ -730,11 +896,9 @@ start_workers_from_dblist(void) dbid))); continue; } - ereport(WARNING, (errmsg("[diskquota launcher] start workers"))); - if (!start_worker_by_dboid(dbid)) - ereport(ERROR, (errmsg("[diskquota launcher] start worker process of database(oid:%u) failed", dbid))); + dbEntry = add_db_entry(dbid); + if (dbEntry == NULL) continue; num++; - /* * diskquota only supports to monitor at most MAX_NUM_MONITORED_DB * databases @@ -748,11 +912,20 @@ start_workers_from_dblist(void) } } num_db = num; + /* As update_monitor_db_mpp needs to execute sql, so can not put in the loop above */ + for (int i = 0; i < MAX_NUM_MONITORED_DB; i++) + { + DiskquotaDBEntry *dbEntry = &DiskquotaLauncherShmem->dbArray[i]; + if (dbEntry->in_use) + { + update_monitor_db_mpp(dbEntry->dbid, ADD_DB_TO_MONITOR, LAUNCHER_SCHEMA); + } + } SPI_finish(); PopActiveSnapshot(); CommitTransactionCommand(); - /* TODO: clean invalid database */ + if (num_db > diskquota_max_workers) DiskquotaLauncherShmem->isDynamicWorker = true; } /* @@ -773,7 +946,8 @@ process_extension_ddl_message() /* create/drop extension message must be valid */ if (local_extension_ddl_message.req_pid == 0 || local_extension_ddl_message.launcher_pid != MyProcPid) return; - ereport(LOG, (errmsg("[diskquota launcher]: received create/drop extension diskquota message"))); + ereport(LOG, + (errmsg("[diskquota launcher]: received create/drop extension diskquota message, extension launcher"))); do_process_extension_ddl_message(&code, local_extension_ddl_message); @@ -828,7 +1002,7 @@ do_process_extension_ddl_message(MessageResult *code, ExtensionDDLMessage local_ break; case CMD_DROP_EXTENSION: on_del_db(local_extension_ddl_message.dbid, code); - num_db--; + if (num_db > 0) num_db--; *code = ERR_OK; break; default: @@ -856,6 +1030,57 @@ do_process_extension_ddl_message(MessageResult *code, ExtensionDDLMessage local_ CommitTransactionCommand(); else AbortCurrentTransaction(); + /* update something in memory after transaction committed */ + if (ret) + { + PG_TRY(); + { + /* update_monitor_db_mpp runs sql to distribute dbid to segments */ + StartTransactionCommand(); + PushActiveSnapshot(GetTransactionSnapshot()); + pushed_active_snap = true; + Oid dbid = local_extension_ddl_message.dbid; + int ret_code = SPI_connect(); + if (ret_code != SPI_OK_CONNECT) + { + ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("unable to connect to execute internal query. return code: %d.", ret_code))); + } + switch (local_extension_ddl_message.cmd) + { + case CMD_CREATE_EXTENSION: + add_db_entry(dbid); + /* TODO: how about this failed? 
*/ + update_monitor_db_mpp(dbid, ADD_DB_TO_MONITOR, LAUNCHER_SCHEMA); + if (num_db > diskquota_max_workers) DiskquotaLauncherShmem->isDynamicWorker = true; + break; + case CMD_DROP_EXTENSION: + /* terminate bgworker in release_db_entry rountine */ + release_db_entry(dbid); + update_monitor_db_mpp(dbid, REMOVE_DB_FROM_BEING_MONITORED, LAUNCHER_SCHEMA); + /* clear the out-of-quota rejectmap in shared memory */ + invalidate_database_rejectmap(dbid); + if (num_db <= diskquota_max_workers) DiskquotaLauncherShmem->isDynamicWorker = false; + break; + default: + ereport(LOG, (errmsg("[diskquota launcher]:received unsupported message cmd=%d", + local_extension_ddl_message.cmd))); + break; + } + SPI_finish(); + if (pushed_active_snap) PopActiveSnapshot(); + CommitTransactionCommand(); + } + PG_CATCH(); + { + error_context_stack = NULL; + HOLD_INTERRUPTS(); + EmitErrorReport(); + FlushErrorState(); + RESUME_INTERRUPTS(); + } + PG_END_TRY(); + } } /* @@ -891,12 +1116,6 @@ on_add_db(Oid dbid, MessageResult *code) PG_RE_THROW(); } PG_END_TRY(); - - if (!start_worker_by_dboid(dbid)) - { - *code = ERR_START_WORKER; - ereport(ERROR, (errmsg("[diskquota launcher] failed to start worker - dbid=%u", dbid))); - } } /* @@ -904,7 +1123,7 @@ on_add_db(Oid dbid, MessageResult *code) * do: * 1. kill the associated worker process * 2. delete dbid from diskquota_namespace.database_list - * 3. invalidate reject-map entries and monitoring_dbid_cache from shared memory + * 3. invalidate reject-map entries and monitored_dbid_cache from shared memory */ static void on_del_db(Oid dbid, MessageResult *code) @@ -915,9 +1134,6 @@ on_del_db(Oid dbid, MessageResult *code) ereport(ERROR, (errmsg("[diskquota launcher] invalid database oid"))); } - /* tell postmaster to stop this bgworker */ - try_kill_db_worker(dbid); - /* * delete dbid from diskquota_namespace.database_list set *code to * ERR_DEL_FROM_DB if any error occurs @@ -998,118 +1214,53 @@ del_dbid_from_database_list(Oid dbid) ret))); } -/* - * When drop exention database, diskquota laucher will receive a message - * to kill the diskquota worker process which monitoring the target database. - */ -static void -try_kill_db_worker(Oid dbid) -{ - DiskQuotaWorkerEntry *hash_entry; - bool found; - - LWLockAcquire(diskquota_locks.worker_map_lock, LW_EXCLUSIVE); - hash_entry = (DiskQuotaWorkerEntry *)hash_search(disk_quota_worker_map, (void *)&dbid, HASH_REMOVE, &found); - if (found) - { - BackgroundWorkerHandle *handle; - - handle = hash_entry->handle; - if (handle) - { - TerminateBackgroundWorker(handle); - pfree(handle); - } - } - LWLockRelease(diskquota_locks.worker_map_lock); -} - /* * When launcher exits, it should also terminate all the workers. */ static void terminate_all_workers(void) { - DiskQuotaWorkerEntry *hash_entry; - HASH_SEQ_STATUS iter; - - LWLockAcquire(diskquota_locks.worker_map_lock, LW_EXCLUSIVE); - - hash_seq_init(&iter, disk_quota_worker_map); - - /* - * terminate the worker processes. 
since launcher will exit immediately, - * we skip to clear the disk_quota_worker_map and monitoring_dbid_cache - */ - while ((hash_entry = hash_seq_search(&iter)) != NULL) - { - if (hash_entry->handle) TerminateBackgroundWorker(hash_entry->handle); - } - LWLockRelease(diskquota_locks.worker_map_lock); -} - -static bool -worker_create_entry(Oid dbid) -{ - DiskQuotaWorkerEntry *workerentry = NULL; - bool found = false; - - LWLockAcquire(diskquota_locks.worker_map_lock, LW_EXCLUSIVE); - - workerentry = (DiskQuotaWorkerEntry *)hash_search(disk_quota_worker_map, (void *)&dbid, HASH_ENTER, &found); - if (!found) - { - workerentry->handle = NULL; - pg_atomic_write_u32(&(workerentry->epoch), 0); - workerentry->is_paused = false; - workerentry->is_readiness_logged = false; - } - - LWLockRelease(diskquota_locks.worker_map_lock); - return found; -} - -static bool -worker_set_handle(Oid dbid, BackgroundWorkerHandle *handle) -{ - DiskQuotaWorkerEntry *workerentry = NULL; - bool found = false; - - LWLockAcquire(diskquota_locks.worker_map_lock, LW_EXCLUSIVE); - - workerentry = (DiskQuotaWorkerEntry *)hash_search(disk_quota_worker_map, (void *)&dbid, HASH_ENTER, &found); - if (found) - { - workerentry->handle = handle; - } - LWLockRelease(diskquota_locks.worker_map_lock); - if (!found) + dlist_iter iterdb; + DiskQuotaWorkerEntry *worker; + BackgroundWorkerHandle *handle; + LWLockAcquire(diskquota_locks.workerlist_lock, LW_SHARED); + dlist_foreach(iterdb, &DiskquotaLauncherShmem->runningWorkers) { - ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("[diskquota] worker not found for database \"%s\"", get_database_name(dbid)))); + worker = dlist_container(DiskQuotaWorkerEntry, node, iterdb.cur); + handle = get_bgworker_handle(worker->id); + if (handle != NULL) TerminateBackgroundWorker(handle); } - return found; + LWLockRelease(diskquota_locks.workerlist_lock); } /* * Dynamically launch an disk quota worker process. - * This function is called when laucher process receive - * a 'create extension diskquota' message. + * This function is called when launcher process + * schedules a database's diskquota worker to run. */ + static bool -start_worker_by_dboid(Oid dbid) +start_worker() { - BackgroundWorker worker; - BackgroundWorkerHandle *handle; - BgwHandleStatus status; - MemoryContext old_ctx; - char *dbname; - pid_t pid; - bool ret; - - /* Create entry first so that it can be checked by bgworker and QD. 
*/ - worker_create_entry(dbid); - + BackgroundWorker worker; + bool ret; + DiskQuotaWorkerEntry *dq_worker; + DiskquotaDBEntry *dbEntry; + MemoryContext old_ctx; + char *dbname = NULL; + + dq_worker = next_worker(); + if (dq_worker == NULL) return false; + + /* pick a db run */ + LWLockAcquire(diskquota_locks.dblist_lock, LW_EXCLUSIVE); + dbEntry = next_db(); + if (dbEntry == DiskquotaLauncherShmem->dbArrayTail) goto Failed; + + dbEntry->workerId = dq_worker->id; + dq_worker->dbEntry = dbEntry; + /* free the BackgroundWorkerHandle used by last database */ + free_bgworker_handle(dq_worker->id); memset(&worker, 0, sizeof(BackgroundWorker)); worker.bgw_flags = BGWORKER_SHMEM_ACCESS | BGWORKER_BACKEND_DATABASE_CONNECTION; worker.bgw_start_time = BgWorkerStart_RecoveryFinished; @@ -1123,33 +1274,48 @@ start_worker_by_dboid(Oid dbid) worker.bgw_restart_time = BGW_NEVER_RESTART; sprintf(worker.bgw_library_name, DISKQUOTA_BINARY_NAME); sprintf(worker.bgw_function_name, "disk_quota_worker_main"); - - dbname = get_database_name(dbid); - Assert(dbname != NULL); + dbname = get_db_name(dbEntry->dbid); + if (dbname == NULL) goto Failed; snprintf(worker.bgw_name, sizeof(worker.bgw_name), "%s", dbname); pfree(dbname); + /* set bgw_notify_pid so that we can use WaitForBackgroundWorkerStartup */ worker.bgw_notify_pid = MyProcPid; - worker.bgw_main_arg = (Datum)0; + worker.bgw_main_arg = (Datum)PointerGetDatum(dq_worker); old_ctx = MemoryContextSwitchTo(TopMemoryContext); - ret = RegisterDynamicBackgroundWorker(&worker, &handle); + ret = RegisterDynamicBackgroundWorker(&worker, &(bgworker_handles[dq_worker->id])); MemoryContextSwitchTo(old_ctx); - if (!ret) return false; - status = WaitForBackgroundWorkerStartup(handle, &pid); + if (!ret) + { + elog(WARNING, "Create bgworker failed"); + goto Failed; + } + + BgwHandleStatus status; + pid_t pid; + status = WaitForBackgroundWorkerStartup(bgworker_handles[dq_worker->id], &pid); if (status == BGWH_STOPPED) - ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_RESOURCES), errmsg("could not start background process"), - errhint("More details may be available in the server log."))); + { + ereport(WARNING, (errcode(ERRCODE_INSUFFICIENT_RESOURCES), errmsg("could not start background process"), + errhint("More details may be available in the server log."))); + goto Failed; + } if (status == BGWH_POSTMASTER_DIED) - ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_RESOURCES), - errmsg("cannot start background processes without postmaster"), - errhint("Kill all remaining database processes and restart the database."))); + { + ereport(WARNING, (errcode(ERRCODE_INSUFFICIENT_RESOURCES), + errmsg("cannot start background processes without postmaster"), + errhint("Kill all remaining database processes and restart the database."))); + goto Failed; + } Assert(status == BGWH_STARTED); - - /* Save the handle to the worker map to check the liveness. 
*/ - worker_set_handle(dbid, handle); + LWLockRelease(diskquota_locks.dblist_lock); return true; +Failed: + LWLockRelease(diskquota_locks.dblist_lock); + FreeWorker(dq_worker); + return false; } /* @@ -1168,41 +1334,38 @@ is_valid_dbid(Oid dbid) } bool -worker_increase_epoch(Oid database_oid) +worker_increase_epoch(Oid dbid) { - LWLockAcquire(diskquota_locks.worker_map_lock, LW_SHARED); - - bool found = false; - DiskQuotaWorkerEntry *workerentry = - (DiskQuotaWorkerEntry *)hash_search(disk_quota_worker_map, (void *)&database_oid, HASH_FIND, &found); + bool found = false; + MonitorDBEntry entry; + LWLockAcquire(diskquota_locks.monitored_dbid_cache_lock, LW_SHARED); + entry = hash_search(monitored_dbid_cache, &dbid, HASH_FIND, &found); if (found) { - pg_atomic_fetch_add_u32(&(workerentry->epoch), 1); + pg_atomic_fetch_add_u32(&(entry->epoch), 1); } - LWLockRelease(diskquota_locks.worker_map_lock); + LWLockRelease(diskquota_locks.monitored_dbid_cache_lock); return found; } uint32 -worker_get_epoch(Oid database_oid) +worker_get_epoch(Oid dbid) { - LWLockAcquire(diskquota_locks.worker_map_lock, LW_SHARED); - - bool found = false; - uint32 epoch = 0; - DiskQuotaWorkerEntry *workerentry = - (DiskQuotaWorkerEntry *)hash_search(disk_quota_worker_map, (void *)&database_oid, HASH_FIND, &found); - + bool found = false; + uint32 epoch = 0; + MonitorDBEntry entry; + LWLockAcquire(diskquota_locks.monitored_dbid_cache_lock, LW_SHARED); + entry = hash_search(monitored_dbid_cache, &dbid, HASH_FIND, &found); if (found) { - epoch = pg_atomic_read_u32(&(workerentry->epoch)); + epoch = pg_atomic_read_u32(&(entry->epoch)); } - LWLockRelease(diskquota_locks.worker_map_lock); + LWLockRelease(diskquota_locks.monitored_dbid_cache_lock); if (!found) { ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("[diskquota] worker not found for database \"%s\"", get_database_name(database_oid)))); + errmsg("[diskquota] worker not found for database \"%s\"", get_db_name(dbid)))); } return epoch; } @@ -1224,16 +1387,14 @@ diskquota_status_check_soft_limit() // should run on coordinator only. Assert(IS_QUERY_DISPATCHER()); - bool found, paused; - LWLockAcquire(diskquota_locks.worker_map_lock, LW_SHARED); + bool found, paused; + MonitorDBEntry entry; + LWLockAcquire(diskquota_locks.monitored_dbid_cache_lock, LW_SHARED); { - DiskQuotaWorkerEntry *hash_entry; - - hash_entry = - (DiskQuotaWorkerEntry *)hash_search(disk_quota_worker_map, (void *)&MyDatabaseId, HASH_FIND, &found); - paused = found ? hash_entry->is_paused : false; + entry = hash_search(monitored_dbid_cache, &MyDatabaseId, HASH_FIND, &found); + paused = found ? entry->paused : false; } - LWLockRelease(diskquota_locks.worker_map_lock); + LWLockRelease(diskquota_locks.monitored_dbid_cache_lock); // if worker no booted, aka 'CREATE EXTENSION' not called, diskquota is paused if (!found) return "paused"; @@ -1250,20 +1411,11 @@ diskquota_status_check_hard_limit() bool hardlimit = diskquota_hardlimit; - bool found, paused; - LWLockAcquire(diskquota_locks.worker_map_lock, LW_SHARED); - { - DiskQuotaWorkerEntry *hash_entry; - - hash_entry = - (DiskQuotaWorkerEntry *)hash_search(disk_quota_worker_map, (void *)&MyDatabaseId, HASH_FIND, &found); - paused = found ? 
hash_entry->is_paused : false; - } - LWLockRelease(diskquota_locks.worker_map_lock); - - // if worker booted and 'worker_map->is_paused == true' and hardlimit is enabled + bool paused = false; + paused = diskquota_is_paused(); + // if worker booted and 'is_paused == true' and hardlimit is enabled // hard limits should also paused - if (found && paused && hardlimit) return "paused"; + if (paused && hardlimit) return "paused"; return hardlimit ? "on" : "off"; } @@ -1417,3 +1569,279 @@ wait_for_worker_new_epoch(PG_FUNCTION_ARGS) } PG_RETURN_BOOL(false); } + +static void +FreeWorker(DiskQuotaWorkerEntry *worker) +{ + if (worker != NULL) + { + LWLockAcquire(diskquota_locks.dblist_lock, LW_EXCLUSIVE); + if (worker->dbEntry != NULL) + { + bool in_use = worker->dbEntry->in_use; + if (in_use && worker->dbEntry->workerId == worker->id) + { + worker->dbEntry->workerId = INVALID_WORKER_ID; + } + } + LWLockRelease(diskquota_locks.dblist_lock); + LWLockAcquire(diskquota_locks.workerlist_lock, LW_EXCLUSIVE); + dlist_delete(&worker->node); + worker->dbEntry = NULL; + dlist_push_head(&DiskquotaLauncherShmem->freeWorkers, &worker->node); + DiskquotaLauncherShmem->running_workers_num--; + LWLockRelease(diskquota_locks.workerlist_lock); + } +} + +static void +FreeWorkerOnExit(int code, Datum arg) +{ + if (MyWorkerInfo != NULL) + { + FreeWorker(MyWorkerInfo); + } +} + +static bool +CanLaunchWorker(void) +{ + if (dlist_is_empty(&DiskquotaLauncherShmem->freeWorkers)) + { + return false; + } + if (num_db <= 0) + { + return false; + } + if (DiskquotaLauncherShmem->running_workers_num >= num_db) + { + return false; + } + return true; +} + +void +init_launcher_shmem() +{ + bool found; + DiskquotaLauncherShmem = (DiskquotaLauncherShmemStruct *)ShmemInitStruct("Diskquota launcher Data", + diskquota_launcher_shmem_size(), &found); + memset(DiskquotaLauncherShmem, 0, diskquota_launcher_shmem_size()); + if (!found) + { + dlist_init(&DiskquotaLauncherShmem->freeWorkers); + dlist_init(&DiskquotaLauncherShmem->runningWorkers); + + // a pointer to the start address of hidden memory + uint8_t *hidden_memory_prt = (uint8_t *)DiskquotaLauncherShmem + MAXALIGN(sizeof(DiskquotaLauncherShmemStruct)); + + // get DiskQuotaWorkerEntry from the hidden memory + DiskQuotaWorkerEntry *worker = (DiskQuotaWorkerEntry *)hidden_memory_prt; + hidden_memory_prt += mul_size(diskquota_max_workers, sizeof(DiskQuotaWorkerEntry)); + + // get dbArray from the hidden memory + DiskquotaDBEntry *dbArray = (DiskquotaDBEntry *)hidden_memory_prt; + hidden_memory_prt += mul_size(MAX_NUM_MONITORED_DB, sizeof(struct DiskquotaDBEntry)); + + // get the dbArrayTail from the hidden memory + DiskquotaDBEntry *dbArrayTail = (DiskquotaDBEntry *)hidden_memory_prt; + + /* add all worker to the free worker list */ + DiskquotaLauncherShmem->running_workers_num = 0; + for (int i = 0; i < diskquota_max_workers; i++) + { + memset(&worker[i], 0, sizeof(DiskQuotaWorkerEntry)); + worker[i].id = i; + dlist_push_head(&DiskquotaLauncherShmem->freeWorkers, &worker[i].node); + } + + DiskquotaLauncherShmem->dbArray = dbArray; + DiskquotaLauncherShmem->dbArrayTail = dbArrayTail; + + for (int i = 0; i < MAX_NUM_MONITORED_DB; i++) + { + memset(&DiskquotaLauncherShmem->dbArray[i], 0, sizeof(DiskquotaDBEntry)); + DiskquotaLauncherShmem->dbArray[i].id = i; + DiskquotaLauncherShmem->dbArray[i].workerId = INVALID_WORKER_ID; + } + } +} + +/* + * Look for an unused slot. If we find one, grab it. 
+ */ +static DiskquotaDBEntry * +add_db_entry(Oid dbid) +{ + DiskquotaDBEntry *result = NULL; + + /* if there is already dbEntry's dbid equals dbid, returning the existing one */ + for (int i = 0; i < MAX_NUM_MONITORED_DB; i++) + { + DiskquotaDBEntry *dbEntry = &DiskquotaLauncherShmem->dbArray[i]; + if (!dbEntry->in_use && result == NULL) + { + dbEntry->dbid = dbid; + dbEntry->in_use = true; + result = dbEntry; + } + else if (dbEntry->in_use && dbEntry->dbid == dbid) + { + result = dbEntry; + break; + } + } + if (result == NULL) + ereport(WARNING, (errmsg("[diskquota launcher] diskquota monitored database limit is reached, database(oid:%u) " + "will not enable diskquota", + dbid))); + return result; +} + +static void +release_db_entry(Oid dbid) +{ + DiskquotaDBEntry *db = NULL; + for (int i = 0; i < MAX_NUM_MONITORED_DB; i++) + { + DiskquotaDBEntry *dbEntry = &DiskquotaLauncherShmem->dbArray[i]; + if (dbEntry->in_use && dbEntry->dbid == dbid) + { + db = dbEntry; + break; + } + } + if (db == NULL) + { + return; + } + + LWLockAcquire(diskquota_locks.dblist_lock, LW_EXCLUSIVE); + if (db->workerId != INVALID_WORKER_ID) + { + BackgroundWorkerHandle *handle = get_bgworker_handle(db->workerId); + TerminateBackgroundWorker(handle); + } + vacuum_disk_quota_model(db->id); + /* should be called at last to set in_use to false */ + vacuum_db_entry(db); + LWLockRelease(diskquota_locks.dblist_lock); +} + +/* + * Pick next db to run. + * If the curDB is NULL, pick the head db to run. + * If the dbList empty, return NULL. + * If the picked db is in running status, skip it, pick the next one to run. + */ +static DiskquotaDBEntry * +next_db(void) +{ + if (curDB == NULL || curDB == DiskquotaLauncherShmem->dbArrayTail) + { + curDB = DiskquotaLauncherShmem->dbArray; + } + else + { + curDB++; + } + for (; curDB < DiskquotaLauncherShmem->dbArrayTail; curDB++) + { + if (!curDB->in_use) continue; + if (curDB->workerId != INVALID_WORKER_ID) continue; + if (curDB->dbid == InvalidOid) continue; + break; + } + return curDB; +} + +static DiskQuotaWorkerEntry * +next_worker(void) +{ + DiskQuotaWorkerEntry *dq_worker = NULL; + dlist_node *wnode; + + /* acquire worker from worker list */ + LWLockAcquire(diskquota_locks.workerlist_lock, LW_EXCLUSIVE); + if (dlist_is_empty(&DiskquotaLauncherShmem->freeWorkers)) goto out; + wnode = dlist_pop_head_node(&DiskquotaLauncherShmem->freeWorkers); + dq_worker = dlist_container(DiskQuotaWorkerEntry, node, wnode); + reset_worker(dq_worker); + dlist_push_head(&DiskquotaLauncherShmem->runningWorkers, &dq_worker->node); + DiskquotaLauncherShmem->running_workers_num++; +out: + LWLockRelease(diskquota_locks.workerlist_lock); + return dq_worker; +} + +static char * +get_db_name(Oid dbid) +{ + char *dbname = NULL; + MemoryContext old_ctx; + if (dbid == InvalidOid) + { + elog(WARNING, "database oid is invalid"); + return NULL; + } + + StartTransactionCommand(); + (void)GetTransactionSnapshot(); + old_ctx = MemoryContextSwitchTo(TopMemoryContext); + dbname = get_database_name(dbid); + MemoryContextSwitchTo(old_ctx); + CommitTransactionCommand(); + return dbname; +} + +static void +reset_worker(DiskQuotaWorkerEntry *dq_worker) +{ + if (dq_worker == NULL) return; + dq_worker->dbEntry = NULL; +} + +/* + * id can not be changed + */ +static void +vacuum_db_entry(DiskquotaDBEntry *db) +{ + if (db == NULL) return; + db->dbid = InvalidOid; + db->inited = false; + db->workerId = INVALID_WORKER_ID; + db->in_use = false; +} + +static void +init_bgworker_handles(void) +{ + bgworker_handles = 
(BackgroundWorkerHandle **)(palloc(sizeof(BackgroundWorkerHandle *) * diskquota_max_workers)); + for (int i = 0; i < diskquota_max_workers; i++) + { + bgworker_handles[i] = NULL; + } + return; +} + +static BackgroundWorkerHandle * +get_bgworker_handle(uint32 worker_id) +{ + if (worker_id >= 0) + return bgworker_handles[worker_id]; + else + return NULL; +} + +static void +free_bgworker_handle(uint32 worker_id) +{ + BackgroundWorkerHandle **handle = &bgworker_handles[worker_id]; + if (*handle != NULL) + { + pfree(*handle); + *handle = NULL; + } +} diff --git a/diskquota.h b/diskquota.h index 9f6a1e30b1e..f204f9e267d 100644 --- a/diskquota.h +++ b/diskquota.h @@ -17,6 +17,7 @@ #include "postgres.h" #include "port/atomics.h" +#include "lib/ilist.h" #include "fmgr.h" #include "storage/lock.h" #include "storage/lwlock.h" @@ -29,7 +30,10 @@ #include /* max number of monitored database with diskquota enabled */ -#define MAX_NUM_MONITORED_DB 10 +#define MAX_NUM_MONITORED_DB 50 +#define LAUNCHER_SCHEMA "diskquota_utility" +#define EXTENSION_SCHEMA "diskquota" + typedef enum { NAMESPACE_QUOTA = 0, @@ -57,6 +61,8 @@ typedef enum FETCH_ACTIVE_SIZE, /* fetch size for active tables */ ADD_DB_TO_MONITOR, REMOVE_DB_FROM_BEING_MONITORED, + PAUSE_DB_TO_MONITOR, + RESUME_DB_TO_MONITOR, } FetchTableStatType; typedef enum @@ -71,9 +77,11 @@ struct DiskQuotaLocks LWLock *reject_map_lock; LWLock *extension_ddl_message_lock; LWLock *extension_ddl_lock; /* ensure create diskquota extension serially */ - LWLock *monitoring_dbid_cache_lock; + LWLock *monitored_dbid_cache_lock; LWLock *relation_cache_lock; - LWLock *worker_map_lock; + /* dblist_lock is used to protect a DiskquotaDBEntry's content */ + LWLock *dblist_lock; + LWLock *workerlist_lock; LWLock *altered_reloid_cache_lock; }; typedef struct DiskQuotaLocks DiskQuotaLocks; @@ -84,6 +92,7 @@ typedef struct DiskQuotaLocks DiskQuotaLocks; * the diskquota launcher process and backends. * When backend create an extension, it send a message to launcher * to start the diskquota worker process and write the corresponding + * * dbOid into diskquota database_list table in postgres database. * When backend drop an extension, it will send a message to launcher * to stop the diskquota worker process and remove the dbOid from diskquota @@ -132,17 +141,56 @@ extern DiskQuotaLocks diskquota_locks; extern ExtensionDDLMessage *extension_ddl_message; typedef struct DiskQuotaWorkerEntry DiskQuotaWorkerEntry; +typedef struct DiskquotaDBEntry DiskquotaDBEntry; -/* disk quota worker info used by launcher to manage the worker processes. */ +/* + * disk quota worker info used by launcher to manage the worker processes + * used in DiskquotaLauncherShmem->{freeWorkers, runningWorkers} + */ struct DiskQuotaWorkerEntry { - Oid dbid; - pg_atomic_uint32 epoch; /* this counter will be increased after each worker loop */ - bool is_paused; /* true if this worker is paused */ - bool is_readiness_logged; /* true if we have logged the error message for not ready */ + dlist_node node; // the double linked list header - // NOTE: this field only can access in diskquota launcher, in other process it is dangling pointer - BackgroundWorkerHandle *handle; + int id; // starts from 0, -1 means invalid + DiskquotaDBEntry *dbEntry; // pointer to shared memory. 
DiskquotaLauncherShmem->dbArray +}; + +typedef struct +{ + dlist_head freeWorkers; // a list of DiskQuotaWorkerEntry + dlist_head runningWorkers; // a list of DiskQuotaWorkerEntry + DiskquotaDBEntry *dbArray; // size == MAX_NUM_MONITORED_DB + DiskquotaDBEntry *dbArrayTail; + int running_workers_num; + volatile bool isDynamicWorker; + /* + DiskQuotaWorkerEntry worker[diskquota_max_workers]; // the hidden memory to store WorkerEntry + DiskquotaDBEntry dbentry[MAX_NUM_MONITORED_DB]; // the hidden memory for dbentry + */ +} DiskquotaLauncherShmemStruct; + +/* In shmem, only used on master */ +struct DiskquotaDBEntry +{ + int id; // the index of DiskquotaLauncherShmem->dbArray, start from 0 + Oid dbid; // the database oid in postgres catalog + +#define INVALID_WORKER_ID -1 + int workerId; // the id of the worker which is running for the, 0 means no worker for it. + + bool inited; // this entry is inited, will set to true after the worker finish the frist run. + bool in_use; // this slot is in using. AKA dbid != 0 +}; + +/* used in monitored_dbid_cache, in shmem, both on master and segments */ +typedef struct MonitorDBEntryStruct *MonitorDBEntry; +struct MonitorDBEntryStruct +{ + Oid dbid; // the key + + bool paused; + bool is_readiness_logged; /* true if we have logged the error message for not ready */ + pg_atomic_uint32 epoch; /* this counter will be increased after each worker loop */ }; extern HTAB *disk_quota_worker_map; @@ -156,7 +204,7 @@ extern void invalidate_database_rejectmap(Oid dbid); /* quota model interface*/ extern void init_disk_quota_shmem(void); -extern void init_disk_quota_model(void); +extern void init_disk_quota_model(uint32 id); extern void refresh_disk_quota_model(bool force); extern bool check_diskquota_state_is_ready(void); extern bool quota_check_common(Oid reloid, RelFileNode *relfilenode); @@ -180,11 +228,15 @@ extern List *diskquota_get_index_list(Oid relid); extern void diskquota_get_appendonly_aux_oid_list(Oid reloid, Oid *segrelid, Oid *blkdirrelid, Oid *visimaprelid); extern Oid diskquota_parse_primary_table_oid(Oid namespace, char *relname); -extern bool worker_increase_epoch(Oid database_oid); -extern unsigned int worker_get_epoch(Oid database_oid); +extern bool worker_increase_epoch(Oid dbid); +extern unsigned int worker_get_epoch(Oid dbid); extern bool diskquota_is_paused(void); extern bool do_check_diskquota_state_is_ready(void); extern bool diskquota_is_readiness_logged(void); extern void diskquota_set_readiness_logged(void); - +extern Size diskquota_launcher_shmem_size(void); +extern void init_launcher_shmem(void); +extern void vacuum_disk_quota_model(uint32 id); +extern void update_monitor_db(Oid dbid, FetchTableStatType action); +extern void update_monitor_db_mpp(Oid dbid, FetchTableStatType action, const char *schema); #endif diff --git a/diskquota_utility.c b/diskquota_utility.c index 801a6e2f32e..a5dfc567404 100644 --- a/diskquota_utility.c +++ b/diskquota_utility.c @@ -353,7 +353,7 @@ diskquota_start_worker(PG_FUNCTION_ARGS) extension_ddl_message->dbid = MyDatabaseId; launcher_pid = extension_ddl_message->launcher_pid; /* setup sig handler to diskquota launcher process */ - rc = kill(launcher_pid, SIGUSR1); + rc = kill(launcher_pid, SIGUSR2); LWLockRelease(diskquota_locks.extension_ddl_message_lock); if (rc == 0) { @@ -400,43 +400,6 @@ diskquota_start_worker(PG_FUNCTION_ARGS) PG_RETURN_VOID(); } -/* - * Dispatch pausing/resuming command to segments. 
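The helper deleted below shows the general QD-to-segment dispatch pattern in GPDB: build a command string, run it with CdbDispatchCommand(), then check every per-segment result. Its skeleton (illustrative; the header names are our assumption):

/* assumed headers: "cdb/cdbdisp_query.h", "cdb/cdbdispatchresult.h" */
CdbPgResults results = {NULL, 0};

CdbDispatchCommand("SELECT diskquota.pause()", DF_NONE, &results);
for (int i = 0; i < results.numResults; i++)
{
	if (PQresultStatus(results.pg_results[i]) != PGRES_TUPLES_OK)
	{
		cdbdisp_clearCdbPgResults(&results);
		ereport(ERROR, (errmsg("unexpected result from segment")));
	}
}
cdbdisp_clearCdbPgResults(&results);

After this patch, pause/resume reaches the segments through diskquota_fetch_table_stat() via update_monitor_db_mpp() instead of this ad-hoc dispatch.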
- */ -static void -dispatch_pause_or_resume_command(Oid dbid, bool pause_extension) -{ - CdbPgResults cdb_pgresults = {NULL, 0}; - int i; - StringInfoData sql; - - initStringInfo(&sql); - appendStringInfo(&sql, "SELECT diskquota.%s", pause_extension ? "pause" : "resume"); - if (dbid == InvalidOid) - { - appendStringInfo(&sql, "()"); - } - else - { - appendStringInfo(&sql, "(%d)", dbid); - } - CdbDispatchCommand(sql.data, DF_NONE, &cdb_pgresults); - - for (i = 0; i < cdb_pgresults.numResults; ++i) - { - PGresult *pgresult = cdb_pgresults.pg_results[i]; - if (PQresultStatus(pgresult) != PGRES_TUPLES_OK) - { - cdbdisp_clearCdbPgResults(&cdb_pgresults); - ereport(ERROR, (errmsg("[diskquota] %s extension on segments, encounter unexpected result from segment: %d", - pause_extension ? "pausing" : "resuming", PQresultStatus(pgresult)))); - } - } - - pfree(sql.data); - cdbdisp_clearCdbPgResults(&cdb_pgresults); -} - /* * this function is called by user. * pause diskquota in current or specific database. @@ -455,26 +418,17 @@ diskquota_pause(PG_FUNCTION_ARGS) { dbid = PG_GETARG_OID(0); } - - // pause current worker - LWLockAcquire(diskquota_locks.worker_map_lock, LW_EXCLUSIVE); + if (IS_QUERY_DISPATCHER()) { - bool found; - DiskQuotaWorkerEntry *hentry; - - hentry = (DiskQuotaWorkerEntry *)hash_search(disk_quota_worker_map, (void *)&dbid, - // segment dose not boot the worker - // this will add new element on segment - // delete this element in diskquota_resume() - HASH_ENTER, &found); - - hentry->is_paused = true; + // pause current worker + if (SPI_OK_CONNECT != SPI_connect()) + { + ereport(ERROR, + (errcode(ERRCODE_INTERNAL_ERROR), errmsg("[diskquota] unable to connect to execute SPI query"))); + } + update_monitor_db_mpp(dbid, PAUSE_DB_TO_MONITOR, EXTENSION_SCHEMA); + SPI_finish(); } - LWLockRelease(diskquota_locks.worker_map_lock); - - if (IS_QUERY_DISPATCHER()) - dispatch_pause_or_resume_command(PG_NARGS() == 0 ? InvalidOid : dbid, true /* pause_extension */); - PG_RETURN_VOID(); } @@ -497,28 +451,16 @@ diskquota_resume(PG_FUNCTION_ARGS) } // active current worker - LWLockAcquire(diskquota_locks.worker_map_lock, LW_EXCLUSIVE); + if (IS_QUERY_DISPATCHER()) { - bool found; - DiskQuotaWorkerEntry *hentry; - - hentry = (DiskQuotaWorkerEntry *)hash_search(disk_quota_worker_map, (void *)&dbid, HASH_FIND, &found); - if (found) - { - hentry->is_paused = false; - } - - // remove the element since we do not need any more - // ref diskquota_pause() - if (found && hentry->handle == NULL) + if (SPI_OK_CONNECT != SPI_connect()) { - hash_search(disk_quota_worker_map, (void *)&dbid, HASH_REMOVE, &found); + ereport(ERROR, + (errcode(ERRCODE_INTERNAL_ERROR), errmsg("[diskquota] unable to connect to execute SPI query"))); } + update_monitor_db_mpp(dbid, RESUME_DB_TO_MONITOR, EXTENSION_SCHEMA); + SPI_finish(); } - LWLockRelease(diskquota_locks.worker_map_lock); - - if (IS_QUERY_DISPATCHER()) - dispatch_pause_or_resume_command(PG_NARGS() == 0 ? InvalidOid : dbid, false /* pause_extension */); PG_RETURN_VOID(); } @@ -587,12 +529,6 @@ dq_object_access_hook_on_drop(void) { int rc, launcher_pid; - /* - * Remove the current database from monitored db cache - * on all segments and on coordinator. 
- */ - update_diskquota_db_list(MyDatabaseId, HASH_REMOVE); - if (!IS_QUERY_DISPATCHER()) { return; @@ -609,7 +545,7 @@ dq_object_access_hook_on_drop(void) extension_ddl_message->result = ERR_PENDING; extension_ddl_message->dbid = MyDatabaseId; launcher_pid = extension_ddl_message->launcher_pid; - rc = kill(launcher_pid, SIGUSR1); + rc = kill(launcher_pid, SIGUSR2); LWLockRelease(diskquota_locks.extension_ddl_message_lock); if (rc == 0) { @@ -1235,33 +1171,46 @@ get_size_in_mb(char *str) * Will print a WARNING to log if out of memory */ void -update_diskquota_db_list(Oid dbid, HASHACTION action) +update_monitor_db(Oid dbid, FetchTableStatType action) { bool found = false; - /* add/remove the dbid to monitoring database cache to filter out table not under - * monitoring in hook functions - */ + // add/remove the dbid to monitoring database cache to filter out table not under + // monitoring in hook functions - LWLockAcquire(diskquota_locks.monitoring_dbid_cache_lock, LW_EXCLUSIVE); - if (action == HASH_ENTER) + LWLockAcquire(diskquota_locks.monitored_dbid_cache_lock, LW_EXCLUSIVE); + if (action == ADD_DB_TO_MONITOR) { - Oid *entry = NULL; - entry = hash_search(monitoring_dbid_cache, &dbid, HASH_ENTER_NULL, &found); + MonitorDBEntry entry = hash_search(monitored_dbid_cache, &dbid, HASH_ENTER_NULL, &found); if (entry == NULL) { ereport(WARNING, (errmsg("can't alloc memory on dbid cache, there ary too many databases to monitor"))); } + entry->paused = false; + pg_atomic_init_u32(&(entry->epoch), 0); } - else if (action == HASH_REMOVE) + else if (action == REMOVE_DB_FROM_BEING_MONITORED) { - hash_search(monitoring_dbid_cache, &dbid, HASH_REMOVE, &found); - if (!found) + hash_search(monitored_dbid_cache, &dbid, HASH_REMOVE, &found); + } + else if (action == PAUSE_DB_TO_MONITOR) + { + MonitorDBEntry entry = hash_search(monitored_dbid_cache, &dbid, HASH_FIND, &found); + if (found) + { + entry->paused = true; + } + } + else if (action == RESUME_DB_TO_MONITOR) + { + MonitorDBEntry entry = hash_search(monitored_dbid_cache, &dbid, HASH_FIND, &found); + + if (found) { - ereport(WARNING, (errmsg("cannot remove the database from db list, dbid not found"))); + entry->paused = false; } } - LWLockRelease(diskquota_locks.monitoring_dbid_cache_lock); + LWLockRelease(diskquota_locks.monitored_dbid_cache_lock); } /* diff --git a/gp_activetable.c b/gp_activetable.c index f15bdbe4371..e280ed2ce50 100644 --- a/gp_activetable.c +++ b/gp_activetable.c @@ -47,9 +47,17 @@ typedef struct DiskQuotaSetOFCache HASH_SEQ_STATUS pos; } DiskQuotaSetOFCache; -HTAB *active_tables_map = NULL; -HTAB *monitoring_dbid_cache = NULL; -HTAB *altered_reloid_cache = NULL; +HTAB *active_tables_map = NULL; // Set + +/* + * monitored_dbid_cache is a allow list for diskquota + * to know which databases it need to monitor. + * + * dbid will be added to it when creating diskquota extension + * dbid will be removed from it when droping diskquota extension + */ +HTAB *monitored_dbid_cache = NULL; // Map +HTAB *altered_reloid_cache = NULL; // Set /* active table hooks which detect the disk file size change. 
*/ static file_create_hook_type prev_file_create_hook = NULL; @@ -74,6 +82,7 @@ static void report_active_table_helper(const RelFileNodeBackend *relFi static void remove_from_active_table_map(const RelFileNodeBackend *relFileNode); static void report_relation_cache_helper(Oid relid); static void report_altered_reloid(Oid reloid); +static Oid get_dbid(ArrayType *array); void init_active_table_hook(void); void init_shm_worker_active_tables(void); @@ -244,7 +253,9 @@ report_relation_cache_helper(Oid relid) * this operation is read-only and does not require absolutely exact. * read the cache with out shared lock. */ - hash_search(monitoring_dbid_cache, &MyDatabaseId, HASH_FIND, &found); + LWLockAcquire(diskquota_locks.monitored_dbid_cache_lock, LW_SHARED); + hash_search(monitored_dbid_cache, &MyDatabaseId, HASH_FIND, &found); + LWLockRelease(diskquota_locks.monitored_dbid_cache_lock); if (!found) { return; @@ -273,11 +284,12 @@ report_active_table_helper(const RelFileNodeBackend *relFileNode) return; } + LWLockAcquire(diskquota_locks.monitored_dbid_cache_lock, LW_SHARED); /* do not collect active table info when the database is not under monitoring. * this operation is read-only and does not require absolutely exact. * read the cache with out shared lock */ - hash_search(monitoring_dbid_cache, &dbid, HASH_FIND, &found); - + hash_search(monitored_dbid_cache, &dbid, HASH_FIND, &found); + LWLockRelease(diskquota_locks.monitored_dbid_cache_lock); if (!found) { return; @@ -396,6 +408,7 @@ diskquota_fetch_table_stat(PG_FUNCTION_ARGS) int32 mode = PG_GETARG_INT32(0); AttInMetadata *attinmeta; bool isFirstCall = true; + Oid dbid; HTAB *localCacheTable = NULL; DiskQuotaSetOFCache *cache = NULL; @@ -433,12 +446,23 @@ diskquota_fetch_table_stat(PG_FUNCTION_ARGS) case FETCH_ACTIVE_SIZE: localCacheTable = get_active_tables_stats(PG_GETARG_ARRAYTYPE_P(1)); break; + /*TODO: add another UDF to update the monitored_db_cache */ case ADD_DB_TO_MONITOR: - update_diskquota_db_list(MyDatabaseId, HASH_ENTER); - break; + dbid = get_dbid(PG_GETARG_ARRAYTYPE_P(1)); + update_monitor_db(dbid, ADD_DB_TO_MONITOR); + PG_RETURN_NULL(); case REMOVE_DB_FROM_BEING_MONITORED: - update_diskquota_db_list(MyDatabaseId, HASH_REMOVE); - break; + dbid = get_dbid(PG_GETARG_ARRAYTYPE_P(1)); + update_monitor_db(dbid, REMOVE_DB_FROM_BEING_MONITORED); + PG_RETURN_NULL(); + case PAUSE_DB_TO_MONITOR: + dbid = get_dbid(PG_GETARG_ARRAYTYPE_P(1)); + update_monitor_db(dbid, PAUSE_DB_TO_MONITOR); + PG_RETURN_NULL(); + case RESUME_DB_TO_MONITOR: + dbid = get_dbid(PG_GETARG_ARRAYTYPE_P(1)); + update_monitor_db(dbid, RESUME_DB_TO_MONITOR); + PG_RETURN_NULL(); default: ereport(ERROR, (errmsg("Unused mode number %d, transaction will be aborted", mode))); break; @@ -512,6 +536,22 @@ diskquota_fetch_table_stat(PG_FUNCTION_ARGS) SRF_RETURN_DONE(funcctx); } +static Oid +get_dbid(ArrayType *array) +{ + Assert(ARR_ELEMTYPE(array) == OIDOID); + char *ptr; + bool typbyval; + int16 typlen; + char typalign; + Oid dbid; + + get_typlenbyvalalign(ARR_ELEMTYPE(array), &typlen, &typbyval, &typalign); + ptr = ARR_DATA_PTR(array); + dbid = DatumGetObjectId(fetch_att(ptr, typbyval, typlen)); + return dbid; +} + /* * Call pg_table_size to calcualte the * active table size on each segments. 
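That last comment describes the core of the per-segment size calculation: invoking the built-in pg_table_size() directly from C. A minimal sketch (the helper name is ours, not diskquota's):

#include "postgres.h"
#include "fmgr.h"
#include "utils/builtins.h"

/* size in bytes of one relation on the current segment (illustrative) */
static int64
table_size_on_this_segment(Oid relid)
{
	return DatumGetInt64(DirectFunctionCall1(pg_table_size,
	                                         ObjectIdGetDatum(relid)));
}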
diff --git a/gp_activetable.h b/gp_activetable.h index 49aa7a5fb4d..d0a07baf4a4 100644 --- a/gp_activetable.h +++ b/gp_activetable.h @@ -41,10 +41,9 @@ extern HTAB *gp_fetch_active_tables(bool force); extern void init_active_table_hook(void); extern void init_shm_worker_active_tables(void); extern void init_lock_active_tables(void); -extern void update_diskquota_db_list(Oid dbid, HASHACTION action); extern HTAB *active_tables_map; -extern HTAB *monitoring_dbid_cache; +extern HTAB *monitored_dbid_cache; extern HTAB *altered_reloid_cache; #define atooid(x) ((Oid)strtoul((x), NULL, 10)) diff --git a/quotamodel.c b/quotamodel.c index 62d4c9d23b6..47cd7fa2451 100644 --- a/quotamodel.c +++ b/quotamodel.c @@ -44,6 +44,7 @@ /* cluster level max size of rejectmap */ #define MAX_DISK_QUOTA_REJECT_ENTRIES (1024 * 1024) +#define MAX_TABLES (1024L * 8) /* cluster level init size of rejectmap */ #define INIT_DISK_QUOTA_REJECT_ENTRIES 8192 /* per database level max size of rejectmap */ @@ -61,6 +62,7 @@ typedef struct GlobalRejectMapEntry GlobalRejectMapEntry; typedef struct LocalRejectMapEntry LocalRejectMapEntry; int SEGCOUNT = 0; + /* * local cache of table disk size and corresponding schema and owner */ @@ -78,6 +80,9 @@ struct TableSizeEntry bool need_flush; /* whether need to flush to table table_size */ }; +/* + * key of an entry in the quota maps + */ struct QuotaMapEntryKey { Oid keys[MAX_NUM_KEYS_QUOTA_MAP]; @@ -164,7 +169,6 @@ static HTAB *local_disk_quota_reject_map = NULL; static shmem_startup_hook_type prev_shmem_startup_hook = NULL; /* functions to maintain the quota maps */ -static void init_all_quota_maps(void); static void update_size_for_quota(int64 size, QuotaType type, Oid *keys, int16 segid); static void update_limit_for_quota(int64 limit, float segratio, QuotaType type, Oid *keys); static void remove_quota(QuotaType type, Oid *keys, int16 segid); @@ -188,25 +192,7 @@ static void init_lwlocks(void); static void export_exceeded_error(GlobalRejectMapEntry *entry, bool skip_name); void truncateStringInfo(StringInfo str, int nchars); - -static void -init_all_quota_maps(void) -{ - HASHCTL hash_ctl = {0}; - hash_ctl.entrysize = sizeof(struct QuotaMapEntry); - hash_ctl.hcxt = TopMemoryContext; - for (QuotaType type = 0; type < NUM_QUOTA_TYPES; ++type) - { - hash_ctl.keysize = sizeof(struct QuotaMapEntryKey); - hash_ctl.hash = tag_hash; - if (quota_info[type].map != NULL) - { - hash_destroy(quota_info[type].map); - } - quota_info[type].map = - hash_create(quota_info[type].map_name, 1024L, &hash_ctl, HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); - } -} +static void format_name(const char *prefix, uint32 id, StringInfo str); /* add a new entry quota or update the old entry quota */ static void @@ -417,21 +403,12 @@ disk_quota_shmem_startup(void) memset(&hash_ctl, 0, sizeof(hash_ctl)); hash_ctl.keysize = sizeof(Oid); - hash_ctl.entrysize = sizeof(Oid); + hash_ctl.entrysize = sizeof(struct MonitorDBEntryStruct); hash_ctl.hash = oid_hash; - monitoring_dbid_cache = ShmemInitHash("table oid cache which shoud tracking", MAX_NUM_MONITORED_DB, - MAX_NUM_MONITORED_DB, &hash_ctl, HASH_ELEM | HASH_FUNCTION); - - /* use disk_quota_worker_map to manage diskquota worker processes.
*/ - memset(&hash_ctl, 0, sizeof(hash_ctl)); - hash_ctl.keysize = sizeof(Oid); - hash_ctl.entrysize = sizeof(DiskQuotaWorkerEntry); - hash_ctl.hash = oid_hash; - - disk_quota_worker_map = ShmemInitHash("disk quota worker map", MAX_NUM_MONITORED_DB, MAX_NUM_MONITORED_DB, - &hash_ctl, HASH_ELEM | HASH_FUNCTION); - + monitored_dbid_cache = ShmemInitHash("table oid cache which should be tracked", MAX_NUM_MONITORED_DB, + MAX_NUM_MONITORED_DB, &hash_ctl, HASH_ELEM | HASH_FUNCTION); + init_launcher_shmem(); LWLockRelease(AddinShmemInitLock); } @@ -443,7 +420,7 @@ * extension_ddl_message. * extension_ddl_lock is used to avoid concurrent diskquota * extension ddl(create/drop) command. - * monitoring_dbid_cache_lock is used to shared `monitoring_dbid_cache` on segment process. + * monitored_dbid_cache_lock is used to share `monitored_dbid_cache` with segment processes. */ static void init_lwlocks(void) { @@ -452,12 +429,23 @@ diskquota_locks.reject_map_lock = LWLockAssign(); diskquota_locks.extension_ddl_message_lock = LWLockAssign(); diskquota_locks.extension_ddl_lock = LWLockAssign(); - diskquota_locks.monitoring_dbid_cache_lock = LWLockAssign(); + diskquota_locks.monitored_dbid_cache_lock = LWLockAssign(); diskquota_locks.relation_cache_lock = LWLockAssign(); - diskquota_locks.worker_map_lock = LWLockAssign(); + diskquota_locks.dblist_lock = LWLockAssign(); + diskquota_locks.workerlist_lock = LWLockAssign(); diskquota_locks.altered_reloid_cache_lock = LWLockAssign(); } +static Size +diskquota_worker_shmem_size() +{ + Size size; + size = hash_estimate_size(1024 * 1024, sizeof(TableSizeEntry)); + size = add_size(size, hash_estimate_size(MAX_LOCAL_DISK_QUOTA_REJECT_ENTRIES, sizeof(LocalRejectMapEntry))); + size = add_size(size, hash_estimate_size(1024L, sizeof(struct QuotaMapEntry)) * NUM_QUOTA_TYPES); + return size; +} + /* * DiskQuotaShmemSize * Compute space needed for diskquota-related shared memory @@ -466,15 +454,16 @@ static Size DiskQuotaShmemSize(void) { Size size; - size = sizeof(ExtensionDDLMessage); size = add_size(size, hash_estimate_size(MAX_DISK_QUOTA_REJECT_ENTRIES, sizeof(GlobalRejectMapEntry))); size = add_size(size, hash_estimate_size(diskquota_max_active_tables, sizeof(DiskQuotaActiveTableEntry))); size = add_size(size, hash_estimate_size(diskquota_max_active_tables, sizeof(DiskQuotaRelationCacheEntry))); size = add_size(size, hash_estimate_size(diskquota_max_active_tables, sizeof(DiskQuotaRelidCacheEntry))); - size = add_size(size, hash_estimate_size(MAX_NUM_MONITORED_DB, sizeof(Oid))); - size = add_size(size, hash_estimate_size(MAX_NUM_MONITORED_DB, sizeof(DiskQuotaWorkerEntry))); size = add_size(size, hash_estimate_size(diskquota_max_active_tables, sizeof(Oid))); + size = add_size(size, hash_estimate_size(MAX_NUM_MONITORED_DB, + sizeof(struct MonitorDBEntryStruct))); // monitored_dbid_cache + size = add_size(size, diskquota_launcher_shmem_size()); + size = add_size(size, diskquota_worker_shmem_size() * MAX_NUM_MONITORED_DB); return size; } @@ -483,63 +472,123 @@ DiskQuotaShmemSize(void) * Init disk quota model when the worker process first starts. */ void -init_disk_quota_model(void) +init_disk_quota_model(uint32 id) { - HASHCTL hash_ctl; + HASHCTL hash_ctl; + StringInfoData str; + initStringInfo(&str); - /* initialize hash table for table/schema/role etc.
*/ memset(&hash_ctl, 0, sizeof(hash_ctl)); hash_ctl.keysize = sizeof(TableEntryKey); hash_ctl.entrysize = sizeof(TableSizeEntry); - hash_ctl.hcxt = CurrentMemoryContext; hash_ctl.hash = tag_hash; - table_size_map = hash_create("TableSizeEntry map", 1024 * 8, &hash_ctl, HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); - - init_all_quota_maps(); + format_name("TableSizeEntrymap", id, &str); + table_size_map = ShmemInitHash(str.data, MAX_TABLES, MAX_TABLES, &hash_ctl, HASH_ELEM | HASH_FUNCTION); - /* - * local diskquota reject map is used to reduce the lock hold time of - * rejectmap in shared memory - */ + /* for localrejectmap */ memset(&hash_ctl, 0, sizeof(hash_ctl)); hash_ctl.keysize = sizeof(RejectMapEntry); hash_ctl.entrysize = sizeof(LocalRejectMapEntry); - hash_ctl.hcxt = CurrentMemoryContext; hash_ctl.hash = tag_hash; - + /* WARNING: the max length of the map name is 48 */ + format_name("localrejectmap", id, &str); local_disk_quota_reject_map = - hash_create("local rejectmap whose quota limitation is reached", MAX_LOCAL_DISK_QUOTA_REJECT_ENTRIES, - &hash_ctl, HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); + ShmemInitHash(str.data, MAX_LOCAL_DISK_QUOTA_REJECT_ENTRIES, MAX_LOCAL_DISK_QUOTA_REJECT_ENTRIES, &hash_ctl, + HASH_ELEM | HASH_FUNCTION); + + /* for quota_info */ + + for (QuotaType type = 0; type < NUM_QUOTA_TYPES; ++type) + { + memset(&hash_ctl, 0, sizeof(hash_ctl)); + hash_ctl.entrysize = sizeof(struct QuotaMapEntry); + hash_ctl.keysize = sizeof(struct QuotaMapEntryKey); + hash_ctl.hash = tag_hash; + format_name(quota_info[type].map_name, id, &str); + quota_info[type].map = ShmemInitHash(str.data, 1024L, 1024L, &hash_ctl, HASH_ELEM | HASH_FUNCTION); + } + pfree(str.data); } -static void -dispatch_my_db_to_all_segments(void) +/* + * Reset the shared memory of the diskquota worker + * + * Suppose a user first drops the diskquota extension, then recreates it in + * the same database; the diskquota worker will get the same memory address + * as before. + * + * Since the shared memory cannot be recycled, we just clean up the shared + * memory when dropping the extension:
+ * - memset diskquotaDBStatus to 0 + * - clean all items in the maps + */ +void +vacuum_disk_quota_model(uint32 id) { - /* Add current database to the monitored db cache on all segments */ - int ret = SPI_execute_with_args( - "SELECT diskquota.diskquota_fetch_table_stat($1, ARRAY[]::oid[]) FROM gp_dist_random('gp_id')", 1, - (Oid[]){ - INT4OID, - }, - (Datum[]){ - Int32GetDatum(ADD_DB_TO_MONITOR), - }, - NULL, true, 0); + HASH_SEQ_STATUS iter; + TableSizeEntry *tsentry = NULL; + LocalRejectMapEntry *localrejectentry; + struct QuotaMapEntry *qentry; - ereportif(ret != SPI_OK_SELECT, ERROR, - (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("[diskquota] check diskquota state SPI_execute failed: error code %d", ret))); + HASHCTL hash_ctl; + StringInfoData str; + initStringInfo(&str); - /* Add current database to the monitored db cache on coordinator */ - update_diskquota_db_list(MyDatabaseId, HASH_ENTER); + /* table_size_map */ + memset(&hash_ctl, 0, sizeof(hash_ctl)); + hash_ctl.keysize = sizeof(TableEntryKey); + hash_ctl.entrysize = sizeof(TableSizeEntry); + hash_ctl.hash = tag_hash; + + format_name("TableSizeEntrymap", id, &str); + table_size_map = ShmemInitHash(str.data, MAX_TABLES, MAX_TABLES, &hash_ctl, HASH_ELEM | HASH_FUNCTION); + hash_seq_init(&iter, table_size_map); + while ((tsentry = hash_seq_search(&iter)) != NULL) + { + hash_search(table_size_map, &tsentry->reloid, HASH_REMOVE, NULL); + } + + /* localrejectmap */ + memset(&hash_ctl, 0, sizeof(hash_ctl)); + hash_ctl.keysize = sizeof(RejectMapEntry); + hash_ctl.entrysize = sizeof(LocalRejectMapEntry); + hash_ctl.hash = tag_hash; + /* WARNING: the max length of the map name is 48 */ + format_name("localrejectmap", id, &str); + local_disk_quota_reject_map = + ShmemInitHash(str.data, MAX_LOCAL_DISK_QUOTA_REJECT_ENTRIES, MAX_LOCAL_DISK_QUOTA_REJECT_ENTRIES, &hash_ctl, + HASH_ELEM | HASH_FUNCTION); + hash_seq_init(&iter, local_disk_quota_reject_map); + while ((localrejectentry = hash_seq_search(&iter)) != NULL) + { + hash_search(local_disk_quota_reject_map, &localrejectentry->keyitem, HASH_REMOVE, NULL); + } + + /* quota_info */ + + for (QuotaType type = 0; type < NUM_QUOTA_TYPES; ++type) + { + memset(&hash_ctl, 0, sizeof(hash_ctl)); + hash_ctl.entrysize = sizeof(struct QuotaMapEntry); + hash_ctl.keysize = sizeof(struct QuotaMapEntryKey); + hash_ctl.hash = tag_hash; + format_name(quota_info[type].map_name, id, &str); + quota_info[type].map = ShmemInitHash(str.data, 1024L, 1024L, &hash_ctl, HASH_ELEM | HASH_FUNCTION); + hash_seq_init(&iter, quota_info[type].map); + while ((qentry = hash_seq_search(&iter)) != NULL) + { + hash_search(quota_info[type].map, &qentry->keys, HASH_REMOVE, NULL); + } + } + pfree(str.data); } /* * Check whether the diskquota state is ready */ bool -check_diskquota_state_is_ready(void) +check_diskquota_state_is_ready() { bool is_ready = false; bool connected = false; @@ -563,8 +612,7 @@ check_diskquota_state_is_ready(void) connected = true; PushActiveSnapshot(GetTransactionSnapshot()); pushed_active_snap = true; - dispatch_my_db_to_all_segments(); - is_ready = do_check_diskquota_state_is_ready(); + is_ready = do_check_diskquota_state_is_ready(); } PG_CATCH(); { @@ -2027,3 +2075,31 @@ show_rejectmap(PG_FUNCTION_ARGS) SRF_RETURN_DONE(funcctx); } + +void +update_monitor_db_mpp(Oid dbid, FetchTableStatType action, const char *schema) +{ + StringInfoData sql_command; + initStringInfo(&sql_command); + appendStringInfo(&sql_command, + "SELECT %s.diskquota_fetch_table_stat(%d, '{%d}'::oid[]) FROM gp_dist_random('gp_id')",
schema, + action, dbid); + /* Dispatch the monitored db cache update to all segments */ + int ret = SPI_execute(sql_command.data, true, 0); + pfree(sql_command.data); + + ereportif(ret != SPI_OK_SELECT, ERROR, + (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("[diskquota] update monitored db cache SPI_execute failed: error code %d", ret))); + + /* Apply the same update to the monitored db cache on the coordinator */ + update_monitor_db(dbid, action); +} + +static void +format_name(const char *prefix, uint32 id, StringInfo str) +{ + resetStringInfo(str); + appendStringInfo(str, "%s_%u", prefix, id); + Assert(str->len <= SHMEM_INDEX_KEYSIZE); +} diff --git a/tests/regress/diskquota_schedule b/tests/regress/diskquota_schedule index 61ccc9337cd..ee2fe947b7b 100644 --- a/tests/regress/diskquota_schedule +++ b/tests/regress/diskquota_schedule @@ -9,7 +9,7 @@ test: test_pause_and_resume test: test_pause_and_resume_multiple_db test: test_drop_after_pause test: test_show_status -test: test_update_db_cache +#test: test_update_db_cache test: test_quota_view_no_table # disable this test due to GPDB behavior change # test: test_table_size @@ -38,5 +38,6 @@ test: test_ctas_tablespace_role test: test_ctas_tablespace_schema test: test_default_tablespace test: test_tablespace_diff_schema +test: test_worker_schedule test: test_drop_extension test: reset_config diff --git a/tests/regress/expected/test_extension.out b/tests/regress/expected/test_extension.out index 794b1bd854b..036ab1f4b0a 100644 --- a/tests/regress/expected/test_extension.out +++ b/tests/regress/expected/test_extension.out @@ -11,12 +11,54 @@ CREATE DATABASE dbx7 ; CREATE DATABASE dbx8 ; CREATE DATABASE dbx9 ; CREATE DATABASE dbx10 ; +--start_ignore +\! gpconfig -c diskquota.max_workers -v 20 --skipvalidation +20220802:15:47:27:028366 gpconfig:wxiaoran-a01:xiwang-[INFO]:-completed successfully with parameters '-c diskquota.max_workers -v 20 --skipvalidation' +\! gpstop -arf +20220802:16:43:25:058210 gpstop:wxiaoran-a01:xiwang-[INFO]:-Starting gpstop with args: -arf +20220802:16:43:25:058210 gpstop:wxiaoran-a01:xiwang-[INFO]:-Gathering information and validating the environment... +20220802:16:43:25:058210 gpstop:wxiaoran-a01:xiwang-[INFO]:-Obtaining Greenplum Master catalog information +20220802:16:43:25:058210 gpstop:wxiaoran-a01:xiwang-[INFO]:-Obtaining Segment details from master...
+20220802:16:43:25:058210 gpstop:wxiaoran-a01:xiwang-[INFO]:-Greenplum Version: 'postgres (Greenplum Database) 6.20.3+dev.5.g4bc90eab02 build dev' +20220802:16:43:25:058210 gpstop:wxiaoran-a01:xiwang-[INFO]:-Commencing Master instance shutdown with mode='fast' +20220802:16:43:25:058210 gpstop:wxiaoran-a01:xiwang-[INFO]:-Master segment instance directory=/Users/xiwang/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1/ +20220802:16:43:25:058210 gpstop:wxiaoran-a01:xiwang-[INFO]:-Attempting forceful termination of any leftover master process +20220802:16:43:25:058210 gpstop:wxiaoran-a01:xiwang-[INFO]:-Terminating processes for segment /Users/xiwang/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1/ +20220802:16:43:25:058210 gpstop:wxiaoran-a01:xiwang-[INFO]:-Stopping master standby host wxiaoran-a01.vmware.com mode=fast +20220802:16:43:26:058210 gpstop:wxiaoran-a01:xiwang-[INFO]:-Successfully shutdown standby process on wxiaoran-a01.vmware.com +20220802:16:43:26:058210 gpstop:wxiaoran-a01:xiwang-[INFO]:-Targeting dbid [2, 5, 3, 6, 4, 7] for shutdown +20220802:16:43:26:058210 gpstop:wxiaoran-a01:xiwang-[INFO]:-Commencing parallel primary segment instance shutdown, please wait... +20220802:16:43:26:058210 gpstop:wxiaoran-a01:xiwang-[INFO]:-0.00% of jobs completed +20220802:16:43:28:058210 gpstop:wxiaoran-a01:xiwang-[INFO]:-100.00% of jobs completed +20220802:16:43:28:058210 gpstop:wxiaoran-a01:xiwang-[INFO]:-Commencing parallel mirror segment instance shutdown, please wait... +20220802:16:43:28:058210 gpstop:wxiaoran-a01:xiwang-[INFO]:-0.00% of jobs completed +20220802:16:43:29:058210 gpstop:wxiaoran-a01:xiwang-[INFO]:-100.00% of jobs completed +20220802:16:43:29:058210 gpstop:wxiaoran-a01:xiwang-[INFO]:----------------------------------------------------- +20220802:16:43:29:058210 gpstop:wxiaoran-a01:xiwang-[INFO]:- Segments stopped successfully = 6 +20220802:16:43:29:058210 gpstop:wxiaoran-a01:xiwang-[INFO]:- Segments with errors during stop = 0 +20220802:16:43:29:058210 gpstop:wxiaoran-a01:xiwang-[INFO]:----------------------------------------------------- +20220802:16:43:29:058210 gpstop:wxiaoran-a01:xiwang-[INFO]:-Successfully shutdown 6 of 6 segment instances +20220802:16:43:29:058210 gpstop:wxiaoran-a01:xiwang-[INFO]:-Database successfully shutdown with no errors reported +20220802:16:43:29:058210 gpstop:wxiaoran-a01:xiwang-[INFO]:-Cleaning up leftover gpmmon process +20220802:16:43:29:058210 gpstop:wxiaoran-a01:xiwang-[INFO]:-No leftover gpmmon process found +20220802:16:43:29:058210 gpstop:wxiaoran-a01:xiwang-[INFO]:-Cleaning up leftover gpsmon processes +20220802:16:43:29:058210 gpstop:wxiaoran-a01:xiwang-[INFO]:-No leftover gpsmon processes on some hosts. not attempting forceful termination on these hosts +20220802:16:43:29:058210 gpstop:wxiaoran-a01:xiwang-[INFO]:-Cleaning up leftover shared memory +20220802:16:43:30:058210 gpstop:wxiaoran-a01:xiwang-[INFO]:-Restarting System... +--end_ignore +\c show max_worker_processes; max_worker_processes ---------------------- 20 (1 row) +show diskquota.max_workers; + diskquota.max_workers +----------------------- + 20 +(1 row) + \! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 2 -- FIXME: We need to sleep for a while each time after CREATE EXTENSION and @@ -283,18 +325,24 @@ ERROR: schema's disk space quota exceeded with name: sx DROP TABLE SX.a; \c dbx9 CREATE EXTENSION diskquota; -ERROR: [diskquota] failed to create diskquota extension: too many databases to monitor (diskquota_utility.c:376) \! 
sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l -11 +12 SELECT diskquota.wait_for_worker_new_epoch(); -ERROR: schema "diskquota" does not exist + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + \c dbx10 CREATE EXTENSION diskquota; -ERROR: [diskquota] failed to create diskquota extension: too many databases to monitor (diskquota_utility.c:376) \! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l -11 +13 SELECT diskquota.wait_for_worker_new_epoch(); -ERROR: schema "diskquota" does not exist + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + \c dbx0 SELECT diskquota.pause(); pause @@ -310,7 +358,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); DROP EXTENSION diskquota; \! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l -10 +12 \c dbx1 SELECT diskquota.pause(); pause @@ -326,7 +374,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); DROP EXTENSION diskquota; \! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l -9 +11 \c dbx2 SELECT diskquota.pause(); pause @@ -342,7 +390,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); DROP EXTENSION diskquota; \! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l -8 +10 \c dbx3 SELECT diskquota.pause(); pause @@ -358,7 +406,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); DROP EXTENSION diskquota; \! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l -7 +9 \c dbx4 SELECT diskquota.pause(); pause @@ -374,7 +422,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); DROP EXTENSION diskquota; \! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l -6 +8 \c dbx5 SELECT diskquota.pause(); pause @@ -390,7 +438,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); DROP EXTENSION diskquota; \! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l -5 +7 \c dbx6 SELECT diskquota.pause(); pause @@ -406,7 +454,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); DROP EXTENSION diskquota; \! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l -4 +6 \c dbx7 SELECT diskquota.pause(); pause @@ -422,7 +470,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); DROP EXTENSION diskquota; \! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l -3 +5 \c dbx8 SELECT diskquota.pause(); pause @@ -438,23 +486,37 @@ SELECT diskquota.wait_for_worker_new_epoch(); DROP EXTENSION diskquota; \! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l -2 +4 \c dbx9 SELECT diskquota.pause(); -ERROR: schema "diskquota" does not exist + pause +------- + +(1 row) + SELECT diskquota.wait_for_worker_new_epoch(); -ERROR: schema "diskquota" does not exist + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + DROP EXTENSION diskquota; -ERROR: extension "diskquota" does not exist \! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l -2 +3 \c dbx10 SELECT diskquota.pause(); -ERROR: schema "diskquota" does not exist + pause +------- + +(1 row) + SELECT diskquota.wait_for_worker_new_epoch(); -ERROR: schema "diskquota" does not exist + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + DROP EXTENSION diskquota; -ERROR: extension "diskquota" does not exist \! 
sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 2 \c contrib_regression @@ -469,3 +531,45 @@ DROP DATABASE dbx7 ; DROP DATABASE dbx8 ; DROP DATABASE dbx9 ; DROP DATABASE dbx10 ; +--start_ignore +\! gpconfig -c diskquota.max_workers -v 1 --skipvalidation +20220802:15:49:09:029439 gpconfig:wxiaoran-a01:xiwang-[INFO]:-completed successfully with parameters '-c diskquota.max_workers -v 1 --skipvalidation' +\! gpstop -arf; +20220802:16:32:34:046832 gpstop:wxiaoran-a01:xiwang-[INFO]:-Starting gpstop with args: -arf +20220802:16:32:34:046832 gpstop:wxiaoran-a01:xiwang-[INFO]:-Gathering information and validating the environment... +20220802:16:32:34:046832 gpstop:wxiaoran-a01:xiwang-[INFO]:-Obtaining Greenplum Master catalog information +20220802:16:32:34:046832 gpstop:wxiaoran-a01:xiwang-[INFO]:-Obtaining Segment details from master... +20220802:16:32:34:046832 gpstop:wxiaoran-a01:xiwang-[INFO]:-Greenplum Version: 'postgres (Greenplum Database) 6.20.3+dev.5.g4bc90eab02 build dev' +20220802:16:32:34:046832 gpstop:wxiaoran-a01:xiwang-[INFO]:-Commencing Master instance shutdown with mode='fast' +20220802:16:32:34:046832 gpstop:wxiaoran-a01:xiwang-[INFO]:-Master segment instance directory=/Users/xiwang/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1/ +20220802:16:32:35:046832 gpstop:wxiaoran-a01:xiwang-[INFO]:-Attempting forceful termination of any leftover master process +20220802:16:32:35:046832 gpstop:wxiaoran-a01:xiwang-[INFO]:-Terminating processes for segment /Users/xiwang/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1/ +20220802:16:32:36:046832 gpstop:wxiaoran-a01:xiwang-[INFO]:-Stopping master standby host wxiaoran-a01.vmware.com mode=fast +20220802:16:32:37:046832 gpstop:wxiaoran-a01:xiwang-[INFO]:-Successfully shutdown standby process on wxiaoran-a01.vmware.com +20220802:16:32:37:046832 gpstop:wxiaoran-a01:xiwang-[INFO]:-Targeting dbid [2, 5, 3, 6, 4, 7] for shutdown +20220802:16:32:37:046832 gpstop:wxiaoran-a01:xiwang-[INFO]:-Commencing parallel primary segment instance shutdown, please wait... +20220802:16:32:37:046832 gpstop:wxiaoran-a01:xiwang-[INFO]:-0.00% of jobs completed +20220802:16:32:40:046832 gpstop:wxiaoran-a01:xiwang-[INFO]:-100.00% of jobs completed +20220802:16:32:40:046832 gpstop:wxiaoran-a01:xiwang-[INFO]:-Commencing parallel mirror segment instance shutdown, please wait... 
+20220802:16:32:40:046832 gpstop:wxiaoran-a01:xiwang-[INFO]:-0.00% of jobs completed +20220802:16:32:41:046832 gpstop:wxiaoran-a01:xiwang-[INFO]:-100.00% of jobs completed +20220802:16:32:41:046832 gpstop:wxiaoran-a01:xiwang-[INFO]:----------------------------------------------------- +20220802:16:32:41:046832 gpstop:wxiaoran-a01:xiwang-[INFO]:- Segments stopped successfully = 6 +20220802:16:32:41:046832 gpstop:wxiaoran-a01:xiwang-[INFO]:- Segments with errors during stop = 0 +20220802:16:32:41:046832 gpstop:wxiaoran-a01:xiwang-[INFO]:----------------------------------------------------- +20220802:16:32:41:046832 gpstop:wxiaoran-a01:xiwang-[INFO]:-Successfully shutdown 6 of 6 segment instances +20220802:16:32:41:046832 gpstop:wxiaoran-a01:xiwang-[INFO]:-Database successfully shutdown with no errors reported +20220802:16:32:41:046832 gpstop:wxiaoran-a01:xiwang-[INFO]:-Cleaning up leftover gpmmon process +20220802:16:32:42:046832 gpstop:wxiaoran-a01:xiwang-[INFO]:-No leftover gpmmon process found +20220802:16:32:42:046832 gpstop:wxiaoran-a01:xiwang-[INFO]:-Cleaning up leftover gpsmon processes +20220802:16:32:42:046832 gpstop:wxiaoran-a01:xiwang-[INFO]:-No leftover gpsmon processes on some hosts. not attempting forceful termination on these hosts +20220802:16:32:42:046832 gpstop:wxiaoran-a01:xiwang-[INFO]:-Cleaning up leftover shared memory +20220802:16:32:44:046832 gpstop:wxiaoran-a01:xiwang-[INFO]:-Restarting System... +--end_ignore +\c +show diskquota.max_workers; + diskquota.max_workers +----------------------- + 1 +(1 row) + diff --git a/tests/regress/expected/test_update_db_cache.out b/tests/regress/expected/test_update_db_cache.out index 6d45170a645..f2e1fc60b18 100644 --- a/tests/regress/expected/test_update_db_cache.out +++ b/tests/regress/expected/test_update_db_cache.out @@ -57,6 +57,12 @@ FROM gp_dist_random('gp_id'); DROP TABLE t; DROP TABLE t_no_extension; +SELECT diskquota.pause(); + pause +------- + +(1 row) + DROP EXTENSION diskquota; \c contrib_regression DROP DATABASE test_db_cache; diff --git a/tests/regress/expected/test_worker_schedule.out b/tests/regress/expected/test_worker_schedule.out new file mode 100644 index 00000000000..8003a4e230c --- /dev/null +++ b/tests/regress/expected/test_worker_schedule.out @@ -0,0 +1,648 @@ +-- start_ignore +\c +DROP DATABASE IF EXISTS t1; +NOTICE: database "t1" does not exist, skipping +DROP DATABASE IF EXISTS t3; +NOTICE: database "t3" does not exist, skipping +DROP DATABASE IF EXISTS t4; +NOTICE: database "t4" does not exist, skipping +DROP DATABASE IF EXISTS t5; +NOTICE: database "t5" does not exist, skipping +DROP DATABASE IF EXISTS t6; +NOTICE: database "t6" does not exist, skipping +DROP DATABASE IF EXISTS t7; +NOTICE: database "t7" does not exist, skipping +DROP DATABASE IF EXISTS t8; +NOTICE: database "t8" does not exist, skipping +DROP DATABASE IF EXISTS t9; +NOTICE: database "t9" does not exist, skipping +DROP DATABASE IF EXISTS t10; +NOTICE: database "t10" does not exist, skipping +DROP DATABASE IF EXISTS t11; +NOTICE: database "t11" does not exist, skipping +DROP DATABASE IF EXISTS t12; +NOTICE: database "t12" does not exist, skipping +CREATE DATABASE t1; +CREATE DATABASE t2; +CREATE DATABASE t3; +CREATE DATABASE t4; +CREATE DATABASE t5; +CREATE DATABASE t6; +CREATE DATABASE t7; +CREATE DATABASE t8; +CREATE DATABASE t9; +CREATE DATABASE t10; +CREATE DATABASE t11; +CREATE DATABASE t12; +--end_ignore +\c t1 +CREATE EXTENSION diskquota; +CREATE TABLE f1(a int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using 
column named 'a' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +INSERT into f1 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f1'::regclass and segid = -1; + tableid | size | segid +---------+-------+------- + f1 | 98304 | -1 +(1 row) + +--start_ignore +\! gpconfig -c diskquota.max_workers -v 1; +20220719:17:37:46:030120 gpconfig:wxiaoran-a01:xiwang-[INFO]:-completed successfully with parameters '-c diskquota.max_workers -v 1' +\! gpstop -arf; +20220719:17:37:47:030207 gpstop:wxiaoran-a01:xiwang-[INFO]:-Starting gpstop with args: -arf +20220719:17:37:47:030207 gpstop:wxiaoran-a01:xiwang-[INFO]:-Gathering information and validating the environment... +20220719:17:37:47:030207 gpstop:wxiaoran-a01:xiwang-[INFO]:-Obtaining Greenplum Master catalog information +20220719:17:37:47:030207 gpstop:wxiaoran-a01:xiwang-[INFO]:-Obtaining Segment details from master... +20220719:17:37:47:030207 gpstop:wxiaoran-a01:xiwang-[INFO]:-Greenplum Version: 'postgres (Greenplum Database) 6.20.3+dev.5.g4bc90eab02 build dev' +20220719:17:37:47:030207 gpstop:wxiaoran-a01:xiwang-[INFO]:-Commencing Master instance shutdown with mode='fast' +20220719:17:37:47:030207 gpstop:wxiaoran-a01:xiwang-[INFO]:-Master segment instance directory=/Users/xiwang/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1/ +20220719:17:37:47:030207 gpstop:wxiaoran-a01:xiwang-[INFO]:-Attempting forceful termination of any leftover master process +20220719:17:37:47:030207 gpstop:wxiaoran-a01:xiwang-[INFO]:-Terminating processes for segment /Users/xiwang/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1/ +20220719:17:37:48:030207 gpstop:wxiaoran-a01:xiwang-[INFO]:-Stopping master standby host wxiaoran-a01.vmware.com mode=fast +20220719:17:37:49:030207 gpstop:wxiaoran-a01:xiwang-[INFO]:-Successfully shutdown standby process on wxiaoran-a01.vmware.com +20220719:17:37:49:030207 gpstop:wxiaoran-a01:xiwang-[INFO]:-Targeting dbid [2, 5, 3, 6, 4, 7] for shutdown +20220719:17:37:49:030207 gpstop:wxiaoran-a01:xiwang-[INFO]:-Commencing parallel primary segment instance shutdown, please wait... +20220719:17:37:49:030207 gpstop:wxiaoran-a01:xiwang-[INFO]:-0.00% of jobs completed +20220719:17:37:51:030207 gpstop:wxiaoran-a01:xiwang-[INFO]:-100.00% of jobs completed +20220719:17:37:51:030207 gpstop:wxiaoran-a01:xiwang-[INFO]:-Commencing parallel mirror segment instance shutdown, please wait... 
+20220719:17:37:51:030207 gpstop:wxiaoran-a01:xiwang-[INFO]:-0.00% of jobs completed +20220719:17:37:54:030207 gpstop:wxiaoran-a01:xiwang-[INFO]:-100.00% of jobs completed +20220719:17:37:54:030207 gpstop:wxiaoran-a01:xiwang-[INFO]:----------------------------------------------------- +20220719:17:37:54:030207 gpstop:wxiaoran-a01:xiwang-[INFO]:- Segments stopped successfully = 6 +20220719:17:37:54:030207 gpstop:wxiaoran-a01:xiwang-[INFO]:- Segments with errors during stop = 0 +20220719:17:37:54:030207 gpstop:wxiaoran-a01:xiwang-[INFO]:----------------------------------------------------- +20220719:17:37:54:030207 gpstop:wxiaoran-a01:xiwang-[INFO]:-Successfully shutdown 6 of 6 segment instances +20220719:17:37:54:030207 gpstop:wxiaoran-a01:xiwang-[INFO]:-Database successfully shutdown with no errors reported +20220719:17:37:54:030207 gpstop:wxiaoran-a01:xiwang-[INFO]:-Cleaning up leftover gpmmon process +20220719:17:37:54:030207 gpstop:wxiaoran-a01:xiwang-[INFO]:-No leftover gpmmon process found +20220719:17:37:54:030207 gpstop:wxiaoran-a01:xiwang-[INFO]:-Cleaning up leftover gpsmon processes +20220719:17:37:55:030207 gpstop:wxiaoran-a01:xiwang-[INFO]:-No leftover gpsmon processes on some hosts. not attempting forceful termination on these hosts +20220719:17:37:55:030207 gpstop:wxiaoran-a01:xiwang-[INFO]:-Cleaning up leftover shared memory +20220719:17:37:56:030207 gpstop:wxiaoran-a01:xiwang-[INFO]:-Restarting System... +--end_ignore +\c +SHOW diskquota.max_workers; + diskquota.max_workers +----------------------- + 1 +(1 row) + +\c t2 +CREATE EXTENSION diskquota; +CREATE TABLE f2(a int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +INSERT into f2 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f2'::regclass and segid = -1; + tableid | size | segid +---------+-------+------- + f2 | 98304 | -1 +(1 row) + +\c t3 +CREATE EXTENSION diskquota; +CREATE TABLE f3(a int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +INSERT into f3 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f3'::regclass and segid = -1; + tableid | size | segid +---------+-------+------- + f3 | 98304 | -1 +(1 row) + +--start_ignore +\! gpconfig -c diskquota.max_workers -v 11; +20220727:14:23:23:025074 gpconfig:wxiaoran-a01:xiwang-[INFO]:-completed successfully with parameters '-c diskquota.max_workers -v 11' +\! gpstop -arf; +20220719:17:38:28:030945 gpstop:wxiaoran-a01:xiwang-[INFO]:-Starting gpstop with args: -arf +20220719:17:38:28:030945 gpstop:wxiaoran-a01:xiwang-[INFO]:-Gathering information and validating the environment... 
+20220719:17:38:28:030945 gpstop:wxiaoran-a01:xiwang-[INFO]:-Obtaining Greenplum Master catalog information +20220719:17:38:28:030945 gpstop:wxiaoran-a01:xiwang-[INFO]:-Obtaining Segment details from master... +20220719:17:38:28:030945 gpstop:wxiaoran-a01:xiwang-[INFO]:-Greenplum Version: 'postgres (Greenplum Database) 6.20.3+dev.5.g4bc90eab02 build dev' +20220719:17:38:28:030945 gpstop:wxiaoran-a01:xiwang-[INFO]:-Commencing Master instance shutdown with mode='fast' +20220719:17:38:28:030945 gpstop:wxiaoran-a01:xiwang-[INFO]:-Master segment instance directory=/Users/xiwang/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1/ +20220719:17:38:28:030945 gpstop:wxiaoran-a01:xiwang-[INFO]:-Attempting forceful termination of any leftover master process +20220719:17:38:28:030945 gpstop:wxiaoran-a01:xiwang-[INFO]:-Terminating processes for segment /Users/xiwang/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1/ +20220719:17:38:29:030945 gpstop:wxiaoran-a01:xiwang-[INFO]:-Stopping master standby host wxiaoran-a01.vmware.com mode=fast +20220719:17:38:30:030945 gpstop:wxiaoran-a01:xiwang-[INFO]:-Successfully shutdown standby process on wxiaoran-a01.vmware.com +20220719:17:38:30:030945 gpstop:wxiaoran-a01:xiwang-[INFO]:-Targeting dbid [2, 5, 3, 6, 4, 7] for shutdown +20220719:17:38:30:030945 gpstop:wxiaoran-a01:xiwang-[INFO]:-Commencing parallel primary segment instance shutdown, please wait... +20220719:17:38:30:030945 gpstop:wxiaoran-a01:xiwang-[INFO]:-0.00% of jobs completed +20220719:17:38:33:030945 gpstop:wxiaoran-a01:xiwang-[INFO]:-100.00% of jobs completed +20220719:17:38:33:030945 gpstop:wxiaoran-a01:xiwang-[INFO]:-Commencing parallel mirror segment instance shutdown, please wait... +20220719:17:38:33:030945 gpstop:wxiaoran-a01:xiwang-[INFO]:-0.00% of jobs completed +20220719:17:38:35:030945 gpstop:wxiaoran-a01:xiwang-[INFO]:-100.00% of jobs completed +20220719:17:38:35:030945 gpstop:wxiaoran-a01:xiwang-[INFO]:----------------------------------------------------- +20220719:17:38:35:030945 gpstop:wxiaoran-a01:xiwang-[INFO]:- Segments stopped successfully = 6 +20220719:17:38:35:030945 gpstop:wxiaoran-a01:xiwang-[INFO]:- Segments with errors during stop = 0 +20220719:17:38:35:030945 gpstop:wxiaoran-a01:xiwang-[INFO]:----------------------------------------------------- +20220719:17:38:35:030945 gpstop:wxiaoran-a01:xiwang-[INFO]:-Successfully shutdown 6 of 6 segment instances +20220719:17:38:35:030945 gpstop:wxiaoran-a01:xiwang-[INFO]:-Database successfully shutdown with no errors reported +20220719:17:38:35:030945 gpstop:wxiaoran-a01:xiwang-[INFO]:-Cleaning up leftover gpmmon process +20220719:17:38:36:030945 gpstop:wxiaoran-a01:xiwang-[INFO]:-No leftover gpmmon process found +20220719:17:38:36:030945 gpstop:wxiaoran-a01:xiwang-[INFO]:-Cleaning up leftover gpsmon processes +20220719:17:38:36:030945 gpstop:wxiaoran-a01:xiwang-[INFO]:-No leftover gpsmon processes on some hosts. not attempting forceful termination on these hosts +20220719:17:38:36:030945 gpstop:wxiaoran-a01:xiwang-[INFO]:-Cleaning up leftover shared memory +20220719:17:38:38:030945 gpstop:wxiaoran-a01:xiwang-[INFO]:-Restarting System... +--end_ignore +\c +SHOW diskquota.max_workers; + diskquota.max_workers +----------------------- + 11 +(1 row) + +\c t4 +CREATE EXTENSION diskquota; +CREATE TABLE f4(a int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. 
Make sure column(s) chosen are the optimal data distribution key to minimize skew. +INSERT into f4 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f4'::regclass and segid = -1; + tableid | size | segid +---------+-------+------- + f4 | 98304 | -1 +(1 row) + +\c t5 +CREATE EXTENSION diskquota; +CREATE TABLE f5(a int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +INSERT into f5 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f5'::regclass and segid = -1; + tableid | size | segid +---------+-------+------- + f5 | 98304 | -1 +(1 row) + +\c t6 +CREATE EXTENSION diskquota; +CREATE TABLE f6(a int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +INSERT into f6 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f6'::regclass and segid = -1; + tableid | size | segid +---------+-------+------- + f6 | 98304 | -1 +(1 row) + +\c t7 +CREATE EXTENSION diskquota; +CREATE TABLE f7(a int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +INSERT into f7 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f7'::regclass and segid = -1; + tableid | size | segid +---------+-------+------- + f7 | 98304 | -1 +(1 row) + +\c t8 +CREATE EXTENSION diskquota; +CREATE TABLE f8(a int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +INSERT into f8 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f8'::regclass and segid = -1; + tableid | size | segid +---------+-------+------- + f8 | 98304 | -1 +(1 row) + +\c t9 +CREATE EXTENSION diskquota; +CREATE TABLE f9(a int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. 
+HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +INSERT into f9 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f9'::regclass and segid = -1; + tableid | size | segid +---------+-------+------- + f9 | 98304 | -1 +(1 row) + +\c t10 +CREATE EXTENSION diskquota; +CREATE TABLE f10(a int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +INSERT into f10 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f10'::regclass and segid = -1; + tableid | size | segid +---------+-------+------- + f10 | 98304 | -1 +(1 row) + +\c t11 +CREATE EXTENSION diskquota; +CREATE TABLE f11(a int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +INSERT into f11 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f11'::regclass and segid = -1; + tableid | size | segid +---------+-------+------- + f11 | 98304 | -1 +(1 row) + +\c t1 +INSERT into f1 SELECT generate_series(0,100000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f1'::regclass and segid = -1; + tableid | size | segid +---------+---------+------- + f1 | 3997696 | -1 +(1 row) + +\c t7 +INSERT into f7 SELECT generate_series(0,100000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f7'::regclass and segid = -1; + tableid | size | segid +---------+---------+------- + f7 | 3997696 | -1 +(1 row) + +\c t1 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +DROP TABLE f1; +CREATE EXTENSION diskquota; +CREATE TABLE f1(a int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
+INSERT into f1 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f1'::regclass and segid = -1; + tableid | size | segid +---------+-------+------- + f1 | 98304 | -1 +(1 row) + +\c t2 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +DROP TABLE f2; +CREATE EXTENSION diskquota; +CREATE TABLE f2(a int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +INSERT into f2 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f2'::regclass and segid = -1; + tableid | size | segid +---------+-------+------- + f2 | 98304 | -1 +(1 row) + +\c t3 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +\c t4 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +\c t5 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +\c t6 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +\c t7 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +\c t8 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +\c t9 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +\c t10 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +\c t11 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +\c t12 +CREATE EXTENSION diskquota; +CREATE TABLE f12(a int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
+INSERT into f12 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f12'::regclass and segid = -1; + tableid | size | segid +---------+-------+------- + f12 | 98304 | -1 +(1 row) + +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +\c t1 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +\c t2 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +--start_ignore +\c contrib_regression +DROP DATABASE t1; +DROP DATABASE t2; +DROP DATABASE t3; +DROP DATABASE t4; +DROP DATABASE t5; +DROP DATABASE t6; +DROP DATABASE t7; +DROP DATABASE t8; +DROP DATABASE t9; +DROP DATABASE t10; +DROP DATABASE t11; +DROP DATABASE t12; +\! gpconfig -r diskquota.worker_timeout; +20220719:17:19:18:023651 gpconfig:wxiaoran-a01:xiwang-[INFO]:-completed successfully with parameters '-r diskquota.worker_timeout' +\! gpconfig -r diskquota.naptime; +20220719:17:19:20:023738 gpconfig:wxiaoran-a01:xiwang-[INFO]:-completed successfully with parameters '-r diskquota.naptime' +\! gpconfig -r diskquota.max_workers; +20220719:17:19:23:023824 gpconfig:wxiaoran-a01:xiwang-[INFO]:-completed successfully with parameters '-r diskquota.max_workers' +\! gpstop -arf; +20220719:17:19:23:023913 gpstop:wxiaoran-a01:xiwang-[INFO]:-Starting gpstop with args: -arf +20220719:17:19:23:023913 gpstop:wxiaoran-a01:xiwang-[INFO]:-Gathering information and validating the environment... +20220719:17:19:23:023913 gpstop:wxiaoran-a01:xiwang-[INFO]:-Obtaining Greenplum Master catalog information +20220719:17:19:23:023913 gpstop:wxiaoran-a01:xiwang-[INFO]:-Obtaining Segment details from master... +20220719:17:19:23:023913 gpstop:wxiaoran-a01:xiwang-[INFO]:-Greenplum Version: 'postgres (Greenplum Database) 6.20.3+dev.5.g4bc90eab02 build dev' +20220719:17:19:23:023913 gpstop:wxiaoran-a01:xiwang-[INFO]:-Commencing Master instance shutdown with mode='fast' +20220719:17:19:23:023913 gpstop:wxiaoran-a01:xiwang-[INFO]:-Master segment instance directory=/Users/xiwang/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1/ +20220719:17:19:23:023913 gpstop:wxiaoran-a01:xiwang-[INFO]:-Attempting forceful termination of any leftover master process +20220719:17:19:23:023913 gpstop:wxiaoran-a01:xiwang-[INFO]:-Terminating processes for segment /Users/xiwang/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1/ +20220719:17:19:24:023913 gpstop:wxiaoran-a01:xiwang-[INFO]:-Stopping master standby host wxiaoran-a01.vmware.com mode=fast +20220719:17:19:25:023913 gpstop:wxiaoran-a01:xiwang-[INFO]:-Successfully shutdown standby process on wxiaoran-a01.vmware.com +20220719:17:19:25:023913 gpstop:wxiaoran-a01:xiwang-[INFO]:-Targeting dbid [2, 5, 3, 6, 4, 7] for shutdown +20220719:17:19:25:023913 gpstop:wxiaoran-a01:xiwang-[INFO]:-Commencing parallel primary segment instance shutdown, please wait... 
+20220719:17:19:25:023913 gpstop:wxiaoran-a01:xiwang-[INFO]:-0.00% of jobs completed +20220719:17:19:28:023913 gpstop:wxiaoran-a01:xiwang-[INFO]:-100.00% of jobs completed +20220719:17:19:28:023913 gpstop:wxiaoran-a01:xiwang-[INFO]:-Commencing parallel mirror segment instance shutdown, please wait... +20220719:17:19:28:023913 gpstop:wxiaoran-a01:xiwang-[INFO]:-0.00% of jobs completed +20220719:17:19:30:023913 gpstop:wxiaoran-a01:xiwang-[INFO]:-100.00% of jobs completed +20220719:17:19:30:023913 gpstop:wxiaoran-a01:xiwang-[INFO]:----------------------------------------------------- +20220719:17:19:30:023913 gpstop:wxiaoran-a01:xiwang-[INFO]:- Segments stopped successfully = 6 +20220719:17:19:30:023913 gpstop:wxiaoran-a01:xiwang-[INFO]:- Segments with errors during stop = 0 +20220719:17:19:30:023913 gpstop:wxiaoran-a01:xiwang-[INFO]:----------------------------------------------------- +20220719:17:19:30:023913 gpstop:wxiaoran-a01:xiwang-[INFO]:-Successfully shutdown 6 of 6 segment instances +20220719:17:19:30:023913 gpstop:wxiaoran-a01:xiwang-[INFO]:-Database successfully shutdown with no errors reported +20220719:17:19:30:023913 gpstop:wxiaoran-a01:xiwang-[INFO]:-Cleaning up leftover gpmmon process +20220719:17:19:31:023913 gpstop:wxiaoran-a01:xiwang-[INFO]:-No leftover gpmmon process found +20220719:17:19:31:023913 gpstop:wxiaoran-a01:xiwang-[INFO]:-Cleaning up leftover gpsmon processes +20220719:17:19:31:023913 gpstop:wxiaoran-a01:xiwang-[INFO]:-No leftover gpsmon processes on some hosts. not attempting forceful termination on these hosts +20220719:17:19:31:023913 gpstop:wxiaoran-a01:xiwang-[INFO]:-Cleaning up leftover shared memory +20220719:17:19:33:023913 gpstop:wxiaoran-a01:xiwang-[INFO]:-Restarting System... +--end_ignore diff --git a/tests/regress/sql/config.sql b/tests/regress/sql/config.sql index 1b8c8dfafc3..3861c3f63ff 100644 --- a/tests/regress/sql/config.sql +++ b/tests/regress/sql/config.sql @@ -5,6 +5,7 @@ CREATE DATABASE diskquota; \! gpconfig -c diskquota.naptime -v 0 --skipvalidation \! gpconfig -c max_worker_processes -v 20 --skipvalidation \! gpconfig -c diskquota.hard_limit -v "off" --skipvalidation +\! gpconfig -c diskquota.max_workers -v 1 --skipvalidation \! gpstop -raf --end_ignore diff --git a/tests/regress/sql/test_extension.sql b/tests/regress/sql/test_extension.sql index a3003957aef..0c76e6debb8 100644 --- a/tests/regress/sql/test_extension.sql +++ b/tests/regress/sql/test_extension.sql @@ -12,7 +12,13 @@ CREATE DATABASE dbx8 ; CREATE DATABASE dbx9 ; CREATE DATABASE dbx10 ; +--start_ignore +\! gpconfig -c diskquota.max_workers -v 20 --skipvalidation +\! gpstop -arf +--end_ignore +\c show max_worker_processes; +show diskquota.max_workers; \! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l @@ -217,3 +223,9 @@ DROP DATABASE dbx7 ; DROP DATABASE dbx8 ; DROP DATABASE dbx9 ; DROP DATABASE dbx10 ; +--start_ignore +\! gpconfig -c diskquota.max_workers -v 1 --skipvalidation +\! 
gpstop -arf; +--end_ignore +\c +show diskquota.max_workers; diff --git a/tests/regress/sql/test_update_db_cache.sql b/tests/regress/sql/test_update_db_cache.sql index 5256fbd8031..01d7179d684 100644 --- a/tests/regress/sql/test_update_db_cache.sql +++ b/tests/regress/sql/test_update_db_cache.sql @@ -39,6 +39,7 @@ FROM gp_dist_random('gp_id'); DROP TABLE t; DROP TABLE t_no_extension; +SELECT diskquota.pause(); DROP EXTENSION diskquota; diff --git a/tests/regress/sql/test_worker_schedule.sql b/tests/regress/sql/test_worker_schedule.sql new file mode 100644 index 00000000000..f63e02f4ec2 --- /dev/null +++ b/tests/regress/sql/test_worker_schedule.sql @@ -0,0 +1,225 @@ +-- start_ignore +\c + +DROP DATABASE IF EXISTS t1; +DROP DATABASE IF EXISTS t3; +DROP DATABASE IF EXISTS t4; +DROP DATABASE IF EXISTS t5; +DROP DATABASE IF EXISTS t6; +DROP DATABASE IF EXISTS t7; +DROP DATABASE IF EXISTS t8; +DROP DATABASE IF EXISTS t9; +DROP DATABASE IF EXISTS t10; +DROP DATABASE IF EXISTS t11; +DROP DATABASE IF EXISTS t12; +CREATE DATABASE t1; +CREATE DATABASE t2; +CREATE DATABASE t3; +CREATE DATABASE t4; +CREATE DATABASE t5; +CREATE DATABASE t6; +CREATE DATABASE t7; +CREATE DATABASE t8; +CREATE DATABASE t9; +CREATE DATABASE t10; +CREATE DATABASE t11; +CREATE DATABASE t12; +--end_ignore +\c t1 +CREATE EXTENSION diskquota; +CREATE TABLE f1(a int); +INSERT into f1 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f1'::regclass and segid = -1; + +--start_ignore +\! gpconfig -c diskquota.max_workers -v 1; +\! gpstop -arf; +--end_ignore + +\c +SHOW diskquota.max_workers; + +\c t2 +CREATE EXTENSION diskquota; +CREATE TABLE f2(a int); +INSERT into f2 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f2'::regclass and segid = -1; + +\c t3 +CREATE EXTENSION diskquota; +CREATE TABLE f3(a int); +INSERT into f3 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f3'::regclass and segid = -1; + +--start_ignore +\! gpconfig -c diskquota.max_workers -v 11; +\! 
gpstop -arf; +--end_ignore + +\c +SHOW diskquota.max_workers; + +\c t4 +CREATE EXTENSION diskquota; +CREATE TABLE f4(a int); +INSERT into f4 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f4'::regclass and segid = -1; + +\c t5 +CREATE EXTENSION diskquota; +CREATE TABLE f5(a int); +INSERT into f5 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f5'::regclass and segid = -1; + +\c t6 +CREATE EXTENSION diskquota; +CREATE TABLE f6(a int); +INSERT into f6 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f6'::regclass and segid = -1; + +\c t7 +CREATE EXTENSION diskquota; +CREATE TABLE f7(a int); +INSERT into f7 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f7'::regclass and segid = -1; + +\c t8 +CREATE EXTENSION diskquota; +CREATE TABLE f8(a int); +INSERT into f8 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f8'::regclass and segid = -1; + +\c t9 +CREATE EXTENSION diskquota; +CREATE TABLE f9(a int); +INSERT into f9 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f9'::regclass and segid = -1; + +\c t10 +CREATE EXTENSION diskquota; +CREATE TABLE f10(a int); +INSERT into f10 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f10'::regclass and segid = -1; + +\c t11 +CREATE EXTENSION diskquota; +CREATE TABLE f11(a int); +INSERT into f11 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f11'::regclass and segid = -1; + +\c t1 +INSERT into f1 SELECT generate_series(0,100000); +SELECT diskquota.wait_for_worker_new_epoch(); +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f1'::regclass and segid = -1; + +\c t7 +INSERT into f7 SELECT generate_series(0,100000); +SELECT diskquota.wait_for_worker_new_epoch(); +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f7'::regclass and segid = -1; + +\c t1 +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); +DROP EXTENSION diskquota; +DROP TABLE f1; +CREATE EXTENSION diskquota; +CREATE TABLE f1(a int); +INSERT into f1 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f1'::regclass and segid = -1; + +\c t2 +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); +DROP EXTENSION diskquota; +DROP TABLE f2; +CREATE EXTENSION diskquota; +CREATE TABLE f2(a int); +INSERT into f2 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f2'::regclass and segid = -1; + +\c t3 +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); +DROP EXTENSION diskquota; +\c 
t4 +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); +DROP EXTENSION diskquota; +\c t5 +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); +DROP EXTENSION diskquota; +\c t6 +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); +DROP EXTENSION diskquota; +\c t7 +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); +DROP EXTENSION diskquota; +\c t8 +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); +DROP EXTENSION diskquota; +\c t9 +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); +DROP EXTENSION diskquota; +\c t10 +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); +DROP EXTENSION diskquota; +\c t11 +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); +DROP EXTENSION diskquota; + +\c t12 +CREATE EXTENSION diskquota; +CREATE TABLE f12(a int); +INSERT into f12 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f12'::regclass and segid = -1; +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); +DROP EXTENSION diskquota; + +\c t1 +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); +DROP EXTENSION diskquota; +\c t2 +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); +DROP EXTENSION diskquota; +--start_ignore +\c contrib_regression +DROP DATABASE t1; +DROP DATABASE t2; +DROP DATABASE t3; +DROP DATABASE t4; +DROP DATABASE t5; +DROP DATABASE t6; +DROP DATABASE t7; +DROP DATABASE t8; +DROP DATABASE t9; +DROP DATABASE t10; +DROP DATABASE t11; +DROP DATABASE t12; +\! gpconfig -r diskquota.worker_timeout; +\! gpconfig -r diskquota.max_workers; +\! gpstop -arf; +--end_ignore From 3d137c327f6b779736ea7fdd222197228fed8762 Mon Sep 17 00:00:00 2001 From: Xiaoran Wang Date: Tue, 20 Sep 2022 21:33:48 +0800 Subject: [PATCH 213/330] Fix the test failure on release pipeline Regress tests failed on the release pipeline because diskquota.naptime was not 0. The diskquota launcher first sleeps and only then starts the diskquota workers, so with a non-zero naptime the workers start very late and the tests that check for them fail. Fix this by starting the diskquota workers in the launcher before it sleeps, so that the workers start immediately when the launcher does. Also, increase the sleep time in the tests. Sometimes, after restarting the gpdb cluster, the db has not yet been added to monitored_db_cache when worker_get_epoch tries to fetch it from the cache, so the lookup fails. We shouldn't report an ERROR here; printing a WARNING is enough. --- diskquota.c | 87 +++++++++++------------ tests/init_file | 2 + tests/regress/expected/test_extension.out | 2 +- tests/regress/sql/test_extension.sql | 2 +- 4 files changed, 47 insertions(+), 46 deletions(-) diff --git a/diskquota.c b/diskquota.c index a8d14fe28d7..3072e430a80 100644 --- a/diskquota.c +++ b/diskquota.c @@ -640,15 +640,52 @@ disk_quota_launcher_main(Datum main_arg) /* main loop: do this until the SIGTERM handler tells us to terminate. 
*/ ereport(LOG, (errmsg("[diskquota launcher] start main loop"))); curDB = NULL; - while (!got_sigterm) { - int rc; + int rc; + CHECK_FOR_INTERRUPTS(); + + /* + * compute the remaining nap time + */ + long secs; + int microsecs; + TimestampDifference(GetCurrentTimestamp(), + TimestampTzPlusMilliseconds(loop_start_time, diskquota_naptime * 1000L), &secs, &microsecs); + nap.tv_sec = secs; + nap.tv_usec = microsecs; + + if (curDB == DiskquotaLauncherShmem->dbArrayTail) + { + /* Have slept enough, should start another loop */ + if (nap.tv_sec == 0 && nap.tv_usec == 0) + { + loop_start_time = GetCurrentTimestamp(); + /* set the curDB pointing to the head of the db list */ + curDB = NULL; + } + /* do nothing, just sleep until the nap time reaches 0 */ + else + { + continue; + } + } + + /* If there are not enough workers to run the db, sleep first to wait for workers */ + if (nap.tv_sec == 0 && nap.tv_usec == 0) + { + nap.tv_sec = diskquota_naptime > 0 ? diskquota_naptime : 1; + nap.tv_usec = 0; + } + + while (curDB != DiskquotaLauncherShmem->dbArrayTail && CanLaunchWorker()) + { + start_worker(); + } + bool sigusr1 = false; bool sigusr2 = false; - CHECK_FOR_INTERRUPTS(); - /* * background workers mustn't call usleep() or any direct equivalent: * instead, they may wait on their process latch, which sleeps as @@ -702,44 +739,6 @@ disk_quota_launcher_main(Datum main_arg) sigusr1 = true; } - /* - * modify wait time - */ - long secs; - int microsecs; - TimestampDifference(GetCurrentTimestamp(), - TimestampTzPlusMilliseconds(loop_start_time, diskquota_naptime * 1000L), &secs, &microsecs); - nap.tv_sec = secs; - nap.tv_usec = microsecs; - - if (curDB == DiskquotaLauncherShmem->dbArrayTail) - { - /* Have sleep enough time, should start another loop */ - if (nap.tv_sec == 0 && nap.tv_usec == 0) - { - loop_start_time = GetCurrentTimestamp(); - /* set the curDB pointing to the head of the db list */ - curDB = NULL; - } - /* do nothing, just to sleep untill the nap time is 0 */ - else - { - continue; - } - } - - /* If there are no enough workers to run db, we can firstly sleep to wait workers */ - if (nap.tv_sec == 0 && nap.tv_usec == 0) - { - nap.tv_sec = diskquota_naptime > 0 ? 
diskquota_naptime : 1; - nap.tv_usec = 0; - } - - while (curDB != DiskquotaLauncherShmem->dbArrayTail && CanLaunchWorker()) - { - start_worker(); - } - loop_begin = loop_end; loop_end = time(NULL); if (isAbnormalLoopTime(loop_end - loop_begin)) @@ -1364,8 +1363,8 @@ worker_get_epoch(Oid dbid) LWLockRelease(diskquota_locks.monitored_dbid_cache_lock); if (!found) { - ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("[diskquota] worker not found for database \"%s\"", get_db_name(dbid)))); + ereport(WARNING, (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("[diskquota] worker not found for database \"%s\"", get_database_name(dbid)))); } return epoch; } diff --git a/tests/init_file b/tests/init_file index 21a00398e8c..bc67331cfa5 100644 --- a/tests/init_file +++ b/tests/init_file @@ -5,6 +5,7 @@ -- start_matchignore # This pattern is extracted from gpdb/src/test/regress/init_file m/^(?:HINT|NOTICE):\s+.+\'DISTRIBUTED BY\' clause.*/ +m/WARNING: \[diskquota\] worker not found for database.*/ -- end_matchignore -- start_matchsubs @@ -35,4 +36,5 @@ s/ERROR: tablespace: \d+, schema: \d+ diskquota exceeded.*/[hardlimit] tablespa m/^ERROR: Can not set disk quota for system owner:.*/ s/^ERROR: Can not set disk quota for system owner:.*/ERROR: Can not set disk quota from system owner:/ + -- end_matchsubs diff --git a/tests/regress/expected/test_extension.out b/tests/regress/expected/test_extension.out index 036ab1f4b0a..579b05b80c8 100644 --- a/tests/regress/expected/test_extension.out +++ b/tests/regress/expected/test_extension.out @@ -59,7 +59,7 @@ show diskquota.max_workers; 20 (1 row) -\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +\! sleep 1.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 2 -- FIXME: We need to sleep for a while each time after CREATE EXTENSION and -- DROP EXTENSION to wait for the bgworker to start or to exit. diff --git a/tests/regress/sql/test_extension.sql b/tests/regress/sql/test_extension.sql index 0c76e6debb8..1e2c5ca6dc6 100644 --- a/tests/regress/sql/test_extension.sql +++ b/tests/regress/sql/test_extension.sql @@ -20,7 +20,7 @@ CREATE DATABASE dbx10 ; show max_worker_processes; show diskquota.max_workers; -\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +\! sleep 1.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l -- FIXME: We need to sleep for a while each time after CREATE EXTENSION and -- DROP EXTENSION to wait for the bgworker to start or to exit. From d4edeec17fce0d716cefd69b4572a38c3582f630 Mon Sep 17 00:00:00 2001 From: Chen Mulong Date: Thu, 22 Sep 2022 15:27:31 +0800 Subject: [PATCH 214/330] Move pr/merge pipeline to dev2 (#238) The docker clang-format image still lives on the extensions GCP project. There is no need to move it over, since the same format check already runs as a GitHub Action. 
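For context, the `detect_concourse_team` helper added to `concourse/fly.sh` below works by scanning `~/.flyrc`, the YAML file that `fly login` writes, for the `team:` entry under the selected target. A minimal sketch of the layout it assumes (target name and token value are illustrative placeholders, not taken from a real config):

```
targets:
  dev2:
    api: https://dev2.ci.gpdb.pivotal.io
    team: gp-extensions
    token:
      type: bearer
      value: <redacted>
```

If the target has no `team:` entry in `~/.flyrc`, the helper leaves `concourse_team` at its default of `main`.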
--- concourse/README.md | 23 ++++++------- concourse/fly.sh | 25 +++++++++++++- concourse/pipeline/dev.yml | 4 +-- concourse/pipeline/job_def.lib.yml | 26 --------------- concourse/pipeline/pr.yml | 4 +-- concourse/pipeline/res_def.yml | 44 ++++++++++--------------- concourse/scripts/check-clang-format.sh | 12 ------- 7 files changed, 58 insertions(+), 80 deletions(-) delete mode 100755 concourse/scripts/check-clang-format.sh diff --git a/concourse/README.md b/concourse/README.md index 14492679bac..259b436bf38 100644 --- a/concourse/README.md +++ b/concourse/README.md @@ -11,12 +11,12 @@ ### PR Pipeline -https://extensions.ci.gpdb.pivotal.io/teams/main/pipelines/pr.diskquota +https://dev2.ci.gpdb.pivotal.io/teams/gp-extensions/pipelines/pr.diskquota ### Main Branch Pipeline The development happens on the `gpdb` branch. The merge pipeline for the `gpdb` branch is -https://extensions.ci.gpdb.pivotal.io/teams/main/pipelines/merge.diskquota:gpdb +https://dev2.ci.gpdb.pivotal.io/teams/gp-extensions/pipelines/merge.diskquota:gpdb # Fly a pipeline @@ -25,24 +25,25 @@ https://extensions.ci.gpdb.pivotal.io/teams/main/pipelines/merge.diskquota:gpdb - Install [ytt](https://carvel.dev/ytt/). It's written in go. So just download the executable for your platform from the [release page](https://github.com/vmware-tanzu/carvel-ytt/releases). - Make the `fly` command in the `PATH` or export its location to `FLY` env. -- Clone the `gp-continuous-integration` repo to `$HOME/workspace` or set its parent directory to `WORKSPACE` env. -- Login with the `fly` command. Assume we are using `extension` as the target name. +- Login with the `fly` command. Assume we are using `dev2` as the target name. + +``` +# -n gp-extensions selects the concourse team +fly -t dev2 login -c https://dev2.ci.gpdb.pivotal.io -n gp-extensions +``` - ``` - fly -t extension login -c https://extensions.ci.gpdb.pivotal.io - ``` - `cd` to the `concourse` directory. ## Fly the PR pipeline ``` -./fly.sh -t extension -c pr +./fly.sh -t dev2 -c pr ``` ## Fly the merge pipeline ``` -./fly.sh -t extension -c merge +./fly.sh -t dev2 -c merge ``` ## Fly the release pipeline @@ -67,7 +68,7 @@ To fly a release pipeline from a specific branch: ## Fly the dev pipeline ``` -./fly.sh -t extension -c dev -p -b +./fly.sh -t dev2 -c dev -p -b ``` ## Webhook @@ -84,6 +85,6 @@ curl --data-raw "foo" ## PR pipeline is not triggered. -The PR pipeline relies on the webhook to detect the new PR. However, due to the the limitation of the webhook implemention of concourse, we rely on the push hook for this. It means if the PR is from a forked repo, the PR pipeline won't be triggered immediately. To manually trigger the pipeline, go to https://extensions.ci.gpdb.pivotal.io/teams/main/pipelines/pr.diskquota/resources/diskquota_pr and click ⟳ button there. +The PR pipeline relies on the webhook to detect the new PR. However, due to a limitation of the webhook implementation of concourse, we rely on the push hook for this. This means that if the PR is from a forked repo, the PR pipeline won't be triggered immediately. To manually trigger the pipeline, go to https://dev2.ci.gpdb.pivotal.io/teams/gp-extensions/pipelines/pr.diskquota/resources/diskquota_pr and click the ⟳ button there. TIPS: Just don't fork, name your branch as `/` and push it here to create PR. 
diff --git a/concourse/fly.sh b/concourse/fly.sh index 37c3bbe6a45..75404b6be6c 100755 --- a/concourse/fly.sh +++ b/concourse/fly.sh @@ -6,6 +6,7 @@ fly=${FLY:-"fly"} echo "'fly' command: ${fly}" echo "" proj_name="diskquota" +concourse_team="main" usage() { if [ -n "$1" ]; then @@ -19,6 +20,26 @@ usage() { exit 1 } +# Hacky way to find out which concourse team is being used. +# The team name is needed to generate webhook URL +detect_concourse_team() { + local target="$1" + local fly_rc_file="$HOME/.flyrc" + local found_target=false + while read -r line; + do + line="$(echo -e "${line}" | tr -d '[:space:]')" + if [ ${found_target} != true ] && [ "${line}" = "${target}:" ]; then + found_target=true + fi + if [ ${found_target} = true ] && [[ "${line}" == team:* ]]; then + concourse_team=$(echo "${line}" | cut --delimiter=":" --fields=2) + echo "Use concourse target: ${target}, team: ${concourse_team}" + return + fi + done < "${fly_rc_file}" +} + # Parse command line options while getopts ":c:t:p:b:T" o; do case "${o}" in @@ -52,6 +73,8 @@ if [ -z "${target}" ] || [ -z "${pipeline_config}" ]; then usage "" fi +detect_concourse_team "${target}" + pipeline_type="" # Decide ytt options to generate pipeline case ${pipeline_config} in @@ -139,6 +162,6 @@ concourse_url=$(fly targets | awk "{if (\$1 == \"${target}\") {print \$2}}") echo "" echo "================================================================================" echo "Remeber to set the the webhook URL on GitHub:" -echo "${concourse_url}/api/v1/teams/main/pipelines/${pipeline_name}/resources/${hook_res}/check/webhook?webhook_token=" +echo "${concourse_url}/api/v1/teams/${concourse_team}/pipelines/${pipeline_name}/resources/${hook_res}/check/webhook?webhook_token=" echo "You may need to change the base URL if a differnt concourse server is used." echo "================================================================================" diff --git a/concourse/pipeline/dev.yml b/concourse/pipeline/dev.yml index ea7fe5b8490..93a1a252f9d 100644 --- a/concourse/pipeline/dev.yml +++ b/concourse/pipeline/dev.yml @@ -1,5 +1,5 @@ #@ load("job_def.lib.yml", -#@ "entrance_check_job", +#@ "entrance_job", #@ "build_test_job", #@ "centos6_gpdb6_conf", #@ "centos7_gpdb6_conf", @@ -24,7 +24,7 @@ jobs: #@ "res_map": res_map, #@ "trigger": trigger, #@ } -- #@ entrance_check_job(param) +- #@ entrance_job(param) #@ for conf in confs: #@ param = { #@ "res_map": res_map, diff --git a/concourse/pipeline/job_def.lib.yml b/concourse/pipeline/job_def.lib.yml index c3d372da1aa..3a682739135 100644 --- a/concourse/pipeline/job_def.lib.yml +++ b/concourse/pipeline/job_def.lib.yml @@ -74,32 +74,6 @@ plan: #@ end #@ end -#! Like the entrance_job, with more static checks. 
-#@ def entrance_check_job(param): -#@ add_res_by_name(param["res_map"], "clang-format-image") -#@ trigger = param["trigger"] -name: entrance -on_failure: #@ trigger["on_failure"] -on_error: #@ trigger["on_error"] -plan: -#@ for to_get in trigger["to_get"]: -- trigger: #@ trigger["auto_trigger"] - _: #@ template.replace(to_get) -#@ end -#@ for to_put in trigger["to_put_pre"]: -- #@ to_put -#@ end -- get: clang-format-image -- task: check_clang_format - image: clang-format-image - config: - inputs: - - name: diskquota_src - platform: linux - run: - path: diskquota_src/concourse/scripts/check-clang-format.sh -#@ end - #@ def exit_job(param): #@ trigger = param["trigger"] #@ confs = param["confs"] diff --git a/concourse/pipeline/pr.yml b/concourse/pipeline/pr.yml index 917f818816a..bb2f273ac9c 100644 --- a/concourse/pipeline/pr.yml +++ b/concourse/pipeline/pr.yml @@ -1,5 +1,5 @@ #@ load("job_def.lib.yml", -#@ "entrance_check_job", +#@ "entrance_job", #@ "exit_pr_job", #@ "build_test_job", #@ "centos6_gpdb6_conf", @@ -27,7 +27,7 @@ jobs: #@ "res_map": res_map, #@ "trigger": trigger, #@ } -- #@ entrance_check_job(param) +- #@ entrance_job(param) #@ for conf in confs: #@ param = { #@ "res_map": res_map, diff --git a/concourse/pipeline/res_def.yml b/concourse/pipeline/res_def.yml index 7f9ccfe02f3..82278714c83 100644 --- a/concourse/pipeline/res_def.yml +++ b/concourse/pipeline/res_def.yml @@ -106,14 +106,6 @@ resources: source: repository: gcr.io/data-gpdb-public-images/gpdb6-ubuntu18.04-test tag: latest -# clang_format -- name: clang-format-image - type: registry-image - source: - repository: gcr.io/data-gpdb-extensions/common/clang-format - tag: 13 - username: _json_key - password: ((extension/extensions-gcs-service-account-key)) # gpdb binary on gcs is located as different folder for different version # Latest build with assertion enabled: @@ -203,58 +195,58 @@ resources: - name: bin_diskquota_gpdb6_rhel6_intermediates type: gcs source: - bucket: gpdb-extensions-concourse-resources - json_key: ((extension/extensions-gcs-service-account-key)) + bucket: gp-extensions-ci + json_key: ((extension/extensions-gcs-service-account-key-dev2)) versioned_file: intermediates/diskquota/diskquota_rhel6_gpdb6.tar.gz - name: bin_diskquota_gpdb6_rhel7_intermediates type: gcs source: - bucket: gpdb-extensions-concourse-resources - json_key: ((extension/extensions-gcs-service-account-key)) + bucket: gp-extensions-ci + json_key: ((extension/extensions-gcs-service-account-key-dev2)) versioned_file: intermediates/diskquota/diskquota_rhel7_gpdb6.tar.gz - name: bin_diskquota_gpdb6_rhel8_intermediates type: gcs source: - bucket: gpdb-extensions-concourse-resources - json_key: ((extension/extensions-gcs-service-account-key)) + bucket: gp-extensions-ci + json_key: ((extension/extensions-gcs-service-account-key-dev2)) versioned_file: intermediates/diskquota/diskquota_rhel8_gpdb6.tar.gz - name: bin_diskquota_gpdb6_ubuntu18_intermediates type: gcs source: - bucket: gpdb-extensions-concourse-resources - json_key: ((extension/extensions-gcs-service-account-key)) + bucket: gp-extensions-ci + json_key: ((extension/extensions-gcs-service-account-key-dev2)) versioned_file: intermediates/diskquota/diskquota_ubuntu18_gpdb6.tar.gz # Rel - name: bin_diskquota_gpdb6_rhel6_intermediates_rel type: gcs source: - bucket: gpdb-extensions-concourse-resources - json_key: ((extension/extensions-gcs-service-account-key)) + bucket: gp-extensions-ci + json_key: ((extension/extensions-gcs-service-account-key-dev2)) versioned_file: 
intermediates_release/diskquota/diskquota_rhel6_gpdb6.tar.gz - name: bin_diskquota_gpdb6_rhel7_intermediates_rel type: gcs source: - bucket: gpdb-extensions-concourse-resources - json_key: ((extension/extensions-gcs-service-account-key)) + bucket: gp-extensions-ci + json_key: ((extension/extensions-gcs-service-account-key-dev2)) versioned_file: intermediates_release/diskquota/diskquota_rhel7_gpdb6.tar.gz - name: bin_diskquota_gpdb6_rhel8_intermediates_rel type: gcs source: - bucket: gpdb-extensions-concourse-resources - json_key: ((extension/extensions-gcs-service-account-key)) + bucket: gp-extensions-ci + json_key: ((extension/extensions-gcs-service-account-key-dev2)) versioned_file: intermediates_release/diskquota/diskquota_rhel8_gpdb6.tar.gz - name: bin_diskquota_gpdb6_ubuntu18_intermediates_rel type: gcs source: - bucket: gpdb-extensions-concourse-resources - json_key: ((extension/extensions-gcs-service-account-key)) + bucket: gp-extensions-ci + json_key: ((extension/extensions-gcs-service-account-key-dev2)) versioned_file: intermediates_release/diskquota/diskquota_ubuntu18_gpdb6.tar.gz # For uploading to the release bucket @@ -290,8 +282,8 @@ resources: - name: bin_cmake type: gcs source: - bucket: gpdb-extensions-concourse-resources - json_key: ((extension/extensions-gcs-service-account-key)) + bucket: gp-extensions-ci + json_key: ((extension/extensions-gcs-service-account-key-dev2)) regexp: dependencies/cmake-(.*)-linux-x86_64.sh - name: slack_notify diff --git a/concourse/scripts/check-clang-format.sh b/concourse/scripts/check-clang-format.sh deleted file mode 100755 index 963fd1e67be..00000000000 --- a/concourse/scripts/check-clang-format.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash -# Due to the limitation of concourse git/github-pr resource, it is difficult to -# only check the format of the git diff. So all the source code are being -# checked. - -set -eox pipefail - -src_dir=$(dirname "${BASH_SOURCE[0]}")/../.. -pushd "${src_dir}" -git ls-files '*.c' '*.h' | \ - xargs clang-format --style=file --verbose --Werror -dry-run -popd From c37cfd3ea6e3ccb572a7ae6f7f34601e01c8288c Mon Sep 17 00:00:00 2001 From: Xiaoran Wang Date: Tue, 27 Sep 2022 07:28:49 +0800 Subject: [PATCH 215/330] Combine two object_access_hook into one (#220) --- diskquota.c | 3 --- diskquota.h | 1 + diskquota_utility.c | 43 ++----------------------------------------- gp_activetable.c | 5 ++++- 4 files changed, 7 insertions(+), 45 deletions(-) diff --git a/diskquota.c b/diskquota.c index 3072e430a80..a0d1a182fdb 100644 --- a/diskquota.c +++ b/diskquota.c @@ -252,9 +252,6 @@ _PG_init(void) init_disk_quota_enforcement(); init_active_table_hook(); - /* Add dq_object_access_hook to handle drop extension event. */ - register_diskquota_object_access_hook(); - /* start disk quota launcher only on master */ if (!IS_QUERY_DISPATCHER()) { diff --git a/diskquota.h b/diskquota.h index f204f9e267d..bb4cd2a9b46 100644 --- a/diskquota.h +++ b/diskquota.h @@ -239,4 +239,5 @@ extern void init_launcher_shmem(void); extern void vacuum_disk_quota_model(uint32 id); extern void update_monitor_db(Oid dbid, FetchTableStatType action); extern void update_monitor_db_mpp(Oid dbid, FetchTableStatType action, const char *schema); +extern void diskquota_stop_worker(void); #endif diff --git a/diskquota_utility.c b/diskquota_utility.c index a5dfc567404..8d601619c57 100644 --- a/diskquota_utility.c +++ b/diskquota_utility.c @@ -98,9 +98,7 @@ PG_FUNCTION_INFO_V1(pull_all_table_size); ddl_hint_ ? 
errhint("%s", ddl_hint_) : 0)); \ } while (0) -static object_access_hook_type next_object_access_hook; -static bool is_database_empty(void); -static void dq_object_access_hook(ObjectAccessType access, Oid classId, Oid objectId, int subId, void *arg); +static bool is_database_empty(void); static void ddl_err_code_to_err_message(MessageResult code, const char **err_msg, const char **hint_msg); static int64 get_size_in_mb(char *str); static void set_quota_config_internal(Oid targetoid, int64 quota_limit_mb, QuotaType type, float4 segratio, Oid spcoid); @@ -514,18 +512,8 @@ is_database_empty(void) return is_empty; } -/* - * Add dq_object_access_hook to handle drop extension event. - */ void -register_diskquota_object_access_hook(void) -{ - next_object_access_hook = object_access_hook; - object_access_hook = dq_object_access_hook; -} - -static void -dq_object_access_hook_on_drop(void) +diskquota_stop_worker(void) { int rc, launcher_pid; @@ -584,33 +572,6 @@ dq_object_access_hook_on_drop(void) LWLockRelease(diskquota_locks.extension_ddl_lock); } -/* - * listening on any modify on pg_extension table when: - * DROP: will send CMD_DROP_EXTENSION to diskquota laucher - */ -static void -dq_object_access_hook(ObjectAccessType access, Oid classId, Oid objectId, int subId, void *arg) -{ - if (classId != ExtensionRelationId) goto out; - - if (get_extension_oid("diskquota", true) != objectId) goto out; - - switch (access) - { - case OAT_DROP: - dq_object_access_hook_on_drop(); - break; - case OAT_POST_ALTER: - case OAT_FUNCTION_EXECUTE: - case OAT_POST_CREATE: - case OAT_NAMESPACE_SEARCH: - break; - } - -out: - if (next_object_access_hook) (*next_object_access_hook)(access, classId, objectId, subId, arg); -} - /* * For extension DDL('create extension/drop extension') * Using this function to convert error code from diskquota diff --git a/gp_activetable.c b/gp_activetable.c index e280ed2ce50..3b13002153f 100644 --- a/gp_activetable.c +++ b/gp_activetable.c @@ -190,12 +190,15 @@ object_access_hook_QuotaStmt(ObjectAccessType access, Oid classId, Oid objectId, if (prev_object_access_hook) (*prev_object_access_hook)(access, classId, objectId, subId, arg); // if is 'drop extension diskquota' - if (classId == ExtensionRelationId) + if (classId == ExtensionRelationId && access == OAT_DROP) { if (get_extension_oid("diskquota", true) == objectId) { invalidate_database_rejectmap(MyDatabaseId); } + + diskquota_stop_worker(); + return; } /* TODO: do we need to use "&&" instead of "||"? */ From 428bc3166872f8ab728f4911ee2e07de4aa0c138 Mon Sep 17 00:00:00 2001 From: Xiaoran Wang Date: Tue, 11 Oct 2022 14:35:01 +0800 Subject: [PATCH 216/330] Remove free writer gang in launcher (#233) As launcher and worker both run in bgworker, they are clients when connecting to segments. They know when should close the connections. So we directly call `DisconnectAndDestroyAllGangs` to close them and remove the code that only closes free read gangs based on timeout. For the launcher, we destroy gangs after init_datbase_list or after each time processing create/drop diskquota extension. For workers, we only destroy gangs after the extension is paused. 
--- diskquota.c | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/diskquota.c b/diskquota.c index a0d1a182fdb..77f44da90bc 100644 --- a/diskquota.c +++ b/diskquota.c @@ -22,6 +22,7 @@ #include "funcapi.h" #include "access/xact.h" +#include "cdb/cdbgang.h" #include "cdb/cdbvars.h" #include "commands/dbcommands.h" #include "executor/spi.h" @@ -31,7 +32,6 @@ #include "storage/ipc.h" #include "storage/proc.h" #include "storage/sinval.h" -#include "tcop/idle_resource_cleaner.h" #include "tcop/utility.h" #include "utils/builtins.h" #include "utils/faultinjector.h" @@ -517,6 +517,7 @@ disk_quota_worker_main(Datum main_arg) } } + bool is_gang_destroyed = false; while (!got_sigterm) { int rc; @@ -527,6 +528,12 @@ disk_quota_worker_main(Datum main_arg) /* Refresh quota model with init mode */ refresh_disk_quota_model(!MyWorkerInfo->dbEntry->inited); MyWorkerInfo->dbEntry->inited = true; + is_gang_destroyed = false; + } + else if (!is_gang_destroyed) + { + DisconnectAndDestroyAllGangs(false); + is_gang_destroyed = true; } worker_increase_epoch(MyWorkerInfo->dbEntry->dbid); @@ -626,8 +633,8 @@ disk_quota_launcher_main(Datum main_arg) create_monitor_db_table(); init_database_list(); - EnableClientWaitTimeoutInterrupt(); - StartIdleResourceCleanupTimers(); + DisconnectAndDestroyAllGangs(false); + loop_end = time(NULL); struct timeval nap; @@ -710,9 +717,7 @@ disk_quota_launcher_main(Datum main_arg) if (got_sigusr2) { got_sigusr2 = false; - CancelIdleResourceCleanupTimers(); process_extension_ddl_message(); - StartIdleResourceCleanupTimers(); sigusr2 = true; } @@ -720,9 +725,7 @@ disk_quota_launcher_main(Datum main_arg) if (got_sighup) { got_sighup = false; - CancelIdleResourceCleanupTimers(); ProcessConfigFile(PGC_SIGHUP); - StartIdleResourceCleanupTimers(); } /* From 1be0858022919abf65a361aa8b663fb32f57025f Mon Sep 17 00:00:00 2001 From: Zhang Hao Date: Tue, 18 Oct 2022 10:23:46 +0800 Subject: [PATCH 217/330] Fix bug: diskquota quota usage doesn't include uncommitted tables (#228) * Fix bug: diskquota quota usage doesn't include uncommitted tables Modify show_fast_schema_quota_view, show_fast_role_quota_view, show_fast_schema_tablespace_quota_view and show_fast_role_tablespace_quota_view: combine pg_class with relation_cache into all_relation, and join all_relation with diskquota.table_size to calculate quota usage. Add an upgrade and downgrade procedure between diskquota 2.0 and diskquota 2.1. Add upgrade and downgrade tests for diskquota 2.1. 
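The user-visible effect can be checked from a second session while the first session's transaction is still open, which is essentially what the new test_fast_quota_view isolation test below does. A minimal sketch (schema name and row count are illustrative; it assumes a schema quota was set on s1 as in the test):

```sql
-- Session 1: create and fill a table, but keep the transaction open.
BEGIN;
CREATE TABLE s1.t(i int) DISTRIBUTED BY (i);
INSERT INTO s1.t SELECT generate_series(1, 100000);

-- Session 2: s1.t is not yet visible through pg_class here, but
-- show_all_relation_view unions pg_class with the relation cache,
-- so the quota view already counts the uncommitted size.
SELECT diskquota.wait_for_worker_new_epoch();
SELECT schema_name, quota_in_mb, nspsize_in_bytes
  FROM diskquota.show_fast_schema_quota_view;
```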
--- CMakeLists.txt | 5 +- VERSION | 2 +- diskquota--2.0--2.1.sql | 210 ++++++++++++ diskquota--2.1--2.0.sql | 200 +++++++++++ diskquota--2.1.sql | 318 ++++++++++++++++++ diskquota.control | 2 +- .../expected/test_fast_quota_view.out | 184 ++++++++++ tests/isolation2/isolation2_schedule | 1 + tests/isolation2/sql/test_fast_quota_view.sql | 75 +++++ upgrade_test/CMakeLists.txt | 2 + .../2.0_test_in_2.1_quota_create_in_2.0.out | 16 + upgrade_test/expected/2.1_catalog.out | 303 +++++++++++++++++ upgrade_test/expected/2.1_cleanup_quota.out | 1 + upgrade_test/expected/2.1_install.out | 13 + .../expected/2.1_migrate_to_version_2.1.out | 10 + upgrade_test/expected/2.1_set_quota.out | 63 ++++ .../2.1_test_in_2.0_quota_create_in_2.1.out | 16 + upgrade_test/schedule_2.0--2.1 | 8 + upgrade_test/schedule_2.1--2.0 | 8 + .../2.0_test_in_2.1_quota_create_in_2.0.sql | 17 + upgrade_test/sql/2.1_catalog.sql | 81 +++++ upgrade_test/sql/2.1_cleanup_quota.sql | 1 + upgrade_test/sql/2.1_install.sql | 17 + .../sql/2.1_migrate_to_version_2.1.sql | 8 + upgrade_test/sql/2.1_set_quota.sql | 44 +++ .../2.1_test_in_2.0_quota_create_in_2.1.sql | 16 + 26 files changed, 1618 insertions(+), 3 deletions(-) create mode 100644 diskquota--2.0--2.1.sql create mode 100644 diskquota--2.1--2.0.sql create mode 100644 diskquota--2.1.sql create mode 100644 tests/isolation2/expected/test_fast_quota_view.out create mode 100644 tests/isolation2/sql/test_fast_quota_view.sql create mode 100644 upgrade_test/expected/2.0_test_in_2.1_quota_create_in_2.0.out create mode 100644 upgrade_test/expected/2.1_catalog.out create mode 100644 upgrade_test/expected/2.1_cleanup_quota.out create mode 100644 upgrade_test/expected/2.1_install.out create mode 100644 upgrade_test/expected/2.1_migrate_to_version_2.1.out create mode 100644 upgrade_test/expected/2.1_set_quota.out create mode 100644 upgrade_test/expected/2.1_test_in_2.0_quota_create_in_2.1.out create mode 100644 upgrade_test/schedule_2.0--2.1 create mode 100644 upgrade_test/schedule_2.1--2.0 create mode 100644 upgrade_test/sql/2.0_test_in_2.1_quota_create_in_2.0.sql create mode 100644 upgrade_test/sql/2.1_catalog.sql create mode 100644 upgrade_test/sql/2.1_cleanup_quota.sql create mode 100644 upgrade_test/sql/2.1_install.sql create mode 100644 upgrade_test/sql/2.1_migrate_to_version_2.1.sql create mode 100644 upgrade_test/sql/2.1_set_quota.sql create mode 100644 upgrade_test/sql/2.1_test_in_2.0_quota_create_in_2.1.sql diff --git a/CMakeLists.txt b/CMakeLists.txt index cefc9b42f84..a16d6d6dca6 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -81,7 +81,10 @@ list( diskquota--1.0.sql diskquota--1.0--2.0.sql diskquota--2.0.sql - diskquota--2.0--1.0.sql) + diskquota--2.0--1.0.sql + diskquota--2.1.sql + diskquota--2.0--2.1.sql + diskquota--2.1--2.0.sql) add_library(diskquota MODULE ${diskquota_SRC}) diff --git a/VERSION b/VERSION index 38f77a65b30..7ec1d6db408 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.0.1 +2.1.0 diff --git a/diskquota--2.0--2.1.sql b/diskquota--2.0--2.1.sql new file mode 100644 index 00000000000..a87cae7f742 --- /dev/null +++ b/diskquota--2.0--2.1.sql @@ -0,0 +1,210 @@ +-- TODO check if worker should not refresh, current lib should be diskquota-2.1.so + +-- UDF +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_schema_quota(text, text) RETURNS void STRICT AS '$libdir/diskquota-2.1.so' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_role_quota(text, text) RETURNS void STRICT AS '$libdir/diskquota-2.1.so' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION 
diskquota.init_table_size_table() RETURNS void STRICT AS '$libdir/diskquota-2.1.so' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.diskquota_fetch_table_stat(int4, oid[]) RETURNS setof diskquota.diskquota_active_table_type AS '$libdir/diskquota-2.1.so', 'diskquota_fetch_table_stat' LANGUAGE C VOLATILE; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_schema_tablespace_quota(text, text, text) RETURNS void STRICT AS '$libdir/diskquota-2.1.so' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_role_tablespace_quota(text, text, text) RETURNS void STRICT AS '$libdir/diskquota-2.1.so' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_per_segment_quota(text, float4) RETURNS void STRICT AS '$libdir/diskquota-2.1.so' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.refresh_rejectmap(diskquota.rejectmap_entry[], oid[]) RETURNS void STRICT AS '$libdir/diskquota-2.1.so' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.show_rejectmap() RETURNS setof diskquota.rejectmap_entry_detail AS '$libdir/diskquota-2.1.so', 'show_rejectmap' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.pause() RETURNS void STRICT AS '$libdir/diskquota-2.1.so', 'diskquota_pause' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.resume() RETURNS void STRICT AS '$libdir/diskquota-2.1.so', 'diskquota_resume' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.show_worker_epoch() RETURNS bigint STRICT AS '$libdir/diskquota-2.1.so', 'show_worker_epoch' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.wait_for_worker_new_epoch() RETURNS boolean STRICT AS '$libdir/diskquota-2.1.so', 'wait_for_worker_new_epoch' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.status() RETURNS TABLE ("name" text, "status" text) STRICT AS '$libdir/diskquota-2.1.so', 'diskquota_status' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.show_relation_cache() RETURNS setof diskquota.relation_cache_detail AS '$libdir/diskquota-2.1.so', 'show_relation_cache' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.relation_size_local(reltablespace oid, relfilenode oid, relpersistence "char", relstorage "char") RETURNS bigint STRICT AS '$libdir/diskquota-2.1.so', 'relation_size_local' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.pull_all_table_size(OUT tableid oid, OUT size bigint, OUT segid smallint) RETURNS SETOF RECORD AS '$libdir/diskquota-2.1.so', 'pull_all_table_size' LANGUAGE C; + +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.relation_size(relation regclass) RETURNS bigint STRICT AS $$ + SELECT SUM(size)::bigint FROM ( + SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, relstorage) AS size + FROM gp_dist_random('pg_class') WHERE oid = relation + UNION ALL + SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, relstorage) AS size + FROM pg_class WHERE oid = relation + ) AS t $$ LANGUAGE SQL; + +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.show_relation_cache_all_seg() RETURNS setof diskquota.relation_cache_detail AS $$ + WITH relation_cache AS ( + SELECT diskquota.show_relation_cache() AS a + FROM gp_dist_random('gp_id') + ) + SELECT (a).* FROM relation_cache; $$ LANGUAGE SQL; +-- UDF end + + +-- views +CREATE VIEW diskquota.show_all_relation_view AS +WITH + relation_cache AS ( + SELECT (f).* FROM diskquota.show_relation_cache() as f + ) +SELECT DISTINCT(oid), relowner, relnamespace, reltablespace from ( + SELECT relid as 
oid, owneroid as relowner, namespaceoid as relnamespace, spcnode as reltablespace FROM relation_cache + UNION + SELECT oid, relowner, relnamespace, reltablespace from pg_class +) as union_relation; + +/* ALTER */ CREATE OR REPLACE VIEW diskquota.show_fast_schema_quota_view AS +WITH + quota_usage AS ( + SELECT + relnamespace, + SUM(size) AS total_size + FROM + diskquota.table_size, + diskquota.show_all_relation_view + WHERE + tableid = diskquota.show_all_relation_view.oid AND + segid = -1 + GROUP BY + relnamespace + ) +SELECT + nspname AS schema_name, + targetoid AS schema_oid, + quotalimitMB AS quota_in_mb, + COALESCE(total_size, 0) AS nspsize_in_bytes +FROM + diskquota.quota_config JOIN + pg_namespace ON targetoid = pg_namespace.oid LEFT OUTER JOIN + quota_usage ON pg_namespace.oid = relnamespace +WHERE + quotaType = 0; -- NAMESPACE_QUOTA + +/* ALTER */ CREATE OR REPLACE VIEW diskquota.show_fast_role_quota_view AS +WITH + quota_usage AS ( + SELECT + relowner, + SUM(size) AS total_size + FROM + diskquota.table_size, + diskquota.show_all_relation_view + WHERE + tableid = diskquota.show_all_relation_view.oid AND + segid = -1 + GROUP BY + relowner + ) +SELECT + rolname AS role_name, + targetoid AS role_oid, + quotalimitMB AS quota_in_mb, + COALESCE(total_size, 0) AS rolsize_in_bytes +FROM + diskquota.quota_config JOIN + pg_roles ON targetoid = pg_roles.oid LEFT OUTER JOIN + quota_usage ON pg_roles.oid = relowner +WHERE + quotaType = 1; -- ROLE_QUOTA + +/* ALTER */ CREATE OR REPLACE VIEW diskquota.show_fast_schema_tablespace_quota_view AS +WITH + default_tablespace AS ( + SELECT dattablespace FROM pg_database + WHERE datname = current_database() + ), + quota_usage AS ( + SELECT + relnamespace, + CASE + WHEN reltablespace = 0 THEN dattablespace + ELSE reltablespace + END AS reltablespace, + SUM(size) AS total_size + FROM + diskquota.table_size, + diskquota.show_all_relation_view, + default_tablespace + WHERE + tableid = diskquota.show_all_relation_view.oid AND + segid = -1 + GROUP BY + relnamespace, + reltablespace, + dattablespace + ), + full_quota_config AS ( + SELECT + primaryOid, + tablespaceoid, + quotalimitMB + FROM + diskquota.quota_config AS config, + diskquota.target AS target + WHERE + config.targetOid = target.rowId AND + config.quotaType = target.quotaType AND + config.quotaType = 2 -- NAMESPACE_TABLESPACE_QUOTA + ) +SELECT + nspname AS schema_name, + primaryoid AS schema_oid, + spcname AS tablespace_name, + tablespaceoid AS tablespace_oid, + quotalimitMB AS quota_in_mb, + COALESCE(total_size, 0) AS nspsize_tablespace_in_bytes +FROM + full_quota_config JOIN + pg_namespace ON primaryOid = pg_namespace.oid JOIN + pg_tablespace ON tablespaceoid = pg_tablespace.oid LEFT OUTER JOIN + quota_usage ON pg_namespace.oid = relnamespace AND pg_tablespace.oid = reltablespace; + +/* ALTER */ CREATE OR REPLACE VIEW diskquota.show_fast_role_tablespace_quota_view AS +WITH + default_tablespace AS ( + SELECT dattablespace FROM pg_database + WHERE datname = current_database() + ), + quota_usage AS ( + SELECT + relowner, + CASE + WHEN reltablespace = 0 THEN dattablespace + ELSE reltablespace + END AS reltablespace, + SUM(size) AS total_size + FROM + diskquota.table_size, + diskquota.show_all_relation_view, + default_tablespace + WHERE + tableid = diskquota.show_all_relation_view.oid AND + segid = -1 + GROUP BY + relowner, + reltablespace, + dattablespace + ), + full_quota_config AS ( + SELECT + primaryOid, + tablespaceoid, + quotalimitMB + FROM + diskquota.quota_config AS config, + diskquota.target AS 
target + WHERE + config.targetOid = target.rowId AND + config.quotaType = target.quotaType AND + config.quotaType = 3 -- ROLE_TABLESPACE_QUOTA + ) +SELECT + rolname AS role_name, + primaryoid AS role_oid, + spcname AS tablespace_name, + tablespaceoid AS tablespace_oid, + quotalimitMB AS quota_in_mb, + COALESCE(total_size, 0) AS rolsize_tablespace_in_bytes +FROM + full_quota_config JOIN + pg_roles ON primaryoid = pg_roles.oid JOIN + pg_tablespace ON tablespaceoid = pg_tablespace.oid LEFT OUTER JOIN + quota_usage ON pg_roles.oid = relowner AND pg_tablespace.oid = reltablespace; + +-- view end \ No newline at end of file diff --git a/diskquota--2.1--2.0.sql b/diskquota--2.1--2.0.sql new file mode 100644 index 00000000000..16c92e4d252 --- /dev/null +++ b/diskquota--2.1--2.0.sql @@ -0,0 +1,200 @@ +-- TODO check if worker should not refresh, current lib should be diskquota-2.0.so + +-- UDF +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_schema_quota(text, text) RETURNS void STRICT AS '$libdir/diskquota-2.0.so' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_role_quota(text, text) RETURNS void STRICT AS '$libdir/diskquota-2.0.so' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.init_table_size_table() RETURNS void STRICT AS '$libdir/diskquota-2.0.so' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.diskquota_fetch_table_stat(int4, oid[]) RETURNS setof diskquota.diskquota_active_table_type AS '$libdir/diskquota-2.0.so', 'diskquota_fetch_table_stat' LANGUAGE C VOLATILE; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_schema_tablespace_quota(text, text, text) RETURNS void STRICT AS '$libdir/diskquota-2.0.so' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_role_tablespace_quota(text, text, text) RETURNS void STRICT AS '$libdir/diskquota-2.0.so' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_per_segment_quota(text, float4) RETURNS void STRICT AS '$libdir/diskquota-2.0.so' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.refresh_rejectmap(diskquota.rejectmap_entry[], oid[]) RETURNS void STRICT AS '$libdir/diskquota-2.0.so' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.show_rejectmap() RETURNS setof diskquota.rejectmap_entry_detail AS '$libdir/diskquota-2.0.so', 'show_rejectmap' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.pause() RETURNS void STRICT AS '$libdir/diskquota-2.0.so', 'diskquota_pause' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.resume() RETURNS void STRICT AS '$libdir/diskquota-2.0.so', 'diskquota_resume' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.show_worker_epoch() RETURNS bigint STRICT AS '$libdir/diskquota-2.0.so', 'show_worker_epoch' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.wait_for_worker_new_epoch() RETURNS boolean STRICT AS '$libdir/diskquota-2.0.so', 'wait_for_worker_new_epoch' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.status() RETURNS TABLE ("name" text, "status" text) STRICT AS '$libdir/diskquota-2.0.so', 'diskquota_status' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.show_relation_cache() RETURNS setof diskquota.relation_cache_detail AS '$libdir/diskquota-2.0.so', 'show_relation_cache' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.relation_size_local(reltablespace oid, relfilenode oid, relpersistence "char", relstorage "char") RETURNS bigint STRICT AS '$libdir/diskquota-2.0.so', 'relation_size_local' LANGUAGE C; +/* ALTER */ CREATE 
OR REPLACE FUNCTION diskquota.pull_all_table_size(OUT tableid oid, OUT size bigint, OUT segid smallint) RETURNS SETOF RECORD AS '$libdir/diskquota-2.0.so', 'pull_all_table_size' LANGUAGE C; + +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.relation_size(relation regclass) RETURNS bigint STRICT AS $$ + SELECT SUM(size)::bigint FROM ( + SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, relstorage) AS size + FROM gp_dist_random('pg_class') WHERE oid = relation + UNION ALL + SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, relstorage) AS size + FROM pg_class WHERE oid = relation + ) AS t $$ LANGUAGE SQL; + +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.show_relation_cache_all_seg() RETURNS setof diskquota.relation_cache_detail AS $$ + WITH relation_cache AS ( + SELECT diskquota.show_relation_cache() AS a + FROM gp_dist_random('gp_id') + ) + SELECT (a).* FROM relation_cache; $$ LANGUAGE SQL; +-- UDF end + + +-- views +/* ALTER */ CREATE OR REPLACE VIEW diskquota.show_fast_schema_quota_view AS +WITH + quota_usage AS ( + SELECT + relnamespace, + SUM(size) AS total_size + FROM + diskquota.table_size, + pg_class + WHERE + tableid = pg_class.oid AND + segid = -1 + GROUP BY + relnamespace + ) +SELECT + nspname AS schema_name, + targetoid AS schema_oid, + quotalimitMB AS quota_in_mb, + COALESCE(total_size, 0) AS nspsize_in_bytes +FROM + diskquota.quota_config JOIN + pg_namespace ON targetoid = pg_namespace.oid LEFT OUTER JOIN + quota_usage ON pg_namespace.oid = relnamespace +WHERE + quotaType = 0; -- NAMESPACE_QUOTA + +/* ALTER */ CREATE OR REPLACE VIEW diskquota.show_fast_role_quota_view AS +WITH + quota_usage AS ( + SELECT + relowner, + SUM(size) AS total_size + FROM + diskquota.table_size, + pg_class + WHERE + tableid = pg_class.oid AND + segid = -1 + GROUP BY + relowner + ) +SELECT + rolname AS role_name, + targetoid AS role_oid, + quotalimitMB AS quota_in_mb, + COALESCE(total_size, 0) AS rolsize_in_bytes +FROM + diskquota.quota_config JOIN + pg_roles ON targetoid = pg_roles.oid LEFT OUTER JOIN + quota_usage ON pg_roles.oid = relowner +WHERE + quotaType = 1; -- ROLE_QUOTA + +/* ALTER */ CREATE OR REPLACE VIEW diskquota.show_fast_schema_tablespace_quota_view AS +WITH + default_tablespace AS ( + SELECT dattablespace FROM pg_database + WHERE datname = current_database() + ), + quota_usage AS ( + SELECT + relnamespace, + CASE + WHEN reltablespace = 0 THEN dattablespace + ELSE reltablespace + END AS reltablespace, + SUM(size) AS total_size + FROM + diskquota.table_size, + pg_class, + default_tablespace + WHERE + tableid = pg_class.oid AND + segid = -1 + GROUP BY + relnamespace, + reltablespace, + dattablespace + ), + full_quota_config AS ( + SELECT + primaryOid, + tablespaceoid, + quotalimitMB + FROM + diskquota.quota_config AS config, + diskquota.target AS target + WHERE + config.targetOid = target.rowId AND + config.quotaType = target.quotaType AND + config.quotaType = 2 -- NAMESPACE_TABLESPACE_QUOTA + ) +SELECT + nspname AS schema_name, + primaryoid AS schema_oid, + spcname AS tablespace_name, + tablespaceoid AS tablespace_oid, + quotalimitMB AS quota_in_mb, + COALESCE(total_size, 0) AS nspsize_tablespace_in_bytes +FROM + full_quota_config JOIN + pg_namespace ON primaryOid = pg_namespace.oid JOIN + pg_tablespace ON tablespaceoid = pg_tablespace.oid LEFT OUTER JOIN + quota_usage ON pg_namespace.oid = relnamespace AND pg_tablespace.oid = reltablespace; + +/* ALTER */ CREATE OR REPLACE VIEW 
diskquota.show_fast_role_tablespace_quota_view AS +WITH + default_tablespace AS ( + SELECT dattablespace FROM pg_database + WHERE datname = current_database() + ), + quota_usage AS ( + SELECT + relowner, + CASE + WHEN reltablespace = 0 THEN dattablespace + ELSE reltablespace + END AS reltablespace, + SUM(size) AS total_size + FROM + diskquota.table_size, + pg_class, + default_tablespace + WHERE + tableid = pg_class.oid AND + segid = -1 + GROUP BY + relowner, + reltablespace, + dattablespace + ), + full_quota_config AS ( + SELECT + primaryOid, + tablespaceoid, + quotalimitMB + FROM + diskquota.quota_config AS config, + diskquota.target AS target + WHERE + config.targetOid = target.rowId AND + config.quotaType = target.quotaType AND + config.quotaType = 3 -- ROLE_TABLESPACE_QUOTA + ) +SELECT + rolname AS role_name, + primaryoid AS role_oid, + spcname AS tablespace_name, + tablespaceoid AS tablespace_oid, + quotalimitMB AS quota_in_mb, + COALESCE(total_size, 0) AS rolsize_tablespace_in_bytes +FROM + full_quota_config JOIN + pg_roles ON primaryoid = pg_roles.oid JOIN + pg_tablespace ON tablespaceoid = pg_tablespace.oid LEFT OUTER JOIN + quota_usage ON pg_roles.oid = relowner AND pg_tablespace.oid = reltablespace; + +DROP VIEW diskquota.show_all_relation_view; +-- view end \ No newline at end of file diff --git a/diskquota--2.1.sql b/diskquota--2.1.sql new file mode 100644 index 00000000000..de1f6f6f4e0 --- /dev/null +++ b/diskquota--2.1.sql @@ -0,0 +1,318 @@ +-- complain if script is sourced in psql, rather than via CREATE EXTENSION +\echo Use "CREATE EXTENSION diskquota" to load this file. \quit + +CREATE SCHEMA diskquota; + +-- when (quotatype == NAMESPACE_QUOTA/ROLE_QUOTA) then targetOid = role_oid/schema_oid; +-- when (quotatype == NAMESPACE_TABLESPACE_QUOTA/ROLE_TABLESPACE_QUOTA) then targetOid = diskquota.target.rowId; +CREATE TABLE diskquota.quota_config( + targetOid oid, + quotatype int, + quotalimitMB int8, + segratio float4 DEFAULT 0, + PRIMARY KEY(targetOid, quotatype) +) DISTRIBUTED BY (targetOid, quotatype); + +CREATE TABLE diskquota.target ( + rowId serial, + quotatype int, --REFERENCES disquota.quota_config.quotatype, + primaryOid oid, + tablespaceOid oid, --REFERENCES pg_tablespace.oid, + PRIMARY KEY (primaryOid, tablespaceOid, quotatype) +); + +CREATE TABLE diskquota.table_size( + tableid oid, + size bigint, + segid smallint, + PRIMARY KEY(tableid, segid) +) DISTRIBUTED BY (tableid, segid); + +CREATE TABLE diskquota.state( + state int, + PRIMARY KEY(state) +) DISTRIBUTED BY (state); + +-- diskquota.quota_config AND diskquota.target is dump-able, other table can be generate on fly +SELECT pg_catalog.pg_extension_config_dump('diskquota.quota_config', ''); +SELECT gp_segment_id, pg_catalog.pg_extension_config_dump('diskquota.quota_config', '') FROM gp_dist_random('gp_id'); +SELECT pg_catalog.pg_extension_config_dump('diskquota.target', ''); +SELECT gp_segment_id, pg_catalog.pg_extension_config_dump('diskquota.target', '') FROM gp_dist_random('gp_id'); + +CREATE TYPE diskquota.diskquota_active_table_type AS ( + "TABLE_OID" oid, + "TABLE_SIZE" int8, + "GP_SEGMENT_ID" smallint +); + +CREATE TYPE diskquota.rejectmap_entry AS ( + target_oid oid, + database_oid oid, + tablespace_oid oid, + target_type integer, + seg_exceeded boolean +); + +CREATE TYPE diskquota.rejectmap_entry_detail AS ( + target_type text, + target_oid oid, + database_oid oid, + tablespace_oid oid, + seg_exceeded boolean, + dbnode oid, + spcnode oid, + relnode oid, + segid int +); + +CREATE TYPE 
diskquota.relation_cache_detail AS ( + RELID oid, + PRIMARY_TABLE_OID oid, + AUXREL_NUM int, + OWNEROID oid, + NAMESPACEOID oid, + BACKENDID int, + SPCNODE oid, + DBNODE oid, + RELNODE oid, + RELSTORAGE "char", + AUXREL_OID oid[] +); + +CREATE FUNCTION diskquota.set_schema_quota(text, text) RETURNS void STRICT AS '$libdir/diskquota-2.1.so' LANGUAGE C; +CREATE FUNCTION diskquota.set_role_quota(text, text) RETURNS void STRICT AS '$libdir/diskquota-2.1.so' LANGUAGE C; +CREATE FUNCTION diskquota.init_table_size_table() RETURNS void STRICT AS '$libdir/diskquota-2.1.so' LANGUAGE C; +CREATE FUNCTION diskquota.diskquota_fetch_table_stat(int4, oid[]) RETURNS setof diskquota.diskquota_active_table_type AS '$libdir/diskquota-2.1.so', 'diskquota_fetch_table_stat' LANGUAGE C VOLATILE; +CREATE FUNCTION diskquota.set_schema_tablespace_quota(text, text, text) RETURNS void STRICT AS '$libdir/diskquota-2.1.so' LANGUAGE C; +CREATE FUNCTION diskquota.set_role_tablespace_quota(text, text, text) RETURNS void STRICT AS '$libdir/diskquota-2.1.so' LANGUAGE C; +CREATE FUNCTION diskquota.set_per_segment_quota(text, float4) RETURNS void STRICT AS '$libdir/diskquota-2.1.so' LANGUAGE C; +CREATE FUNCTION diskquota.refresh_rejectmap(diskquota.rejectmap_entry[], oid[]) RETURNS void STRICT AS '$libdir/diskquota-2.1.so' LANGUAGE C; +CREATE FUNCTION diskquota.show_rejectmap() RETURNS setof diskquota.rejectmap_entry_detail AS '$libdir/diskquota-2.1.so', 'show_rejectmap' LANGUAGE C; +CREATE FUNCTION diskquota.pause() RETURNS void STRICT AS '$libdir/diskquota-2.1.so', 'diskquota_pause' LANGUAGE C; +CREATE FUNCTION diskquota.resume() RETURNS void STRICT AS '$libdir/diskquota-2.1.so', 'diskquota_resume' LANGUAGE C; +CREATE FUNCTION diskquota.show_worker_epoch() RETURNS bigint STRICT AS '$libdir/diskquota-2.1.so', 'show_worker_epoch' LANGUAGE C; +CREATE FUNCTION diskquota.wait_for_worker_new_epoch() RETURNS boolean STRICT AS '$libdir/diskquota-2.1.so', 'wait_for_worker_new_epoch' LANGUAGE C; +CREATE FUNCTION diskquota.status() RETURNS TABLE ("name" text, "status" text) STRICT AS '$libdir/diskquota-2.1.so', 'diskquota_status' LANGUAGE C; +CREATE FUNCTION diskquota.show_relation_cache() RETURNS setof diskquota.relation_cache_detail AS '$libdir/diskquota-2.1.so', 'show_relation_cache' LANGUAGE C; +CREATE FUNCTION diskquota.relation_size_local(reltablespace oid, relfilenode oid, relpersistence "char", relstorage "char") RETURNS bigint STRICT AS '$libdir/diskquota-2.1.so', 'relation_size_local' LANGUAGE C; +CREATE FUNCTION diskquota.pull_all_table_size(OUT tableid oid, OUT size bigint, OUT segid smallint) RETURNS SETOF RECORD AS '$libdir/diskquota-2.1.so', 'pull_all_table_size' LANGUAGE C; + +CREATE FUNCTION diskquota.relation_size(relation regclass) RETURNS bigint STRICT AS $$ + SELECT SUM(size)::bigint FROM ( + SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, relstorage) AS size + FROM gp_dist_random('pg_class') WHERE oid = relation + UNION ALL + SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, relstorage) AS size + FROM pg_class WHERE oid = relation + ) AS t $$ LANGUAGE SQL; + +CREATE FUNCTION diskquota.show_relation_cache_all_seg() RETURNS setof diskquota.relation_cache_detail AS $$ + WITH relation_cache AS ( + SELECT diskquota.show_relation_cache() AS a + FROM gp_dist_random('gp_id') + ) + SELECT (a).* FROM relation_cache; $$ LANGUAGE SQL; + +-- view part +CREATE VIEW diskquota.show_all_relation_view AS +WITH + relation_cache AS ( + SELECT (f).* FROM 
diskquota.show_relation_cache() as f + ) +SELECT DISTINCT(oid), relowner, relnamespace, reltablespace from ( + SELECT relid as oid, owneroid as relowner, namespaceoid as relnamespace, spcnode as reltablespace FROM relation_cache + UNION + SELECT oid, relowner, relnamespace, reltablespace from pg_class +) as union_relation; + +CREATE VIEW diskquota.show_fast_schema_quota_view AS +WITH + quota_usage AS ( + SELECT + relnamespace, + SUM(size) AS total_size + FROM + diskquota.table_size, + diskquota.show_all_relation_view + WHERE + tableid = diskquota.show_all_relation_view.oid AND + segid = -1 + GROUP BY + relnamespace + ) +SELECT + nspname AS schema_name, + targetoid AS schema_oid, + quotalimitMB AS quota_in_mb, + COALESCE(total_size, 0) AS nspsize_in_bytes +FROM + diskquota.quota_config JOIN + pg_namespace ON targetoid = pg_namespace.oid LEFT OUTER JOIN + quota_usage ON pg_namespace.oid = relnamespace +WHERE + quotaType = 0; -- NAMESPACE_QUOTA + +CREATE VIEW diskquota.show_fast_role_quota_view AS +WITH + quota_usage AS ( + SELECT + relowner, + SUM(size) AS total_size + FROM + diskquota.table_size, + diskquota.show_all_relation_view + WHERE + tableid = diskquota.show_all_relation_view.oid AND + segid = -1 + GROUP BY + relowner + ) +SELECT + rolname AS role_name, + targetoid AS role_oid, + quotalimitMB AS quota_in_mb, + COALESCE(total_size, 0) AS rolsize_in_bytes +FROM + diskquota.quota_config JOIN + pg_roles ON targetoid = pg_roles.oid LEFT OUTER JOIN + quota_usage ON pg_roles.oid = relowner +WHERE + quotaType = 1; -- ROLE_QUOTA + +CREATE VIEW diskquota.show_fast_database_size_view AS +SELECT ( + (SELECT SUM(pg_relation_size(oid)) FROM pg_class WHERE oid <= 16384) + + + (SELECT SUM(size) FROM diskquota.table_size WHERE segid = -1) +) AS dbsize; + +CREATE VIEW diskquota.rejectmap AS SELECT * FROM diskquota.show_rejectmap() AS BM; + +CREATE VIEW diskquota.show_fast_schema_tablespace_quota_view AS +WITH + default_tablespace AS ( + SELECT dattablespace FROM pg_database + WHERE datname = current_database() + ), + quota_usage AS ( + SELECT + relnamespace, + CASE + WHEN reltablespace = 0 THEN dattablespace + ELSE reltablespace + END AS reltablespace, + SUM(size) AS total_size + FROM + diskquota.table_size, + diskquota.show_all_relation_view, + default_tablespace + WHERE + tableid = diskquota.show_all_relation_view.oid AND + segid = -1 + GROUP BY + relnamespace, + reltablespace, + dattablespace + ), + full_quota_config AS ( + SELECT + primaryOid, + tablespaceoid, + quotalimitMB + FROM + diskquota.quota_config AS config, + diskquota.target AS target + WHERE + config.targetOid = target.rowId AND + config.quotaType = target.quotaType AND + config.quotaType = 2 -- NAMESPACE_TABLESPACE_QUOTA + ) +SELECT + nspname AS schema_name, + primaryoid AS schema_oid, + spcname AS tablespace_name, + tablespaceoid AS tablespace_oid, + quotalimitMB AS quota_in_mb, + COALESCE(total_size, 0) AS nspsize_tablespace_in_bytes +FROM + full_quota_config JOIN + pg_namespace ON primaryOid = pg_namespace.oid JOIN + pg_tablespace ON tablespaceoid = pg_tablespace.oid LEFT OUTER JOIN + quota_usage ON pg_namespace.oid = relnamespace AND pg_tablespace.oid = reltablespace; + +CREATE VIEW diskquota.show_fast_role_tablespace_quota_view AS +WITH + default_tablespace AS ( + SELECT dattablespace FROM pg_database + WHERE datname = current_database() + ), + quota_usage AS ( + SELECT + relowner, + CASE + WHEN reltablespace = 0 THEN dattablespace + ELSE reltablespace + END AS reltablespace, + SUM(size) AS total_size + FROM + 
diskquota.table_size, + diskquota.show_all_relation_view, + default_tablespace + WHERE + tableid = diskquota.show_all_relation_view.oid AND + segid = -1 + GROUP BY + relowner, + reltablespace, + dattablespace + ), + full_quota_config AS ( + SELECT + primaryOid, + tablespaceoid, + quotalimitMB + FROM + diskquota.quota_config AS config, + diskquota.target AS target + WHERE + config.targetOid = target.rowId AND + config.quotaType = target.quotaType AND + config.quotaType = 3 -- ROLE_TABLESPACE_QUOTA + ) +SELECT + rolname AS role_name, + primaryoid AS role_oid, + spcname AS tablespace_name, + tablespaceoid AS tablespace_oid, + quotalimitMB AS quota_in_mb, + COALESCE(total_size, 0) AS rolsize_tablespace_in_bytes +FROM + full_quota_config JOIN + pg_roles ON primaryoid = pg_roles.oid JOIN + pg_tablespace ON tablespaceoid = pg_tablespace.oid LEFT OUTER JOIN + quota_usage ON pg_roles.oid = relowner AND pg_tablespace.oid = reltablespace; + +CREATE VIEW diskquota.show_segment_ratio_quota_view AS +SELECT + spcname as tablespace_name, + pg_tablespace.oid as tablespace_oid, + segratio as per_seg_quota_ratio +FROM + diskquota.quota_config JOIN + pg_tablespace ON targetOid = pg_tablespace.oid + AND quotatype = 4; + +-- view end + +-- prepare to boot +INSERT INTO diskquota.state SELECT (count(relname) = 0)::int FROM pg_class AS c, pg_namespace AS n WHERE c.oid > 16384 AND relnamespace = n.oid AND nspname != 'diskquota'; + +-- re-dispatch pause status to false. in case user pause-drop-recreate. +-- refer to see test case 'test_drop_after_pause' +SELECT FROM diskquota.resume(); + + +-- Starting the worker has to be the last step. +CREATE FUNCTION diskquota.diskquota_start_worker() RETURNS void STRICT AS '$libdir/diskquota-2.1.so' LANGUAGE C; +SELECT diskquota.diskquota_start_worker(); +DROP FUNCTION diskquota.diskquota_start_worker(); diff --git a/diskquota.control b/diskquota.control index 9a73625f62b..628714b5201 100644 --- a/diskquota.control +++ b/diskquota.control @@ -1,5 +1,5 @@ # diskquota extension comment = 'Disk Quota Main Program' -default_version = '2.0' +default_version = '2.1' module_pathname = 'do-not-use-this' relocatable = true diff --git a/tests/isolation2/expected/test_fast_quota_view.out b/tests/isolation2/expected/test_fast_quota_view.out new file mode 100644 index 00000000000..3bdc09074ff --- /dev/null +++ b/tests/isolation2/expected/test_fast_quota_view.out @@ -0,0 +1,184 @@ +CREATE SCHEMA s1; +CREATE +CREATE SCHEMA s2; +CREATE + +CREATE ROLE r LOGIN SUPERUSER; +CREATE + +-- start_ignore +!\retcode mkdir -p /tmp/spc1; +-- start_ignore + +-- end_ignore +(exited with code 0) +!\retcode mkdir -p /tmp/spc2; +-- start_ignore + +-- end_ignore +(exited with code 0) +-- end_ignore +DROP TABLESPACE IF EXISTS spc1; +DROP +CREATE TABLESPACE spc1 LOCATION '/tmp/spc1'; +CREATE +DROP TABLESPACE IF EXISTS spc2; +DROP +CREATE TABLESPACE spc2 LOCATION '/tmp/spc2'; +CREATE + +SELECT diskquota.set_schema_quota('s1', '100 MB'); + set_schema_quota +------------------ + +(1 row) +SELECT diskquota.set_schema_tablespace_quota('s2', 'spc1','100 MB'); + set_schema_tablespace_quota +----------------------------- + +(1 row) +SELECT diskquota.set_role_quota('r', '100 MB'); + set_role_quota +---------------- + +(1 row) +SELECT diskquota.set_role_tablespace_quota('r', 'spc2', '100 MB'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +-- test show_fast_schema_quota_view and show_fast_schema_tablespace_quota_view +1: BEGIN; +BEGIN +1: CREATE TABLE s1.t(i int) DISTRIBUTED BY (i); +CREATE +1: 
INSERT INTO s1.t SELECT generate_series(1, 100000); +INSERT 100000 + +1: CREATE TABLE s2.t(i int) TABLESPACE spc1 DISTRIBUTED BY (i); +CREATE +1: INSERT INTO s2.t SELECT generate_series(1, 100000); +INSERT 100000 + +1: SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- check schema quota view before transaction commits +2: SELECT schema_name, quota_in_mb, nspsize_in_bytes FROM diskquota.show_fast_schema_quota_view; + schema_name | quota_in_mb | nspsize_in_bytes +-------------+-------------+------------------ + s1 | 100 | 3932160 +(1 row) +2: SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view; + schema_name | tablespace_name | quota_in_mb | nspsize_tablespace_in_bytes +-------------+-----------------+-------------+----------------------------- + s2 | spc1 | 100 | 3932160 +(1 row) + +1: COMMIT; +COMMIT +2: SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +2: SELECT schema_name, quota_in_mb, nspsize_in_bytes FROM diskquota.show_fast_schema_quota_view; + schema_name | quota_in_mb | nspsize_in_bytes +-------------+-------------+------------------ + s1 | 100 | 3932160 +(1 row) +2: SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view; + schema_name | tablespace_name | quota_in_mb | nspsize_tablespace_in_bytes +-------------+-----------------+-------------+----------------------------- + s2 | spc1 | 100 | 3932160 +(1 row) + +-- login r to test role quota view +1: SET ROLE r; +SET + +-- test show_fast_role_quota_view and show_fast_role_tablespace_quota_view +1: BEGIN; +BEGIN +1: CREATE TABLE t1(i int) DISTRIBUTED BY (i); +CREATE +1: INSERT INTO t1 SELECT generate_series(1, 100000); +INSERT 100000 + +1: CREATE TABLE t2(i int) TABLESPACE spc2 DISTRIBUTED BY (i); +CREATE +1: INSERT INTO t2 SELECT generate_series(1, 100000); +INSERT 100000 + +1: SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- check role quota view before transaction commits +2: SELECT role_name, quota_in_mb, rolsize_in_bytes FROM diskquota.show_fast_role_quota_view; + role_name | quota_in_mb | rolsize_in_bytes +-----------+-------------+------------------ + r | 100 | 7864320 +(1 row) +2: SELECT role_name, tablespace_name, quota_in_mb, rolsize_tablespace_in_bytes FROM diskquota.show_fast_role_tablespace_quota_view; + role_name | tablespace_name | quota_in_mb | rolsize_tablespace_in_bytes +-----------+-----------------+-------------+----------------------------- + r | spc2 | 100 | 3932160 +(1 row) + +1: COMMIT; +COMMIT +2: SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +2: SELECT role_name, quota_in_mb, rolsize_in_bytes FROM diskquota.show_fast_role_quota_view; + role_name | quota_in_mb | rolsize_in_bytes +-----------+-------------+------------------ + r | 100 | 7864320 +(1 row) +2: SELECT role_name, tablespace_name, quota_in_mb, rolsize_tablespace_in_bytes FROM diskquota.show_fast_role_tablespace_quota_view; + role_name | tablespace_name | quota_in_mb | rolsize_tablespace_in_bytes +-----------+-----------------+-------------+----------------------------- + r | spc2 | 100 | 3932160 +(1 row) + +DROP TABLE IF EXISTS s1.t; +DROP +DROP TABLE IF EXISTS s2.t; +DROP +DROP TABLE IF EXISTS t1; +DROP +DROP TABLE IF 
EXISTS t2; +DROP + +DROP SCHEMA IF EXISTS s1; +DROP +DROP SCHEMA IF EXISTS s2; +DROP +DROP ROLE IF EXISTS r; +DROP + +-- start_ignore +!\retcode rm -r /tmp/spc1; +-- start_ignore + +-- end_ignore +(exited with code 0) +!\retcode rm -r /tmp/spc2; +-- start_ignore + +-- end_ignore +(exited with code 0) +-- end_ignore +DROP TABLESPACE IF EXISTS spc1; +DROP +DROP TABLESPACE IF EXISTS spc2; +DROP diff --git a/tests/isolation2/isolation2_schedule b/tests/isolation2/isolation2_schedule index af3da7127de..bf4ca896373 100644 --- a/tests/isolation2/isolation2_schedule +++ b/tests/isolation2/isolation2_schedule @@ -1,5 +1,6 @@ test: config test: test_create_extension +test: test_fast_quota_view test: test_relation_size test: test_rejectmap test: test_vacuum diff --git a/tests/isolation2/sql/test_fast_quota_view.sql b/tests/isolation2/sql/test_fast_quota_view.sql new file mode 100644 index 00000000000..c031576b34a --- /dev/null +++ b/tests/isolation2/sql/test_fast_quota_view.sql @@ -0,0 +1,75 @@ +CREATE SCHEMA s1; +CREATE SCHEMA s2; + +CREATE ROLE r LOGIN SUPERUSER; + +-- start_ignore +!\retcode mkdir -p /tmp/spc1; +!\retcode mkdir -p /tmp/spc2; +-- end_ignore +DROP TABLESPACE IF EXISTS spc1; +CREATE TABLESPACE spc1 LOCATION '/tmp/spc1'; +DROP TABLESPACE IF EXISTS spc2; +CREATE TABLESPACE spc2 LOCATION '/tmp/spc2'; + +SELECT diskquota.set_schema_quota('s1', '100 MB'); +SELECT diskquota.set_schema_tablespace_quota('s2', 'spc1','100 MB'); +SELECT diskquota.set_role_quota('r', '100 MB'); +SELECT diskquota.set_role_tablespace_quota('r', 'spc2', '100 MB'); + +-- test show_fast_schema_quota_view and show_fast_schema_tablespace_quota_view +1: BEGIN; +1: CREATE TABLE s1.t(i int) DISTRIBUTED BY (i); +1: INSERT INTO s1.t SELECT generate_series(1, 100000); + +1: CREATE TABLE s2.t(i int) TABLESPACE spc1 DISTRIBUTED BY (i); +1: INSERT INTO s2.t SELECT generate_series(1, 100000); + +1: SELECT diskquota.wait_for_worker_new_epoch(); + +-- check schema quota view before transaction commits +2: SELECT schema_name, quota_in_mb, nspsize_in_bytes FROM diskquota.show_fast_schema_quota_view; +2: SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view; + +1: COMMIT; +2: SELECT diskquota.wait_for_worker_new_epoch(); +2: SELECT schema_name, quota_in_mb, nspsize_in_bytes FROM diskquota.show_fast_schema_quota_view; +2: SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view; + +-- login r to test role quota view +1: SET ROLE r; + +-- test show_fast_role_quota_view and show_fast_role_tablespace_quota_view +1: BEGIN; +1: CREATE TABLE t1(i int) DISTRIBUTED BY (i); +1: INSERT INTO t1 SELECT generate_series(1, 100000); + +1: CREATE TABLE t2(i int) TABLESPACE spc2 DISTRIBUTED BY (i); +1: INSERT INTO t2 SELECT generate_series(1, 100000); + +1: SELECT diskquota.wait_for_worker_new_epoch(); + +-- check role quota view before transaction commits +2: SELECT role_name, quota_in_mb, rolsize_in_bytes FROM diskquota.show_fast_role_quota_view; +2: SELECT role_name, tablespace_name, quota_in_mb, rolsize_tablespace_in_bytes FROM diskquota.show_fast_role_tablespace_quota_view; + +1: COMMIT; +2: SELECT diskquota.wait_for_worker_new_epoch(); +2: SELECT role_name, quota_in_mb, rolsize_in_bytes FROM diskquota.show_fast_role_quota_view; +2: SELECT role_name, tablespace_name, quota_in_mb, rolsize_tablespace_in_bytes FROM diskquota.show_fast_role_tablespace_quota_view; + +DROP TABLE IF EXISTS s1.t; +DROP 
TABLE IF EXISTS s2.t; +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; + +DROP SCHEMA IF EXISTS s1; +DROP SCHEMA IF EXISTS s2; +DROP ROLE IF EXISTS r; + +-- start_ignore +!\retcode rm -r /tmp/spc1; +!\retcode rm -r /tmp/spc2; +-- end_ignore +DROP TABLESPACE IF EXISTS spc1; +DROP TABLESPACE IF EXISTS spc2; diff --git a/upgrade_test/CMakeLists.txt b/upgrade_test/CMakeLists.txt index 79af27813d5..63ef32df102 100644 --- a/upgrade_test/CMakeLists.txt +++ b/upgrade_test/CMakeLists.txt @@ -13,6 +13,8 @@ regresstarget_add( SCHEDULE_FILE ${CMAKE_CURRENT_SOURCE_DIR}/schedule_1.0--2.0 ${CMAKE_CURRENT_SOURCE_DIR}/schedule_2.0--1.0 + ${CMAKE_CURRENT_SOURCE_DIR}/schedule_2.0--2.1 + ${CMAKE_CURRENT_SOURCE_DIR}/schedule_2.1--2.0 REGRESS_OPTS --dbname=contrib_regression) diff --git a/upgrade_test/expected/2.0_test_in_2.1_quota_create_in_2.0.out b/upgrade_test/expected/2.0_test_in_2.1_quota_create_in_2.0.out new file mode 100644 index 00000000000..a36fcb4f8cd --- /dev/null +++ b/upgrade_test/expected/2.0_test_in_2.1_quota_create_in_2.0.out @@ -0,0 +1,16 @@ +-- need run 2.0_set_quota before run this test +-- FIXME add version check here +\!sleep 5 +insert into s1.a select generate_series(1, 10000000); -- fail. +ERROR: schema's disk space quota exceeded with name: s1 +insert into srole.b select generate_series(1, 100000); -- fail. +ERROR: role's disk space quota exceeded with name: u1 +insert into rolespcrole.b select generate_series(1, 100000); -- fail. +ERROR: tablespace: rolespc, role: rolespcu1 diskquota exceeded +insert into spcs1.a select generate_series(1, 100000); -- fail. +ERROR: tablespace: schemaspc, schema: spcs1 diskquota exceeded +drop table s1.a, srole.b, spcs1.a, rolespcrole.b; +drop schema s1, srole, spcs1, rolespcrole; +drop tablespace rolespc; +drop tablespace schemaspc; +drop role u1, rolespcu1; diff --git a/upgrade_test/expected/2.1_catalog.out b/upgrade_test/expected/2.1_catalog.out new file mode 100644 index 00000000000..7582b33a2b1 --- /dev/null +++ b/upgrade_test/expected/2.1_catalog.out @@ -0,0 +1,303 @@ +CREATE FUNCTION typeid_to_name(oid[]) RETURNS name[] AS ' + WITH io AS ( + SELECT x.i AS index, x.o AS type_id FROM ( + SELECT generate_series(1, array_length($1, 1)) AS i, unnest($1) AS o + ) AS x + ) SELECT array_agg(typname order by io.index) FROM io, pg_type t WHERE io.type_id = t.oid; +' LANGUAGE sql STABLE; +-- types +SELECT + t1.typname, + array_agg(t2.typname order by a.atttypid) typname +FROM + pg_namespace n, + pg_class c, + pg_type t1, + pg_type t2, + pg_attribute a +WHERE + n.nspname = 'diskquota' + AND c.oid = t1.typrelid + AND n.oid = t1.typnamespace + AND a.attrelid = c.oid + AND t2.oid = a.atttypid +GROUP BY + t1.typname +ORDER BY + t1.typname; + typname | typname +----------------------------------------+---------------------------------------------------------------------------------- + diskquota_active_table_type | {int8,int2,oid} + quota_config | {int8,int4,int4,oid,oid,tid,xid,xid,cid,cid,float4} + rejectmap | {bool,int4,text,oid,oid,oid,oid,oid,oid} + rejectmap_entry | {bool,int4,oid,oid,oid} + rejectmap_entry_detail | {bool,int4,text,oid,oid,oid,oid,oid,oid} + relation_cache_detail | {char,int4,int4,oid,oid,oid,oid,oid,oid,oid,_oid} + show_all_relation_view | {oid,oid,oid,oid} + show_fast_database_size_view | {numeric} + show_fast_role_quota_view | {name,int8,oid,numeric} + show_fast_role_tablespace_quota_view | {name,name,int8,oid,oid,numeric} + show_fast_schema_quota_view | {name,int8,oid,numeric} + show_fast_schema_tablespace_quota_view | 
{name,name,int8,oid,oid,numeric} + show_segment_ratio_quota_view | {name,oid,float4} + state | {int4,int4,oid,tid,xid,xid,cid,cid} + table_size | {int8,int2,int4,oid,oid,tid,xid,xid,cid,cid} + target | {int4,int4,int4,oid,oid,oid,tid,xid,xid,cid,cid} + target_rowid_seq | {bool,bool,name,int8,int8,int8,int8,int8,int8,int8,int4,oid,tid,xid,xid,cid,cid} +(17 rows) + +-- types end +-- tables +SELECT + relname, + typeid_to_name(ARRAY[c.reltype]::oid[]) as reltype, + typeid_to_name(ARRAY[c.reloftype]::oid[]) as reloftype +FROM + pg_class c, + pg_namespace n +WHERE + c.relnamespace = n.oid + AND n.nspname = 'diskquota' + and c.relkind != 'v' +ORDER BY + relname; + relname | reltype | reloftype +-----------------------------+-------------------------------+----------- + diskquota_active_table_type | {diskquota_active_table_type} | + quota_config | {quota_config} | + quota_config_pkey | | + rejectmap_entry | {rejectmap_entry} | + rejectmap_entry_detail | {rejectmap_entry_detail} | + relation_cache_detail | {relation_cache_detail} | + state | {state} | + state_pkey | | + table_size | {table_size} | + table_size_pkey | | + target | {target} | + target_pkey | | + target_rowid_seq | {target_rowid_seq} | +(13 rows) + +-- tables end +-- UDF +SELECT + proname, + typeid_to_name(ARRAY[prorettype]::oid[]) as prorettype, + typeid_to_name(proargtypes) as proargtypes, + typeid_to_name(proallargtypes) as proallargtypes, + proargmodes, + prosrc, + probin, + proacl +FROM + pg_namespace n, + pg_proc p +WHERE + n.nspname = 'diskquota' + AND n.oid = p.pronamespace + AND p.proname != 'update_diskquota_db_list' -- update_diskquota_db_list in 1.0 can not be dropd, this is acceptable +ORDER BY + proname; + proname | prorettype | proargtypes | proallargtypes | proargmodes | prosrc | probin | proacl +-----------------------------+-------------------------------+-------------------------+-----------------+-------------+----------------------------------------------------------------------------------------------------------------------+--------------------------+-------- + diskquota_fetch_table_stat | {diskquota_active_table_type} | {int4,_oid} | | | diskquota_fetch_table_stat | $libdir/diskquota-2.1.so | + init_table_size_table | {void} | | | | init_table_size_table | $libdir/diskquota-2.1.so | + pause | {void} | | | | diskquota_pause | $libdir/diskquota-2.1.so | + pull_all_table_size | {record} | | {oid,int8,int2} | {o,o,o} | pull_all_table_size | $libdir/diskquota-2.1.so | + refresh_rejectmap | {void} | {_rejectmap_entry,_oid} | | | refresh_rejectmap | $libdir/diskquota-2.1.so | + relation_size | {int8} | {regclass} | | | +| | + | | | | | SELECT SUM(size)::bigint FROM ( +| | + | | | | | SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, relstorage) AS size+| | + | | | | | FROM gp_dist_random('pg_class') WHERE oid = relation +| | + | | | | | UNION ALL +| | + | | | | | SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, relstorage) AS size+| | + | | | | | FROM pg_class WHERE oid = relation +| | + | | | | | ) AS t | | + relation_size_local | {int8} | {oid,oid,char,char} | | | relation_size_local | $libdir/diskquota-2.1.so | + resume | {void} | | | | diskquota_resume | $libdir/diskquota-2.1.so | + set_per_segment_quota | {void} | {text,float4} | | | set_per_segment_quota | $libdir/diskquota-2.1.so | + set_role_quota | {void} | {text,text} | | | set_role_quota | $libdir/diskquota-2.1.so | + set_role_tablespace_quota | {void} | {text,text,text} | | | 
set_role_tablespace_quota | $libdir/diskquota-2.1.so | + set_schema_quota | {void} | {text,text} | | | set_schema_quota | $libdir/diskquota-2.1.so | + set_schema_tablespace_quota | {void} | {text,text,text} | | | set_schema_tablespace_quota | $libdir/diskquota-2.1.so | + show_rejectmap | {rejectmap_entry_detail} | | | | show_rejectmap | $libdir/diskquota-2.1.so | + show_relation_cache | {relation_cache_detail} | | | | show_relation_cache | $libdir/diskquota-2.1.so | + show_relation_cache_all_seg | {relation_cache_detail} | | | | +| | + | | | | | WITH relation_cache AS ( +| | + | | | | | SELECT diskquota.show_relation_cache() AS a +| | + | | | | | FROM gp_dist_random('gp_id') +| | + | | | | | ) +| | + | | | | | SELECT (a).* FROM relation_cache; | | + show_worker_epoch | {int8} | | | | show_worker_epoch | $libdir/diskquota-2.1.so | + status | {record} | | {text,text} | {t,t} | diskquota_status | $libdir/diskquota-2.1.so | + wait_for_worker_new_epoch | {bool} | | | | wait_for_worker_new_epoch | $libdir/diskquota-2.1.so | +(19 rows) + +-- UDF end +-- views +SELECT + schemaname, + viewname, + definition +FROM + pg_views +WHERE + schemaname = 'diskquota' +ORDER by + schemaname, viewname; + schemaname | viewname | definition +------------+----------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + diskquota | rejectmap | SELECT bm.target_type, + + | | bm.target_oid, + + | | bm.database_oid, + + | | bm.tablespace_oid, + + | | bm.seg_exceeded, + + | | bm.dbnode, + + | | bm.spcnode, + + | | bm.relnode, + + | | bm.segid + + | | FROM diskquota.show_rejectmap() bm(target_type, target_oid, database_oid, tablespace_oid, seg_exceeded, dbnode, spcnode, relnode, segid); + diskquota | show_all_relation_view | WITH relation_cache AS ( + + | | SELECT f.relid, + + | | f.primary_table_oid, + + | | f.auxrel_num, + + | | f.owneroid, + + | | f.namespaceoid, + + | | f.backendid, + + | | f.spcnode, + + | | f.dbnode, + + | | f.relnode, + + | | f.relstorage, + + | | f.auxrel_oid + + | | FROM diskquota.show_relation_cache() f(relid, primary_table_oid, auxrel_num, owneroid, namespaceoid, backendid, spcnode, dbnode, relnode, relstorage, auxrel_oid)+ + | | ) + + | | SELECT union_relation.oid, + + | | union_relation.relowner, + + | | union_relation.relnamespace, + + | | union_relation.reltablespace + + | | FROM ( SELECT relation_cache.relid AS oid, + + | | relation_cache.owneroid AS relowner, + + | | relation_cache.namespaceoid AS relnamespace, + + | | relation_cache.spcnode AS reltablespace + + | | FROM relation_cache + + | | UNION + + | | SELECT pg_class.oid, + + | | pg_class.relowner, + + | | pg_class.relnamespace, + + | | pg_class.reltablespace + + | | FROM pg_class) union_relation + + | | GROUP BY union_relation.oid, union_relation.relowner, union_relation.relnamespace, union_relation.reltablespace; + diskquota | show_fast_database_size_view | SELECT (( SELECT sum(pg_relation_size((pg_class.oid)::regclass)) AS sum + + | | FROM pg_class + + | | WHERE (pg_class.oid <= (16384)::oid)) + ( SELECT sum(table_size.size) AS sum + + | | FROM diskquota.table_size + + | | WHERE (table_size.segid = (-1)))) AS dbsize; + diskquota | show_fast_role_quota_view | WITH quota_usage AS ( + + | | SELECT show_all_relation_view.relowner, + + | | sum(table_size.size) AS total_size + + | | FROM diskquota.table_size, + + | | diskquota.show_all_relation_view + + | | WHERE 
((table_size.tableid = show_all_relation_view.oid) AND (table_size.segid = (-1))) + + | | GROUP BY show_all_relation_view.relowner + + | | ) + + | | SELECT pg_roles.rolname AS role_name, + + | | quota_config.targetoid AS role_oid, + + | | quota_config.quotalimitmb AS quota_in_mb, + + | | COALESCE(quota_usage.total_size, (0)::numeric) AS rolsize_in_bytes + + | | FROM ((diskquota.quota_config + + | | JOIN pg_roles ON ((quota_config.targetoid = pg_roles.oid))) + + | | LEFT JOIN quota_usage ON ((pg_roles.oid = quota_usage.relowner))) + + | | WHERE (quota_config.quotatype = 1); + diskquota | show_fast_role_tablespace_quota_view | WITH default_tablespace AS ( + + | | SELECT pg_database.dattablespace + + | | FROM pg_database + + | | WHERE (pg_database.datname = current_database()) + + | | ), quota_usage AS ( + + | | SELECT show_all_relation_view.relowner, + + | | CASE + + | | WHEN (show_all_relation_view.reltablespace = (0)::oid) THEN default_tablespace.dattablespace + + | | ELSE show_all_relation_view.reltablespace + + | | END AS reltablespace, + + | | sum(table_size.size) AS total_size + + | | FROM diskquota.table_size, + + | | diskquota.show_all_relation_view, + + | | default_tablespace + + | | WHERE ((table_size.tableid = show_all_relation_view.oid) AND (table_size.segid = (-1))) + + | | GROUP BY show_all_relation_view.relowner, show_all_relation_view.reltablespace, default_tablespace.dattablespace + + | | ), full_quota_config AS ( + + | | SELECT target.primaryoid, + + | | target.tablespaceoid, + + | | config.quotalimitmb + + | | FROM diskquota.quota_config config, + + | | diskquota.target target + + | | WHERE (((config.targetoid = (target.rowid)::oid) AND (config.quotatype = target.quotatype)) AND (config.quotatype = 3)) + + | | ) + + | | SELECT pg_roles.rolname AS role_name, + + | | full_quota_config.primaryoid AS role_oid, + + | | pg_tablespace.spcname AS tablespace_name, + + | | full_quota_config.tablespaceoid AS tablespace_oid, + + | | full_quota_config.quotalimitmb AS quota_in_mb, + + | | COALESCE(quota_usage.total_size, (0)::numeric) AS rolsize_tablespace_in_bytes + + | | FROM (((full_quota_config + + | | JOIN pg_roles ON ((full_quota_config.primaryoid = pg_roles.oid))) + + | | JOIN pg_tablespace ON ((full_quota_config.tablespaceoid = pg_tablespace.oid))) + + | | LEFT JOIN quota_usage ON (((pg_roles.oid = quota_usage.relowner) AND (pg_tablespace.oid = quota_usage.reltablespace)))); + diskquota | show_fast_schema_quota_view | WITH quota_usage AS ( + + | | SELECT show_all_relation_view.relnamespace, + + | | sum(table_size.size) AS total_size + + | | FROM diskquota.table_size, + + | | diskquota.show_all_relation_view + + | | WHERE ((table_size.tableid = show_all_relation_view.oid) AND (table_size.segid = (-1))) + + | | GROUP BY show_all_relation_view.relnamespace + + | | ) + + | | SELECT pg_namespace.nspname AS schema_name, + + | | quota_config.targetoid AS schema_oid, + + | | quota_config.quotalimitmb AS quota_in_mb, + + | | COALESCE(quota_usage.total_size, (0)::numeric) AS nspsize_in_bytes + + | | FROM ((diskquota.quota_config + + | | JOIN pg_namespace ON ((quota_config.targetoid = pg_namespace.oid))) + + | | LEFT JOIN quota_usage ON ((pg_namespace.oid = quota_usage.relnamespace))) + + | | WHERE (quota_config.quotatype = 0); + diskquota | show_fast_schema_tablespace_quota_view | WITH default_tablespace AS ( + + | | SELECT pg_database.dattablespace + + | | FROM pg_database + + | | WHERE (pg_database.datname = current_database()) + + | | ), quota_usage AS ( + + | | SELECT 
show_all_relation_view.relnamespace, + + | | CASE + + | | WHEN (show_all_relation_view.reltablespace = (0)::oid) THEN default_tablespace.dattablespace + + | | ELSE show_all_relation_view.reltablespace + + | | END AS reltablespace, + + | | sum(table_size.size) AS total_size + + | | FROM diskquota.table_size, + + | | diskquota.show_all_relation_view, + + | | default_tablespace + + | | WHERE ((table_size.tableid = show_all_relation_view.oid) AND (table_size.segid = (-1))) + + | | GROUP BY show_all_relation_view.relnamespace, show_all_relation_view.reltablespace, default_tablespace.dattablespace + + | | ), full_quota_config AS ( + + | | SELECT target.primaryoid, + + | | target.tablespaceoid, + + | | config.quotalimitmb + + | | FROM diskquota.quota_config config, + + | | diskquota.target target + + | | WHERE (((config.targetoid = (target.rowid)::oid) AND (config.quotatype = target.quotatype)) AND (config.quotatype = 2)) + + | | ) + + | | SELECT pg_namespace.nspname AS schema_name, + + | | full_quota_config.primaryoid AS schema_oid, + + | | pg_tablespace.spcname AS tablespace_name, + + | | full_quota_config.tablespaceoid AS tablespace_oid, + + | | full_quota_config.quotalimitmb AS quota_in_mb, + + | | COALESCE(quota_usage.total_size, (0)::numeric) AS nspsize_tablespace_in_bytes + + | | FROM (((full_quota_config + + | | JOIN pg_namespace ON ((full_quota_config.primaryoid = pg_namespace.oid))) + + | | JOIN pg_tablespace ON ((full_quota_config.tablespaceoid = pg_tablespace.oid))) + + | | LEFT JOIN quota_usage ON (((pg_namespace.oid = quota_usage.relnamespace) AND (pg_tablespace.oid = quota_usage.reltablespace)))); + diskquota | show_segment_ratio_quota_view | SELECT pg_tablespace.spcname AS tablespace_name, + + | | pg_tablespace.oid AS tablespace_oid, + + | | quota_config.segratio AS per_seg_quota_ratio + + | | FROM (diskquota.quota_config + + | | JOIN pg_tablespace ON (((quota_config.targetoid = pg_tablespace.oid) AND (quota_config.quotatype = 4)))); +(8 rows) + +-- views end +DROP FUNCTION typeid_to_name (oid[]); diff --git a/upgrade_test/expected/2.1_cleanup_quota.out b/upgrade_test/expected/2.1_cleanup_quota.out new file mode 100644 index 00000000000..3935d709fd9 --- /dev/null +++ b/upgrade_test/expected/2.1_cleanup_quota.out @@ -0,0 +1 @@ +drop extension diskquota; diff --git a/upgrade_test/expected/2.1_install.out b/upgrade_test/expected/2.1_install.out new file mode 100644 index 00000000000..b8f98bb9278 --- /dev/null +++ b/upgrade_test/expected/2.1_install.out @@ -0,0 +1,13 @@ +-- cleanup previous diskquota installation +\! gpconfig -c shared_preload_libraries -v '' > /dev/null +\! gpstop -raf > /dev/null +\! dropdb --if-exists diskquota +-- TODO reset all diskquota GUC +\! gpstop -raf > /dev/null +-- setup basic environment +\! createdb diskquota +\! gpconfig -c shared_preload_libraries -v 'diskquota-2.1.so' > /dev/null +\! gpstop -raf > /dev/null +-- TODO setup GUC +\! gpconfig -c diskquota.naptime -v '1' > /dev/null +\! gpstop -raf > /dev/null diff --git a/upgrade_test/expected/2.1_migrate_to_version_2.1.out b/upgrade_test/expected/2.1_migrate_to_version_2.1.out new file mode 100644 index 00000000000..37ee511afcb --- /dev/null +++ b/upgrade_test/expected/2.1_migrate_to_version_2.1.out @@ -0,0 +1,10 @@ +\! gpconfig -c shared_preload_libraries -v 'diskquota-2.1.so' > /dev/null +\! gpstop -raf > /dev/null +\! 
gpconfig -s 'shared_preload_libraries' +Values on all segments are consistent +GUC : shared_preload_libraries +Master value: diskquota-2.1.so +Segment value: diskquota-2.1.so +\c +alter extension diskquota update to '2.1'; +\! sleep 5 diff --git a/upgrade_test/expected/2.1_set_quota.out b/upgrade_test/expected/2.1_set_quota.out new file mode 100644 index 00000000000..5d34aad003a --- /dev/null +++ b/upgrade_test/expected/2.1_set_quota.out @@ -0,0 +1,63 @@ +\!gpconfig -s 'shared_preload_libraries' +Values on all segments are consistent +GUC : shared_preload_libraries +Master value: diskquota-2.1.so +Segment value: diskquota-2.1.so +create extension diskquota with version '2.1'; +\!sleep 5 +-- schema quota +create schema s1; +select diskquota.set_schema_quota('s1', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +create table s1.a(i int) distributed by (i); +insert into s1.a select generate_series(1, 10000000); -- ok. +-- role quota +create schema srole; +create role u1 nologin; +NOTICE: resource queue required -- using default resource queue "pg_default" +create table srole.b (t text) distributed by (t); +alter table srole.b owner to u1; +select diskquota.set_role_quota('u1', '1 MB'); + set_role_quota +---------------- + +(1 row) + +insert into srole.b select generate_series(1,100000); -- ok. +-- schema tablespace quota +\! mkdir -p /tmp/schemaspc +create schema spcs1; +create tablespace schemaspc location '/tmp/schemaspc'; +select diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc','1 MB'); + set_schema_tablespace_quota +----------------------------- + +(1 row) + +create table spcs1.a(i int) tablespace schemaspc distributed by (i); +insert into spcs1.a select generate_series(1,100000); -- ok. +-- role tablespace quota +\! mkdir -p /tmp/rolespc +create tablespace rolespc location '/tmp/rolespc'; +create role rolespcu1 nologin; +NOTICE: resource queue required -- using default resource queue "pg_default" +create schema rolespcrole; +create table rolespcrole.b (t text) tablespace rolespc distributed by (t); +alter table rolespcrole.b owner to rolespcu1; +select diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '1 MB'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +insert into rolespcrole.b select generate_series(1,100000); -- ok. +\!sleep 5 +-- leaked resource: +-- role u1, rolespcu1 +-- table s1.a, srole.b spcs1.a, rolespcrole.b +-- schema s1, srole, spcs1, rolespcrole +-- tablespace schemaspc, rolespc diff --git a/upgrade_test/expected/2.1_test_in_2.0_quota_create_in_2.1.out b/upgrade_test/expected/2.1_test_in_2.0_quota_create_in_2.1.out new file mode 100644 index 00000000000..5c3f8c87862 --- /dev/null +++ b/upgrade_test/expected/2.1_test_in_2.0_quota_create_in_2.1.out @@ -0,0 +1,16 @@ +-- need run 2.1_set_quota before run this test +-- FIXME add version check here +\! sleep 5 +insert into s1.a select generate_series(1, 10000000); -- fail. +ERROR: schema's disk space quota exceeded with name: s1 +insert into srole.b select generate_series(1, 100000); -- fail. +ERROR: role's disk space quota exceeded with name: u1 +insert into rolespcrole.b select generate_series(1, 100000); -- fail. +ERROR: tablespace: rolespc, role: rolespcu1 diskquota exceeded +insert into spcs1.a select generate_series(1, 100000); -- fail. 
+ERROR: tablespace: schemaspc, schema: spcs1 diskquota exceeded +drop table s1.a, srole.b, spcs1.a, rolespcrole.b; +drop schema s1, srole, spcs1, rolespcrole; +drop tablespace rolespc; +drop tablespace schemaspc; +drop role u1, rolespcu1; diff --git a/upgrade_test/schedule_2.0--2.1 b/upgrade_test/schedule_2.0--2.1 new file mode 100644 index 00000000000..7aa1f1a2e3b --- /dev/null +++ b/upgrade_test/schedule_2.0--2.1 @@ -0,0 +1,8 @@ +test: 2.0_install +test: 2.0_set_quota +test: 2.0_catalog +test: 2.1_migrate_to_version_2.1 +test: 2.1_catalog +# run 2.0 behavior test using 2.1 DDL and binary +test: 2.0_test_in_2.1_quota_create_in_2.0 +test: 2.0_cleanup_quota diff --git a/upgrade_test/schedule_2.1--2.0 b/upgrade_test/schedule_2.1--2.0 new file mode 100644 index 00000000000..24590df38c9 --- /dev/null +++ b/upgrade_test/schedule_2.1--2.0 @@ -0,0 +1,8 @@ +test: 2.1_install +test: 2.1_set_quota +test: 2.1_catalog +test: 2.0_migrate_to_version_2.0 +test: 2.0_catalog +# run 2.1 behavior test using 2.0 DDL and binary +test: 2.1_test_in_2.0_quota_create_in_2.1 +test: 2.1_cleanup_quota diff --git a/upgrade_test/sql/2.0_test_in_2.1_quota_create_in_2.0.sql b/upgrade_test/sql/2.0_test_in_2.1_quota_create_in_2.0.sql new file mode 100644 index 00000000000..c2d9dbe33ea --- /dev/null +++ b/upgrade_test/sql/2.0_test_in_2.1_quota_create_in_2.0.sql @@ -0,0 +1,17 @@ +-- need run 2.0_set_quota before run this test +-- FIXME add version check here + +\!sleep 5 + +insert into s1.a select generate_series(1, 10000000); -- fail. +insert into srole.b select generate_series(1, 100000); -- fail. + +insert into rolespcrole.b select generate_series(1, 100000); -- fail. +insert into spcs1.a select generate_series(1, 100000); -- fail. + +drop table s1.a, srole.b, spcs1.a, rolespcrole.b; +drop schema s1, srole, spcs1, rolespcrole; +drop tablespace rolespc; +drop tablespace schemaspc; +drop role u1, rolespcu1; + diff --git a/upgrade_test/sql/2.1_catalog.sql b/upgrade_test/sql/2.1_catalog.sql new file mode 100644 index 00000000000..ebf5f00aa56 --- /dev/null +++ b/upgrade_test/sql/2.1_catalog.sql @@ -0,0 +1,81 @@ +CREATE FUNCTION typeid_to_name(oid[]) RETURNS name[] AS ' + WITH io AS ( + SELECT x.i AS index, x.o AS type_id FROM ( + SELECT generate_series(1, array_length($1, 1)) AS i, unnest($1) AS o + ) AS x + ) SELECT array_agg(typname order by io.index) FROM io, pg_type t WHERE io.type_id = t.oid; +' LANGUAGE sql STABLE; + +-- types +SELECT + t1.typname, + array_agg(t2.typname order by a.atttypid) typname +FROM + pg_namespace n, + pg_class c, + pg_type t1, + pg_type t2, + pg_attribute a +WHERE + n.nspname = 'diskquota' + AND c.oid = t1.typrelid + AND n.oid = t1.typnamespace + AND a.attrelid = c.oid + AND t2.oid = a.atttypid +GROUP BY + t1.typname +ORDER BY + t1.typname; +-- types end + +-- tables +SELECT + relname, + typeid_to_name(ARRAY[c.reltype]::oid[]) as reltype, + typeid_to_name(ARRAY[c.reloftype]::oid[]) as reloftype +FROM + pg_class c, + pg_namespace n +WHERE + c.relnamespace = n.oid + AND n.nspname = 'diskquota' + and c.relkind != 'v' +ORDER BY + relname; +-- tables end + +-- UDF +SELECT + proname, + typeid_to_name(ARRAY[prorettype]::oid[]) as prorettype, + typeid_to_name(proargtypes) as proargtypes, + typeid_to_name(proallargtypes) as proallargtypes, + proargmodes, + prosrc, + probin, + proacl +FROM + pg_namespace n, + pg_proc p +WHERE + n.nspname = 'diskquota' + AND n.oid = p.pronamespace + AND p.proname != 'update_diskquota_db_list' -- update_diskquota_db_list in 1.0 can not be dropd, this is acceptable +ORDER BY + 
proname; +-- UDF end + +-- views +SELECT + schemaname, + viewname, + definition +FROM + pg_views +WHERE + schemaname = 'diskquota' +ORDER by + schemaname, viewname; +-- views end + +DROP FUNCTION typeid_to_name (oid[]); diff --git a/upgrade_test/sql/2.1_cleanup_quota.sql b/upgrade_test/sql/2.1_cleanup_quota.sql new file mode 100644 index 00000000000..3935d709fd9 --- /dev/null +++ b/upgrade_test/sql/2.1_cleanup_quota.sql @@ -0,0 +1 @@ +drop extension diskquota; diff --git a/upgrade_test/sql/2.1_install.sql b/upgrade_test/sql/2.1_install.sql new file mode 100644 index 00000000000..645c6b34eae --- /dev/null +++ b/upgrade_test/sql/2.1_install.sql @@ -0,0 +1,17 @@ +-- cleanup previous diskquota installation +\! gpconfig -c shared_preload_libraries -v '' > /dev/null +\! gpstop -raf > /dev/null +\! dropdb --if-exists diskquota + +-- TODO reset all diskquota GUC +\! gpstop -raf > /dev/null + +-- setup basic environment +\! createdb diskquota + +\! gpconfig -c shared_preload_libraries -v 'diskquota-2.1.so' > /dev/null +\! gpstop -raf > /dev/null + +-- TODO setup GUC +\! gpconfig -c diskquota.naptime -v '1' > /dev/null +\! gpstop -raf > /dev/null diff --git a/upgrade_test/sql/2.1_migrate_to_version_2.1.sql b/upgrade_test/sql/2.1_migrate_to_version_2.1.sql new file mode 100644 index 00000000000..d9c9bc96d6a --- /dev/null +++ b/upgrade_test/sql/2.1_migrate_to_version_2.1.sql @@ -0,0 +1,8 @@ +\! gpconfig -c shared_preload_libraries -v 'diskquota-2.1.so' > /dev/null +\! gpstop -raf > /dev/null + +\! gpconfig -s 'shared_preload_libraries' + +\c +alter extension diskquota update to '2.1'; +\! sleep 5 diff --git a/upgrade_test/sql/2.1_set_quota.sql b/upgrade_test/sql/2.1_set_quota.sql new file mode 100644 index 00000000000..9711d953732 --- /dev/null +++ b/upgrade_test/sql/2.1_set_quota.sql @@ -0,0 +1,44 @@ +\!gpconfig -s 'shared_preload_libraries' + +create extension diskquota with version '2.1'; +\!sleep 5 + +-- schema quota +create schema s1; +select diskquota.set_schema_quota('s1', '1 MB'); +create table s1.a(i int) distributed by (i); +insert into s1.a select generate_series(1, 10000000); -- ok. + +-- role quota +create schema srole; +create role u1 nologin; +create table srole.b (t text) distributed by (t); +alter table srole.b owner to u1; +select diskquota.set_role_quota('u1', '1 MB'); +insert into srole.b select generate_series(1,100000); -- ok. + +-- schema tablespace quota +\! mkdir -p /tmp/schemaspc +create schema spcs1; +create tablespace schemaspc location '/tmp/schemaspc'; +select diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc','1 MB'); +create table spcs1.a(i int) tablespace schemaspc distributed by (i); +insert into spcs1.a select generate_series(1,100000); -- ok. + +-- role tablespace quota +\! mkdir -p /tmp/rolespc +create tablespace rolespc location '/tmp/rolespc'; +create role rolespcu1 nologin; +create schema rolespcrole; +create table rolespcrole.b (t text) tablespace rolespc distributed by (t); +alter table rolespcrole.b owner to rolespcu1; +select diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '1 MB'); +insert into rolespcrole.b select generate_series(1,100000); -- ok. 
+ +\!sleep 5 + +-- leaked resource: +-- role u1, rolespcu1 +-- table s1.a, srole.b spcs1.a, rolespcrole.b +-- schema s1, srole, spcs1, rolespcrole +-- tablespace schemaspc, rolespc diff --git a/upgrade_test/sql/2.1_test_in_2.0_quota_create_in_2.1.sql b/upgrade_test/sql/2.1_test_in_2.0_quota_create_in_2.1.sql new file mode 100644 index 00000000000..974df545602 --- /dev/null +++ b/upgrade_test/sql/2.1_test_in_2.0_quota_create_in_2.1.sql @@ -0,0 +1,16 @@ +-- need run 2.1_set_quota before run this test +-- FIXME add version check here + +\! sleep 5 + +insert into s1.a select generate_series(1, 10000000); -- fail. +insert into srole.b select generate_series(1, 100000); -- fail. + +insert into rolespcrole.b select generate_series(1, 100000); -- fail. +insert into spcs1.a select generate_series(1, 100000); -- fail. + +drop table s1.a, srole.b, spcs1.a, rolespcrole.b; +drop schema s1, srole, spcs1, rolespcrole; +drop tablespace rolespc; +drop tablespace schemaspc; +drop role u1, rolespcu1; From b19cfb7de588d5f7da1f71b3f58b0895b835401d Mon Sep 17 00:00:00 2001 From: Xiaoran Wang Date: Tue, 18 Oct 2022 14:41:06 +0800 Subject: [PATCH 218/330] Fix flaky test (#240) --- tests/regress/expected/test_ctas_role.out | 19 +++++++++++++++++-- tests/regress/expected/test_extension.out | 2 +- tests/regress/sql/test_ctas_role.sql | 7 +++++-- tests/regress/sql/test_extension.sql | 2 +- 4 files changed, 24 insertions(+), 6 deletions(-) diff --git a/tests/regress/expected/test_ctas_role.out b/tests/regress/expected/test_ctas_role.out index db688a3fd73..174e43f8090 100644 --- a/tests/regress/expected/test_ctas_role.out +++ b/tests/regress/expected/test_ctas_role.out @@ -1,6 +1,21 @@ -- Test that diskquota is able to cancel a running CTAS query by the role quota. -\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null -\! gpstop -u > /dev/null +-- start_ignore +\! gpconfig -c "diskquota.hard_limit" -v "on" +20221018:11:33:42:045232 gpconfig:wxiaoran-a02:wxiaoran-[INFO]:-completed successfully with parameters '-c diskquota.hard_limit -v on' +\! gpstop -u +20221018:11:33:43:045341 gpstop:wxiaoran-a02:wxiaoran-[INFO]:-Starting gpstop with args: -u +20221018:11:33:43:045341 gpstop:wxiaoran-a02:wxiaoran-[INFO]:-Gathering information and validating the environment... +20221018:11:33:43:045341 gpstop:wxiaoran-a02:wxiaoran-[INFO]:-Obtaining Greenplum Master catalog information +20221018:11:33:43:045341 gpstop:wxiaoran-a02:wxiaoran-[INFO]:-Obtaining Segment details from master... +20221018:11:33:43:045341 gpstop:wxiaoran-a02:wxiaoran-[INFO]:-Greenplum Version: 'postgres (Greenplum Database) 6.22.1+dev.5.g4bb76bee5c build dev' +20221018:11:33:43:045341 gpstop:wxiaoran-a02:wxiaoran-[INFO]:-Signalling all postmaster processes to reload +-- end_ignore +SHOW diskquota.hard_limit; + diskquota.hard_limit +---------------------- + on +(1 row) + CREATE ROLE hardlimit_r; NOTICE: resource queue required -- using default resource queue "pg_default" SELECT diskquota.set_role_quota('hardlimit_r', '1MB'); diff --git a/tests/regress/expected/test_extension.out b/tests/regress/expected/test_extension.out index 579b05b80c8..4d53bccbc06 100644 --- a/tests/regress/expected/test_extension.out +++ b/tests/regress/expected/test_extension.out @@ -59,7 +59,7 @@ show diskquota.max_workers; 20 (1 row) -\! sleep 1.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +\! 
sleep 3; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l 2 -- FIXME: We need to sleep for a while each time after CREATE EXTENSION and -- DROP EXTENSION to wait for the bgworker to start or to exit. diff --git a/tests/regress/sql/test_ctas_role.sql b/tests/regress/sql/test_ctas_role.sql index ef554bb9331..ce1c1d19022 100644 --- a/tests/regress/sql/test_ctas_role.sql +++ b/tests/regress/sql/test_ctas_role.sql @@ -1,6 +1,9 @@ -- Test that diskquota is able to cancel a running CTAS query by the role quota. -\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null -\! gpstop -u > /dev/null +-- start_ignore +\! gpconfig -c "diskquota.hard_limit" -v "on" +\! gpstop -u +-- end_ignore +SHOW diskquota.hard_limit; CREATE ROLE hardlimit_r; SELECT diskquota.set_role_quota('hardlimit_r', '1MB'); GRANT USAGE ON SCHEMA diskquota TO hardlimit_r; diff --git a/tests/regress/sql/test_extension.sql b/tests/regress/sql/test_extension.sql index 1e2c5ca6dc6..141936b3f10 100644 --- a/tests/regress/sql/test_extension.sql +++ b/tests/regress/sql/test_extension.sql @@ -20,7 +20,7 @@ CREATE DATABASE dbx10 ; show max_worker_processes; show diskquota.max_workers; -\! sleep 1.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +\! sleep 3; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l -- FIXME: We need to sleep for a while each time after CREATE EXTENSION and -- DROP EXTENSION to wait for the bgworker to start or to exit. From f45407a8c66820c601b81a2ac2c8c728ea29203a Mon Sep 17 00:00:00 2001 From: Xiaoran Wang Date: Wed, 19 Oct 2022 10:11:09 +0800 Subject: [PATCH 219/330] Fix flaky test test_ctas_role (#242) Creating the table ran too fast to trigger the hard quota limit, so insert more rows to give the quota check a chance to fire. --- tests/regress/expected/test_ctas_role.out | 19 +++---------------- tests/regress/sql/test_ctas_role.sql | 7 +++---- 2 files changed, 6 insertions(+), 20 deletions(-) diff --git a/tests/regress/expected/test_ctas_role.out b/tests/regress/expected/test_ctas_role.out index 174e43f8090..be01152985c 100644 --- a/tests/regress/expected/test_ctas_role.out +++ b/tests/regress/expected/test_ctas_role.out @@ -1,21 +1,8 @@ -- Test that diskquota is able to cancel a running CTAS query by the role quota. -- start_ignore -\! gpconfig -c "diskquota.hard_limit" -v "on" -20221018:11:33:42:045232 gpconfig:wxiaoran-a02:wxiaoran-[INFO]:-completed successfully with parameters '-c diskquota.hard_limit -v on' -\! gpstop -u -20221018:11:33:43:045341 gpstop:wxiaoran-a02:wxiaoran-[INFO]:-Starting gpstop with args: -u -20221018:11:33:43:045341 gpstop:wxiaoran-a02:wxiaoran-[INFO]:-Gathering information and validating the environment... -20221018:11:33:43:045341 gpstop:wxiaoran-a02:wxiaoran-[INFO]:-Obtaining Greenplum Master catalog information -20221018:11:33:43:045341 gpstop:wxiaoran-a02:wxiaoran-[INFO]:-Obtaining Segment details from master... -20221018:11:33:43:045341 gpstop:wxiaoran-a02:wxiaoran-[INFO]:-Greenplum Version: 'postgres (Greenplum Database) 6.22.1+dev.5.g4bb76bee5c build dev' -20221018:11:33:43:045341 gpstop:wxiaoran-a02:wxiaoran-[INFO]:-Signalling all postmaster processes to reload +\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null +\!
gpstop -u > /dev/null -- end_ignore CREATE ROLE hardlimit_r; NOTICE: resource queue required -- using default resource queue "pg_default" SELECT diskquota.set_role_quota('hardlimit_r', '1MB'); set_role_quota ---------------- (1 row) GRANT USAGE ON SCHEMA diskquota TO hardlimit_r; SET ROLE hardlimit_r; -- heap table -CREATE TABLE t1 (i) AS SELECT generate_series(1, 10000000) DISTRIBUTED BY (i); +CREATE TABLE t1 (i) AS SELECT generate_series(1, 100000000) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. [hardlimit] role's disk space quota exceeded diff --git a/tests/regress/sql/test_ctas_role.sql b/tests/regress/sql/test_ctas_role.sql index ce1c1d19022..93e1c628550 100644 --- a/tests/regress/sql/test_ctas_role.sql +++ b/tests/regress/sql/test_ctas_role.sql @@ -1,16 +1,15 @@ -- Test that diskquota is able to cancel a running CTAS query by the role quota. -- start_ignore -\! gpconfig -c "diskquota.hard_limit" -v "on" -\! gpstop -u +\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null +\! gpstop -u > /dev/null -- end_ignore -SHOW diskquota.hard_limit; CREATE ROLE hardlimit_r; SELECT diskquota.set_role_quota('hardlimit_r', '1MB'); GRANT USAGE ON SCHEMA diskquota TO hardlimit_r; SET ROLE hardlimit_r; -- heap table -CREATE TABLE t1 (i) AS SELECT generate_series(1, 10000000) DISTRIBUTED BY (i); +CREATE TABLE t1 (i) AS SELECT generate_series(1, 100000000) DISTRIBUTED BY (i); SELECT diskquota.wait_for_worker_new_epoch(); -- temp table From 007df5d09bd73102ea3106997c37b3ae7cfd6a8f Mon Sep 17 00:00:00 2001 From: Zhang Hao Date: Thu, 20 Oct 2022 11:38:19 +0800 Subject: [PATCH 220/330] Support at least 20000 tables for each database (#239) Currently, diskquota supports at most 8000 tables, including partition tables. This may cause unexpected results when a customer cluster contains more tables than that. This PR changes the `init_size` of table_size_map to 20000 instead of MAX_TABLES, so that we economize the actual shared-memory (SHM) usage. To handle scenarios with a large number of tables, we set `max_size` to MAX_TABLES (1000000). To avoid memory waste, only the master allocates shared memory for diskquota_launcher and diskquota_worker.
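For illustration, a minimal sketch of the dynahash sizing pattern this change relies on; the helper name create_table_size_map and the Oid key are assumptions of the sketch, not part of the patch (the actual code appears in the diff below):

#include "postgres.h"
#include "storage/shmem.h"
#include "utils/hsearch.h"

/*
 * Sketch only, not the patch itself: ShmemInitHash() pre-allocates just
 * init_size entries in shared memory and grows the hash on demand up to
 * max_size, so a small init_size saves SHM while MAX_TABLES remains the
 * hard cap.  The shmem reservation still budgets for the worst case via
 * hash_estimate_size(MAX_TABLES, sizeof(TableSizeEntry)).
 */
static HTAB *
create_table_size_map(const char *name) /* hypothetical helper */
{
    HASHCTL hash_ctl;

    memset(&hash_ctl, 0, sizeof(hash_ctl));
    hash_ctl.keysize   = sizeof(Oid); /* assumption: keyed by reloid */
    hash_ctl.entrysize = sizeof(TableSizeEntry);
    hash_ctl.hash      = tag_hash;

    /* start small (INIT_TABLES), allow growth up to the cap (MAX_TABLES) */
    return ShmemInitHash(name, INIT_TABLES, MAX_TABLES, &hash_ctl,
                         HASH_ELEM | HASH_FUNCTION);
}

With these parameters, each per-database map pre-allocates only 20 * 1024 entries, while dynahash still enforces MAX_TABLES as the upper bound.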
--- quotamodel.c | 85 +++++++++++++++++++++++++++++++++++++--------------- 1 file changed, 61 insertions(+), 24 deletions(-) diff --git a/quotamodel.c b/quotamodel.c index 47cd7fa2451..4d859658c32 100644 --- a/quotamodel.c +++ b/quotamodel.c @@ -44,7 +44,10 @@ /* cluster level max size of rejectmap */ #define MAX_DISK_QUOTA_REJECT_ENTRIES (1024 * 1024) -#define MAX_TABLES (1024L * 8) +/* init size of table_size_map */ +#define INIT_TABLES (20 * 1024) +/* max size of table_size_map */ +#define MAX_TABLES (1024 * 1024) /* cluster level init size of rejectmap */ #define INIT_DISK_QUOTA_REJECT_ENTRIES 8192 /* per database level max size of rejectmap */ @@ -68,18 +71,24 @@ int SEGCOUNT = 0; */ struct TableSizeEntry { - Oid reloid; - int16 segid; - Oid tablespaceoid; - Oid namespaceoid; - Oid owneroid; + Oid reloid; + int segid; + Oid tablespaceoid; + Oid namespaceoid; + Oid owneroid; + uint32 flag; /* flag's each bit is used to show the table's status, + * which is described in TableSizeEntryFlag. + */ int64 totalsize; /* table size including fsm, visibility map * etc. */ - bool is_exist; /* flag used to check whether table is already - * dropped */ - bool need_flush; /* whether need to flush to table table_size */ }; +typedef enum +{ + TABLE_EXIST = (1 << 0), /* whether table is already dropped */ + TABLE_NEED_FLUSH = (1 << 1) /* whether need to flush to table table_size */ +} TableSizeEntryFlag; + /* * table disk size and corresponding schema and owner */ @@ -194,6 +203,10 @@ static void export_exceeded_error(GlobalRejectMapEntry *entry, bool skip_name); void truncateStringInfo(StringInfo str, int nchars); static void format_name(const char *prefix, uint32 id, StringInfo str); +static bool get_table_size_entry_flag(TableSizeEntry *entry, TableSizeEntryFlag flag); +static void reset_table_size_entry_flag(TableSizeEntry *entry, TableSizeEntryFlag flag); +static void set_table_size_entry_flag(TableSizeEntry *entry, TableSizeEntryFlag flag); + /* add a new entry quota or update the old entry quota */ static void update_size_for_quota(int64 size, QuotaType type, Oid *keys, int16 segid) @@ -440,7 +453,7 @@ static Size diskquota_worker_shmem_size() { Size size; - size = hash_estimate_size(1024 * 1024, sizeof(TableSizeEntry)); + size = hash_estimate_size(MAX_TABLES, sizeof(TableSizeEntry)); size = add_size(size, hash_estimate_size(MAX_LOCAL_DISK_QUOTA_REJECT_ENTRIES, sizeof(LocalRejectMapEntry))); size = add_size(size, hash_estimate_size(1024L, sizeof(struct QuotaMapEntry)) * NUM_QUOTA_TYPES); return size; @@ -462,8 +475,13 @@ DiskQuotaShmemSize(void) size = add_size(size, hash_estimate_size(diskquota_max_active_tables, sizeof(Oid))); size = add_size(size, hash_estimate_size(MAX_NUM_MONITORED_DB, sizeof(struct MonitorDBEntryStruct))); // monitored_dbid_cache - size = add_size(size, diskquota_launcher_shmem_size()); - size = add_size(size, diskquota_worker_shmem_size() * MAX_NUM_MONITORED_DB); + + if (IS_QUERY_DISPATCHER()) + { + size = add_size(size, diskquota_launcher_shmem_size()); + size = add_size(size, diskquota_worker_shmem_size() * MAX_NUM_MONITORED_DB); + } + return size; } @@ -484,7 +502,7 @@ init_disk_quota_model(uint32 id) hash_ctl.hash = tag_hash; format_name("TableSizeEntrymap", id, &str); - table_size_map = ShmemInitHash(str.data, MAX_TABLES, MAX_TABLES, &hash_ctl, HASH_ELEM | HASH_FUNCTION); + table_size_map = ShmemInitHash(str.data, INIT_TABLES, MAX_TABLES, &hash_ctl, HASH_ELEM | HASH_FUNCTION); /* for localrejectmap */ memset(&hash_ctl, 0, sizeof(hash_ctl)); @@ -542,7 +560,7 @@ 
vacuum_disk_quota_model(uint32 id) hash_ctl.hash = tag_hash; format_name("TableSizeEntrymap", id, &str); - table_size_map = ShmemInitHash(str.data, MAX_TABLES, MAX_TABLES, &hash_ctl, HASH_ELEM | HASH_FUNCTION); + table_size_map = ShmemInitHash(str.data, INIT_TABLES, MAX_TABLES, &hash_ctl, HASH_ELEM | HASH_FUNCTION); hash_seq_init(&iter, table_size_map); while ((tsentry = hash_seq_search(&iter)) != NULL) { @@ -834,7 +852,7 @@ calculate_table_disk_usage(bool is_init, HTAB *local_active_table_stat_map) hash_seq_init(&iter, table_size_map); while ((tsentry = hash_seq_search(&iter)) != NULL) { - tsentry->is_exist = false; + reset_table_size_entry_flag(tsentry, TABLE_EXIST); } /* @@ -903,11 +921,12 @@ calculate_table_disk_usage(bool is_init, HTAB *local_active_table_stat_map) tsentry->owneroid = InvalidOid; tsentry->namespaceoid = InvalidOid; tsentry->tablespaceoid = InvalidOid; - tsentry->need_flush = true; + tsentry->flag = 0; + set_table_size_entry_flag(tsentry, TABLE_NEED_FLUSH); } /* mark tsentry is_exist */ - if (tsentry) tsentry->is_exist = true; + if (tsentry) set_table_size_entry_flag(tsentry, TABLE_EXIST); active_table_entry = (DiskQuotaActiveTableEntry *)hash_search(local_active_table_stat_map, &key, HASH_FIND, &active_tbl_found); @@ -927,8 +946,8 @@ calculate_table_disk_usage(bool is_init, HTAB *local_active_table_stat_map) updated_total_size = active_table_entry->tablesize - tsentry->totalsize; /* update the table_size entry */ - tsentry->totalsize = (int64)active_table_entry->tablesize; - tsentry->need_flush = true; + tsentry->totalsize = (int64)active_table_entry->tablesize; + set_table_size_entry_flag(tsentry, TABLE_NEED_FLUSH); /* update the disk usage, there may be entries in the map whose keys are InvlidOid as the tsentry does * not exist in the table_size_map */ @@ -942,7 +961,7 @@ calculate_table_disk_usage(bool is_init, HTAB *local_active_table_stat_map) /* table size info doesn't need to flush at init quota model stage */ if (is_init) { - tsentry->need_flush = false; + reset_table_size_entry_flag(tsentry, TABLE_NEED_FLUSH); } /* if schema change, transfer the file size */ @@ -992,7 +1011,7 @@ calculate_table_disk_usage(bool is_init, HTAB *local_active_table_stat_map) hash_seq_init(&iter, table_size_map); while ((tsentry = hash_seq_search(&iter)) != NULL) { - if (tsentry->is_exist == false) + if (!get_table_size_entry_flag(tsentry, TABLE_EXIST)) { update_size_for_quota(-tsentry->totalsize, NAMESPACE_QUOTA, (Oid[]){tsentry->namespaceoid}, tsentry->segid); update_size_for_quota(-tsentry->totalsize, ROLE_QUOTA, (Oid[]){tsentry->owneroid}, tsentry->segid); @@ -1040,7 +1059,7 @@ flush_to_table_size(void) while ((tsentry = hash_seq_search(&iter)) != NULL) { /* delete dropped table from both table_size_map and table table_size */ - if (tsentry->is_exist == false) + if (!get_table_size_entry_flag(tsentry, TABLE_EXIST)) { appendStringInfo(&deleted_table_expr, "(%u,%d), ", tsentry->reloid, tsentry->segid); delete_statement_flag = true; @@ -1048,9 +1067,9 @@ flush_to_table_size(void) hash_search(table_size_map, &tsentry->reloid, HASH_REMOVE, NULL); } /* update the table size by delete+insert in table table_size */ - else if (tsentry->need_flush == true) + else if (get_table_size_entry_flag(tsentry, TABLE_NEED_FLUSH)) { - tsentry->need_flush = false; + reset_table_size_entry_flag(tsentry, TABLE_NEED_FLUSH); appendStringInfo(&deleted_table_expr, "(%u,%d), ", tsentry->reloid, tsentry->segid); appendStringInfo(&insert_statement, "(%u,%ld,%d), ", tsentry->reloid, tsentry->totalsize, 
tsentry->segid); delete_statement_flag = true; @@ -2103,3 +2122,21 @@ format_name(const char *prefix, uint32 id, StringInfo str) appendStringInfo(str, "%s_%u", prefix, id); Assert(str->len <= SHMEM_INDEX_KEYSIZE); } + +static bool +get_table_size_entry_flag(TableSizeEntry *entry, TableSizeEntryFlag flag) +{ + return (entry->flag & flag) ? true : false; +} + +static void +reset_table_size_entry_flag(TableSizeEntry *entry, TableSizeEntryFlag flag) +{ + entry->flag &= (UINT32_MAX ^ flag); +} + +static void +set_table_size_entry_flag(TableSizeEntry *entry, TableSizeEntryFlag flag) +{ + entry->flag |= flag; +} From e6e501e4c3287fda93385013d57b4f60f3613b7e Mon Sep 17 00:00:00 2001 From: Zhang Hao Date: Mon, 24 Oct 2022 14:24:23 +0800 Subject: [PATCH 221/330] Reduce MAX_TABLES to 200K. (#243) The scale of 50 databases with 200K tables is large enough, so we reduce MAX_TABLES to 200K to avoid memory waste. --- quotamodel.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/quotamodel.c b/quotamodel.c index 4d859658c32..00a520289eb 100644 --- a/quotamodel.c +++ b/quotamodel.c @@ -47,7 +47,7 @@ /* init size of table_size_map */ #define INIT_TABLES (20 * 1024) /* max size of table_size_map */ -#define MAX_TABLES (1024 * 1024) +#define MAX_TABLES (200 * 1024) /* cluster level init size of rejectmap */ #define INIT_DISK_QUOTA_REJECT_ENTRIES 8192 /* per database level max size of rejectmap */ From 5de75db8310c3dd3eac4a7fdb4f5eb615e5c45ef Mon Sep 17 00:00:00 2001 From: Xiaoran Wang Date: Fri, 28 Oct 2022 17:38:26 +0800 Subject: [PATCH 222/330] Fix test_rejectmap flaky test (#246) Why did the test case usually pass but sometimes fail? Because it adds the reject entry after it has started inserting data into the table. If the insert has already hung at `check_rejectmap_by_relfilenode` when the reject entry is added, the hard limit is triggered; otherwise, the soft limit is triggered. --- quotamodel.c | 1 - tests/isolation2/expected/test_rejectmap.out | 39 ++++++++++++++++++++ tests/isolation2/sql/test_rejectmap.sql | 21 +++++++++++ 3 files changed, 60 insertions(+), 1 deletion(-) diff --git a/quotamodel.c b/quotamodel.c index 00a520289eb..d1a1b80ba2e 100644 --- a/quotamodel.c +++ b/quotamodel.c @@ -1557,7 +1557,6 @@ quota_check_common(Oid reloid, RelFileNode *relfilenode) #ifdef FAULT_INJECTOR if (SIMPLE_FAULT_INJECTOR("enable_check_quota_by_relfilenode") == FaultInjectorTypeSkip) enable_hardlimit = true; #endif - if (relfilenode && enable_hardlimit) return check_rejectmap_by_relfilenode(*relfilenode); return true; diff --git a/tests/isolation2/expected/test_rejectmap.out b/tests/isolation2/expected/test_rejectmap.out index 98401ee6f09..9c1de69a9ce 100644 --- a/tests/isolation2/expected/test_rejectmap.out +++ b/tests/isolation2/expected/test_rejectmap.out @@ -33,6 +33,12 @@ SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbi -- Insert a small amount of data into blocked_t1. It will hang up at check_rejectmap_by_relfilenode().
SELECT block_relation_on_seg0('blocked_t1'::regclass, 'NAMESPACE'::text, false); block_relation_on_seg0 @@ -72,6 +78,12 @@ SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbi -- Insert a small amount of data into blocked_t2. It will hang up at check_rejectmap_by_relfilenode(). 1&: INSERT INTO blocked_t2 SELECT generate_series(1, 10000); +SELECT gp_wait_until_triggered_fault('check_rejectmap_by_relfilenode', 1, dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_wait_until_triggered_fault +------------------------------- + Success: +(1 row) + -- Dispatch rejectmap to seg0. SELECT block_relation_on_seg0('blocked_t2'::regclass, 'NAMESPACE'::text, false); block_relation_on_seg0 @@ -111,6 +123,12 @@ SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbi -- Insert a small amount of data into blocked_t3. It will hang up at check_rejectmap_by_relfilenode(). 1&: INSERT INTO blocked_t3 SELECT generate_series(1, 10000); +SELECT gp_wait_until_triggered_fault('check_rejectmap_by_relfilenode', 1, dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_wait_until_triggered_fault +------------------------------- + Success: +(1 row) + -- Dispatch rejectmap to seg0. SELECT block_relation_on_seg0('blocked_t3'::regclass, 'NAMESPACE'::text, false); block_relation_on_seg0 @@ -152,6 +170,12 @@ SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbi -- Insert a small amount of data into blocked_t4. It will hang up at check_rejectmap_by_relfilenode(). 1&: INSERT INTO blocked_t4 SELECT generate_series(1, 10000); +SELECT gp_wait_until_triggered_fault('check_rejectmap_by_relfilenode', 1, dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_wait_until_triggered_fault +------------------------------- + Success: +(1 row) + -- Dispatch rejectmap to seg0. SELECT block_relation_on_seg0('blocked_t4_index'::regclass, 'NAMESPACE'::text, false); block_relation_on_seg0 @@ -188,6 +212,13 @@ SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbi Success: (1 row) 1&: INSERT INTO blocked_t5 SELECT generate_series(1, 10000); + +SELECT gp_wait_until_triggered_fault('check_rejectmap_by_relfilenode', 1, dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_wait_until_triggered_fault +------------------------------- + Success: +(1 row) + SELECT block_relation_on_seg0('blocked_t5'::regclass, 'NAMESPACE_TABLESPACE'::text, true); block_relation_on_seg0 ------------------------ @@ -218,7 +249,15 @@ SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbi -------------------------- Success: (1 row) + 1&: INSERT INTO blocked_t6 SELECT generate_series(1, 10000); + +SELECT gp_wait_until_triggered_fault('check_rejectmap_by_relfilenode', 1, dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_wait_until_triggered_fault +------------------------------- + Success: +(1 row) + SELECT block_relation_on_seg0('blocked_t6'::regclass, 'ROLE_TABLESPACE'::text, true); block_relation_on_seg0 ------------------------ diff --git a/tests/isolation2/sql/test_rejectmap.sql b/tests/isolation2/sql/test_rejectmap.sql index 7ddbb42d9a5..ca2226010d9 100644 --- a/tests/isolation2/sql/test_rejectmap.sql +++ b/tests/isolation2/sql/test_rejectmap.sql @@ -79,6 +79,9 @@ SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbi -- Insert a small amount of data into blocked_t1. It will hang up at check_rejectmap_by_relfilenode(). 
1&: INSERT INTO blocked_t1 SELECT generate_series(1, 10000); +SELECT gp_wait_until_triggered_fault('check_rejectmap_by_relfilenode', 1, dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; + -- Dispatch rejectmap to seg0. SELECT block_relation_on_seg0('blocked_t1'::regclass, 'NAMESPACE'::text, false); @@ -103,6 +106,9 @@ SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbi -- Insert a small amount of data into blocked_t2. It will hang up at check_rejectmap_by_relfilenode(). 1&: INSERT INTO blocked_t2 SELECT generate_series(1, 10000); +SELECT gp_wait_until_triggered_fault('check_rejectmap_by_relfilenode', 1, dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; + -- Dispatch rejectmap to seg0. SELECT block_relation_on_seg0('blocked_t2'::regclass, 'NAMESPACE'::text, false); @@ -127,6 +133,9 @@ SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbi -- Insert a small amount of data into blocked_t3. It will hang up at check_rejectmap_by_relfilenode(). 1&: INSERT INTO blocked_t3 SELECT generate_series(1, 10000); +SELECT gp_wait_until_triggered_fault('check_rejectmap_by_relfilenode', 1, dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; + -- Dispatch rejectmap to seg0. SELECT block_relation_on_seg0('blocked_t3'::regclass, 'NAMESPACE'::text, false); @@ -152,6 +161,9 @@ SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbi -- Insert a small amount of data into blocked_t4. It will hang up at check_rejectmap_by_relfilenode(). 1&: INSERT INTO blocked_t4 SELECT generate_series(1, 10000); +SELECT gp_wait_until_triggered_fault('check_rejectmap_by_relfilenode', 1, dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; + -- Dispatch rejectmap to seg0. SELECT block_relation_on_seg0('blocked_t4_index'::regclass, 'NAMESPACE'::text, false); @@ -173,6 +185,10 @@ INSERT INTO blocked_t5 SELECT generate_series(1, 100); SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; 1&: INSERT INTO blocked_t5 SELECT generate_series(1, 10000); + +SELECT gp_wait_until_triggered_fault('check_rejectmap_by_relfilenode', 1, dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; + SELECT block_relation_on_seg0('blocked_t5'::regclass, 'NAMESPACE_TABLESPACE'::text, true); SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; @@ -188,7 +204,12 @@ INSERT INTO blocked_t6 SELECT generate_series(1, 100); -- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. 
SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + 1&: INSERT INTO blocked_t6 SELECT generate_series(1, 10000); + +SELECT gp_wait_until_triggered_fault('check_rejectmap_by_relfilenode', 1, dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; + SELECT block_relation_on_seg0('blocked_t6'::regclass, 'ROLE_TABLESPACE'::text, true); SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; From 95a6359fe9511ce3854dabd884c38a90ae53485e Mon Sep 17 00:00:00 2001 From: gp-releng Date: Fri, 28 Oct 2022 02:40:35 -0700 Subject: [PATCH 223/330] Add SECURITY.md --- SECURITY.md | 58 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 58 insertions(+) create mode 100644 SECURITY.md diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 00000000000..330b16138e8 --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,58 @@ +# Security Release Process + +Greenplum Database has adopted this security disclosure and response policy to ensure we responsibly handle critical issues. + +## Reporting a Vulnerability - Private Disclosure Process + +Security is of the highest importance and all security vulnerabilities or suspected security vulnerabilities should be reported to Greenplum Database privately, to minimize attacks against current users of Greenplum Database before they are fixed. Vulnerabilities will be investigated and patched on the next patch (or minor) release as soon as possible. This information could be kept entirely internal to the project. + +If you know of a publicly disclosed security vulnerability for Greenplum Database, please **IMMEDIATELY** contact the Greenplum Database project team (security@greenplum.org). + +**IMPORTANT: Do not file public issues on GitHub for security vulnerabilities** + +To report a vulnerability or a security-related issue, please contact the email address with the details of the vulnerability. The email will be fielded by the Greenplum Database project team. Emails will be addressed promptly, including a detailed plan to investigate the issue and any potential workarounds to perform in the meantime. Do not report non-security-impacting bugs through this channel. Use [GitHub issues](https://github.com/greenplum-db/gpdb/issues) instead. + +## Proposed Email Content + +Provide a descriptive subject line and in the body of the email include the following information: + +* Basic identity information, such as your name and your affiliation or company. +* Detailed steps to reproduce the vulnerability (POC scripts, screenshots, and logs are all helpful to us). +* Description of the effects of the vulnerability on Greenplum Database and the related hardware and software configurations, so that the Greenplum Database project team can reproduce it. +* How the vulnerability affects Greenplum Database usage and an estimation of the attack surface, if there is one. +* List other projects or dependencies that were used in conjunction with Greenplum Database to produce the vulnerability. + +## When to report a vulnerability + +* When you think Greenplum Database has a potential security vulnerability. +* When you suspect a potential vulnerability but you are unsure that it impacts Greenplum Database. +* When you know of or suspect a potential vulnerability on another project that is used by Greenplum Database. 
+ +## Patch, Release, and Disclosure + +The Greenplum Database project team will respond to vulnerability reports as follows: + +1. The Greenplum project team will investigate the vulnerability and determine its effects and criticality. +2. If the issue is not deemed to be a vulnerability, the Greenplum project team will follow up with a detailed reason for rejection. +3. The Greenplum project team will initiate a conversation with the reporter promptly. +4. If a vulnerability is acknowledged and the timeline for a fix is determined, the Greenplum project team will work on a plan to communicate with the appropriate community, including identifying mitigating steps that affected users can take to protect themselves until the fix is rolled out. +5. The Greenplum project team will also create a [CVSS](https://www.first.org/cvss/specification-document) using the [CVSS Calculator](https://www.first.org/cvss/calculator/3.0). The Greenplum project team makes the final call on the calculated CVSS; it is better to move quickly than making the CVSS perfect. Issues may also be reported to [Mitre](https://cve.mitre.org/) using this [scoring calculator](https://nvd.nist.gov/vuln-metrics/cvss/v3-calculator). The CVE will initially be set to private. +6. The Greenplum project team will work on fixing the vulnerability and perform internal testing before preparing to roll out the fix. +7. A public disclosure date is negotiated by the Greenplum Database project team, and the bug submitter. We prefer to fully disclose the bug as soon as possible once a user mitigation or patch is available. It is reasonable to delay disclosure when the bug or the fix is not yet fully understood, or the solution is not well-tested. The timeframe for disclosure is from immediate (especially if it’s already publicly known) to a few weeks. The Greenplum Database project team holds the final say when setting a public disclosure date. +8. Once the fix is confirmed, the Greenplum project team will patch the vulnerability in the next patch or minor release, and backport a patch release into earlier supported releases as necessary. Upon release of the patched version of Greenplum Database, we will follow the **Public Disclosure Process**. + +## Public Disclosure Process + +The Greenplum project team publishes a [public advisory](https://github.com/greenplum-db/gpdb/security/advisories?state=published) to the Greenplum Database community via GitHub. In most cases, additional communication via Slack, Twitter, mailing lists, blog and other channels will assist in educating Greenplum Database users and rolling out the patched release to affected users. + +The Greenplum project team will also publish any mitigating steps users can take until the fix can be applied to their Greenplum Database instances. + +## Mailing lists + +* Use security@greenplum.org to report security concerns to the Greenplum Database project team, who uses the list to privately discuss security issues and fixes prior to disclosure. + +## Confidentiality, integrity and availability + +We consider vulnerabilities leading to the compromise of data confidentiality, elevation of privilege, or integrity to be our highest priority concerns. Availability, in particular in areas relating to DoS and resource exhaustion, is also a serious security concern. The Greenplum Database project team takes all vulnerabilities, potential vulnerabilities, and suspected vulnerabilities seriously and will investigate them in an urgent and expeditious manner. 
+ +Note that we do not currently consider the default settings for Greenplum Database to be secure-by-default. It is necessary for operators to explicitly configure settings, role based access control, and other resource related features in Greenplum Database to provide a hardened Greenplum Database environment. We will not act on any security disclosure that relates to a lack of safe defaults. Over time, we will work towards improved safe-by-default configuration, taking into account backwards compatibility. From 55f73b62b7de4b9a6dbbdfffe8034d6f66ce5dd9 Mon Sep 17 00:00:00 2001 From: GP Releng Bot <46203582+gp-releng@users.noreply.github.com> Date: Wed, 2 Nov 2022 13:09:26 +0800 Subject: [PATCH 224/330] Add SECURITY.md (#249) --- SECURITY.md | 123 ++++++++++++++++++++++++++++++++++++++++------------ 1 file changed, 95 insertions(+), 28 deletions(-) diff --git a/SECURITY.md b/SECURITY.md index 330b16138e8..fabbb9d2eb9 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -1,58 +1,125 @@ # Security Release Process -Greenplum Database has adopted this security disclosure and response policy to ensure we responsibly handle critical issues. +Greenplum Database has adopted this security disclosure and response policy to +ensure we responsibly handle critical issues. ## Reporting a Vulnerability - Private Disclosure Process -Security is of the highest importance and all security vulnerabilities or suspected security vulnerabilities should be reported to Greenplum Database privately, to minimize attacks against current users of Greenplum Database before they are fixed. Vulnerabilities will be investigated and patched on the next patch (or minor) release as soon as possible. This information could be kept entirely internal to the project. +Security is of the highest importance and all security vulnerabilities or +suspected security vulnerabilities should be reported to Greenplum Database +privately, to minimize attacks against current users of Greenplum Database +before they are fixed. Vulnerabilities will be investigated and patched on the +next patch (or minor) release as soon as possible. This information could be +kept entirely internal to the project. -If you know of a publicly disclosed security vulnerability for Greenplum Database, please **IMMEDIATELY** contact the Greenplum Database project team (security@greenplum.org). +If you know of a publicly disclosed security vulnerability for Greenplum +Database, please **IMMEDIATELY** contact the Greenplum Database project team +(security@greenplum.org). -**IMPORTANT: Do not file public issues on GitHub for security vulnerabilities** +**IMPORTANT: Do not file public issues on GitHub for security vulnerabilities!** -To report a vulnerability or a security-related issue, please contact the email address with the details of the vulnerability. The email will be fielded by the Greenplum Database project team. Emails will be addressed promptly, including a detailed plan to investigate the issue and any potential workarounds to perform in the meantime. Do not report non-security-impacting bugs through this channel. Use [GitHub issues](https://github.com/greenplum-db/gpdb/issues) instead. +To report a vulnerability or a security-related issue, please contact the email +address with the details of the vulnerability. The email will be fielded by the +Greenplum Database project team. Emails will be addressed promptly, including a +detailed plan to investigate the issue and any potential workarounds to perform +in the meantime. 
Do not report non-security-impacting bugs through this +channel. Use [GitHub issues](https://github.com/greenplum-db/gpdb/issues) +instead. ## Proposed Email Content -Provide a descriptive subject line and in the body of the email include the following information: +Provide a descriptive subject line and in the body of the email include the +following information: * Basic identity information, such as your name and your affiliation or company. -* Detailed steps to reproduce the vulnerability (POC scripts, screenshots, and logs are all helpful to us). -* Description of the effects of the vulnerability on Greenplum Database and the related hardware and software configurations, so that the Greenplum Database project team can reproduce it. -* How the vulnerability affects Greenplum Database usage and an estimation of the attack surface, if there is one. -* List other projects or dependencies that were used in conjunction with Greenplum Database to produce the vulnerability. +* Detailed steps to reproduce the vulnerability (POC scripts, screenshots, and + logs are all helpful to us). +* Description of the effects of the vulnerability on Greenplum Database and the + related hardware and software configurations, so that the Greenplum Database + project team can reproduce it. +* How the vulnerability affects Greenplum Database usage and an estimation of + the attack surface, if there is one. +* List other projects or dependencies that were used in conjunction with + Greenplum Database to produce the vulnerability. ## When to report a vulnerability * When you think Greenplum Database has a potential security vulnerability. -* When you suspect a potential vulnerability but you are unsure that it impacts Greenplum Database. -* When you know of or suspect a potential vulnerability on another project that is used by Greenplum Database. +* When you suspect a potential vulnerability but you are unsure that it impacts + Greenplum Database. +* When you know of or suspect a potential vulnerability on another project that + is used by Greenplum Database. ## Patch, Release, and Disclosure -The Greenplum Database project team will respond to vulnerability reports as follows: - -1. The Greenplum project team will investigate the vulnerability and determine its effects and criticality. -2. If the issue is not deemed to be a vulnerability, the Greenplum project team will follow up with a detailed reason for rejection. -3. The Greenplum project team will initiate a conversation with the reporter promptly. -4. If a vulnerability is acknowledged and the timeline for a fix is determined, the Greenplum project team will work on a plan to communicate with the appropriate community, including identifying mitigating steps that affected users can take to protect themselves until the fix is rolled out. -5. The Greenplum project team will also create a [CVSS](https://www.first.org/cvss/specification-document) using the [CVSS Calculator](https://www.first.org/cvss/calculator/3.0). The Greenplum project team makes the final call on the calculated CVSS; it is better to move quickly than making the CVSS perfect. Issues may also be reported to [Mitre](https://cve.mitre.org/) using this [scoring calculator](https://nvd.nist.gov/vuln-metrics/cvss/v3-calculator). The CVE will initially be set to private. -6. The Greenplum project team will work on fixing the vulnerability and perform internal testing before preparing to roll out the fix. -7. 
A public disclosure date is negotiated by the Greenplum Database project team, and the bug submitter. We prefer to fully disclose the bug as soon as possible once a user mitigation or patch is available. It is reasonable to delay disclosure when the bug or the fix is not yet fully understood, or the solution is not well-tested. The timeframe for disclosure is from immediate (especially if it’s already publicly known) to a few weeks. The Greenplum Database project team holds the final say when setting a public disclosure date. -8. Once the fix is confirmed, the Greenplum project team will patch the vulnerability in the next patch or minor release, and backport a patch release into earlier supported releases as necessary. Upon release of the patched version of Greenplum Database, we will follow the **Public Disclosure Process**. +The Greenplum Database project team will respond to vulnerability reports as +follows: + +1. The Greenplum project team will investigate the vulnerability and determine +its effects and criticality. +2. If the issue is not deemed to be a vulnerability, the Greenplum project team +will follow up with a detailed reason for rejection. +3. The Greenplum project team will initiate a conversation with the reporter +promptly. +4. If a vulnerability is acknowledged and the timeline for a fix is determined, +the Greenplum project team will work on a plan to communicate with the +appropriate community, including identifying mitigating steps that affected +users can take to protect themselves until the fix is rolled out. +5. The Greenplum project team will also create a +[CVSS](https://www.first.org/cvss/specification-document) using the [CVSS +Calculator](https://www.first.org/cvss/calculator/3.0). The Greenplum project +team makes the final call on the calculated CVSS; it is better to move quickly +than making the CVSS perfect. Issues may also be reported to +[Mitre](https://cve.mitre.org/) using this [scoring +calculator](https://nvd.nist.gov/vuln-metrics/cvss/v3-calculator). The CVE will +initially be set to private. +6. The Greenplum project team will work on fixing the vulnerability and perform +internal testing before preparing to roll out the fix. +7. A public disclosure date is negotiated by the Greenplum Database project +team, and the bug submitter. We prefer to fully disclose the bug as soon as +possible once a user mitigation or patch is available. It is reasonable to +delay disclosure when the bug or the fix is not yet fully understood, or the +solution is not well-tested. The timeframe for disclosure is from immediate +(especially if it’s already publicly known) to a few weeks. The Greenplum +Database project team holds the final say when setting a public disclosure +date. +8. Once the fix is confirmed, the Greenplum project team will patch the +vulnerability in the next patch or minor release, and backport a patch release +into earlier supported releases as necessary. Upon release of the patched +version of Greenplum Database, we will follow the **Public Disclosure +Process**. ## Public Disclosure Process -The Greenplum project team publishes a [public advisory](https://github.com/greenplum-db/gpdb/security/advisories?state=published) to the Greenplum Database community via GitHub. In most cases, additional communication via Slack, Twitter, mailing lists, blog and other channels will assist in educating Greenplum Database users and rolling out the patched release to affected users. 
+The Greenplum project team publishes a [public +advisory](https://github.com/greenplum-db/gpdb/security/advisories?state=published) +to the Greenplum Database community via GitHub. In most cases, additional +communication via Slack, Twitter, mailing lists, blog and other channels will +assist in educating Greenplum Database users and rolling out the patched +release to affected users. -The Greenplum project team will also publish any mitigating steps users can take until the fix can be applied to their Greenplum Database instances. +The Greenplum project team will also publish any mitigating steps users can +take until the fix can be applied to their Greenplum Database instances. ## Mailing lists -* Use security@greenplum.org to report security concerns to the Greenplum Database project team, who uses the list to privately discuss security issues and fixes prior to disclosure. +* Use security@greenplum.org to report security concerns to the Greenplum + Database project team, who uses the list to privately discuss security issues + and fixes prior to disclosure. ## Confidentiality, integrity and availability -We consider vulnerabilities leading to the compromise of data confidentiality, elevation of privilege, or integrity to be our highest priority concerns. Availability, in particular in areas relating to DoS and resource exhaustion, is also a serious security concern. The Greenplum Database project team takes all vulnerabilities, potential vulnerabilities, and suspected vulnerabilities seriously and will investigate them in an urgent and expeditious manner. - -Note that we do not currently consider the default settings for Greenplum Database to be secure-by-default. It is necessary for operators to explicitly configure settings, role based access control, and other resource related features in Greenplum Database to provide a hardened Greenplum Database environment. We will not act on any security disclosure that relates to a lack of safe defaults. Over time, we will work towards improved safe-by-default configuration, taking into account backwards compatibility. +We consider vulnerabilities leading to the compromise of data confidentiality, +elevation of privilege, or integrity to be our highest priority concerns. +Availability, in particular in areas relating to DoS and resource exhaustion, +is also a serious security concern. The Greenplum Database project team takes +all vulnerabilities, potential vulnerabilities, and suspected vulnerabilities +seriously and will investigate them in an urgent and expeditious manner. + +Note that we do not currently consider the default settings for Greenplum +Database to be secure-by-default. It is necessary for operators to explicitly +configure settings, role based access control, and other resource related +features in Greenplum Database to provide a hardened Greenplum Database +environment. We will not act on any security disclosure that relates to a lack +of safe defaults. Over time, we will work towards improved safe-by-default +configuration, taking into account backwards compatibility. From c5fe43694371c24b211dd32c3f68d84338902da0 Mon Sep 17 00:00:00 2001 From: Xiaoran Wang Date: Tue, 8 Nov 2022 09:17:32 +0800 Subject: [PATCH 225/330] Rewrite the diskquota worker scheduler (#245) Rewrite the diskquota worker scheduler. Each DB entry now manages its own running time, which makes the scheduler simpler. I have tested the performance; it is nearly the same as before.
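For illustration, the per-entry timing this commit describes can be sketched as follows; the types and the plain millisecond clock here are simplified assumptions for the sketch, not the shipped code:

/*
 * Sketch only: each DB entry carries its own next_run_time, and the
 * launcher derives its nap from that, skipping sleeps shorter than
 * MIN_SLEEPTIME and clamping sleeps longer than naptime.
 */
#include <stdint.h>

#define MIN_SLEEPTIME_MS 100                    /* milliseconds */

typedef struct DBEntrySketch                    /* hypothetical stand-in for DiskquotaDBEntry */
{
	int64_t		next_run_time_ms;
} DBEntrySketch;

static int64_t
launcher_nap_ms(DBEntrySketch *db, int64_t now_ms, int64_t naptime_ms)
{
	int64_t		wait_ms = db->next_run_time_ms - now_ms;

	if (wait_ms < MIN_SLEEPTIME_MS)
		return 0;                               /* due (or nearly due): run it now */
	if (wait_ms > naptime_ms)
	{
		/* never sleep longer than naptime; advance the entry's deadline */
		db->next_run_time_ms = now_ms + naptime_ms;
		return naptime_ms;
	}
	return wait_ms;
}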
Add a UDF db_status to help debug or monitor diskquota; the related SQL is in test_util.sql. I didn't put it into diskquota--2.1.sql because I don't want users to use it yet. I didn't take any shared lock when gathering that status info, so it may not be consistent. Co-authored-by: Xing Guo Co-authored-by: Zhang Hao --- diskquota.c | 291 +++++++++++++++++++++-------------- diskquota.h | 16 +- diskquota_utility.c | 73 +++++++++ test_util.sql | 11 ++ tests/regress/sql/config.sql | 1 + 5 files changed, 276 insertions(+), 116 deletions(-) create mode 100644 test_util.sql diff --git a/diskquota.c b/diskquota.c index 77f44da90bc..bdc603d3560 100644 --- a/diskquota.c +++ b/diskquota.c @@ -84,25 +84,9 @@ static DiskQuotaWorkerEntry *volatile MyWorkerInfo = NULL; static int num_db = 0; // in shared memory, only for launcher process -static DiskquotaLauncherShmemStruct *DiskquotaLauncherShmem; +DiskquotaLauncherShmemStruct *DiskquotaLauncherShmem; -/* - * the current db to be run or running. - * a in-process static value, pointer to shared memory - * - * curDB has 3 different kinds of values: - * 1) when curDB is NULL, it means we can start workers - * for the first databases in DiskquotaLauncherShmem->dbArray - * - * 2) when curDB is DiskquotaLauncherShmem->dbArrayTail, - * means it had finish one loop just now. And should - * sleep for ${diskquota.naptime} sconds. - * - * 3) when curDB is pointing to any db entry in - * DiskquotaLauncherShmem->dbArray[], it means it is in - * one loop to start each worker for each database. - */ -static DiskquotaDBEntry *curDB = NULL; +#define MIN_SLEEPTIME 100 /* milliseconds */ /* * bgworker handles, in launcher local memory, @@ -162,7 +146,7 @@ void disk_quota_launcher_main(Datum); static void disk_quota_sigterm(SIGNAL_ARGS); static void disk_quota_sighup(SIGNAL_ARGS); static void define_guc_variables(void); -static bool start_worker(void); +static bool start_worker(DiskquotaDBEntry *dbEntry); static void create_monitor_db_table(void); static void add_dbid_to_database_list(Oid dbid); static void del_dbid_from_database_list(Oid dbid); @@ -176,8 +160,7 @@ extern void invalidate_database_rejectmap(Oid dbid); static void FreeWorkerOnExit(int code, Datum arg); static void FreeWorker(DiskQuotaWorkerEntry *worker); static void init_database_list(void); -static bool CanLaunchWorker(void); -static DiskquotaDBEntry *next_db(void); +static DiskquotaDBEntry *next_db(DiskquotaDBEntry *curDB); static DiskQuotaWorkerEntry *next_worker(void); static DiskquotaDBEntry *add_db_entry(Oid dbid); static void release_db_entry(Oid dbid); @@ -187,6 +170,7 @@ static void vacuum_db_entry(DiskquotaDBEntry *db); static void init_bgworker_handles(void); static BackgroundWorkerHandle *get_bgworker_handle(uint32 worker_id); static void free_bgworker_handle(uint32 worker_id); +static BgwHandleStatus WaitForBackgroundWorkerShutdown(BackgroundWorkerHandle *handle); bool diskquota_is_paused() @@ -384,7 +368,7 @@ disk_quota_worker_main(Datum main_arg) /* Disable ORCA to avoid fallback */ optimizer = false; - ereport(LOG, (errmsg("[diskquota] start disk quota worker process to monitor database:%s", dbname))); + ereport(DEBUG1, (errmsg("[diskquota] start disk quota worker process to monitor database:%s", dbname))); /* Establish signal handlers before unblocking signals. */ pqsignal(SIGHUP, disk_quota_sighup); pqsignal(SIGTERM, disk_quota_sigterm); @@ -489,12 +473,9 @@ disk_quota_worker_main(Datum main_arg) * After running UDF init_table_size_table() The state will changed to * be ready.
*/ - if (!diskquota_is_paused()) + if (check_diskquota_state_is_ready()) { - if (check_diskquota_state_is_ready()) - { - break; - } + break; } rc = WaitLatch(&MyProc->procLatch, WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, diskquota_naptime * 1000L); ResetLatch(&MyProc->procLatch); @@ -541,7 +522,6 @@ disk_quota_worker_main(Datum main_arg) // and GPDB6 did not release the MemoryAccount after SPI finish. // Reset the MemoryAccount although we never create it. MemoryAccounting_Reset(); - if (DiskquotaLauncherShmem->isDynamicWorker) { break; @@ -577,6 +557,11 @@ disk_quota_worker_main(Datum main_arg) if (got_sigterm) ereport(LOG, (errmsg("[diskquota] stop disk quota worker process to monitor database:%s", dbname))); + ereport(DEBUG1, (errmsg("[diskquota] stop disk quota worker process to monitor database:%s", dbname))); + long secs; + int usecs; + TimestampDifference(MyWorkerInfo->dbEntry->last_run_time, GetCurrentTimestamp(), &secs, &usecs); + MyWorkerInfo->dbEntry->cost = secs * 1000L + usecs / 1000L; proc_exit(0); } @@ -638,53 +623,62 @@ disk_quota_launcher_main(Datum main_arg) loop_end = time(NULL); struct timeval nap; - nap.tv_sec = diskquota_naptime; - nap.tv_usec = 0; - TimestampTz loop_start_time = GetCurrentTimestamp(); + nap.tv_sec = diskquota_naptime; + nap.tv_usec = 0; /* main loop: do this until the SIGTERM handler tells us to terminate. */ ereport(LOG, (errmsg("[diskquota launcher] start main loop"))); - curDB = NULL; + DiskquotaDBEntry *curDB = NULL; + Oid curDBId = 0; + bool advance_one_db = true; + bool timeout = false; while (!got_sigterm) { int rc; CHECK_FOR_INTERRUPTS(); - - /* - * modify wait time - */ - long secs; - int microsecs; - TimestampDifference(GetCurrentTimestamp(), - TimestampTzPlusMilliseconds(loop_start_time, diskquota_naptime * 1000L), &secs, µsecs); - nap.tv_sec = secs; - nap.tv_usec = microsecs; - - if (curDB == DiskquotaLauncherShmem->dbArrayTail) + /* pick a db to run */ + if (advance_one_db) { - /* Have sleep enough time, should start another loop */ - if (nap.tv_sec == 0 && nap.tv_usec == 0) + curDB = next_db(curDB); + timeout = false; + if (curDB != NULL) { - loop_start_time = GetCurrentTimestamp(); - /* set the curDB pointing to the head of the db list */ - curDB = NULL; + curDBId = curDB->dbid; + elog(DEBUG1, "[diskquota] next db to run:%d", curDB->id); } - /* do nothing, just to sleep untill the nap time is 0 */ else - { - continue; - } + elog(DEBUG1, "[diskquota] no db to run"); } - - /* If there are no enough workers to run db, we can firstly sleep to wait workers */ - if (nap.tv_sec == 0 && nap.tv_usec == 0) + /* + * Modify wait time + * + * If there is no db needed to run or has exceeded the next_run_time, + * just sleep to wait a db or a free worker. + * + * Otherwise check the next_run_time to determin how much time to wait + */ + if (timeout || curDB == NULL) { nap.tv_sec = diskquota_naptime > 0 ? 
diskquota_naptime : 1; nap.tv_usec = 0; } - - while (curDB != DiskquotaLauncherShmem->dbArrayTail && CanLaunchWorker()) + else { - start_worker(); + TimestampTz curTime = GetCurrentTimestamp(); + TimestampDifference(curTime, curDB->next_run_time, &nap.tv_sec, &nap.tv_usec); + + /* if the sleep time is too short, just skip the sleeping */ + if (nap.tv_sec == 0 && nap.tv_usec < MIN_SLEEPTIME * 1000L) + { + nap.tv_usec = 0; + } + + /* if the sleep time is too long, advance the next_run_time */ + if (nap.tv_sec > diskquota_naptime) + { + nap.tv_sec = diskquota_naptime; + nap.tv_usec = 0; + curDB->next_run_time = TimestampTzPlusMilliseconds(curTime, diskquota_naptime * 1000L); + } } bool sigusr1 = false; @@ -699,13 +693,11 @@ disk_quota_launcher_main(Datum main_arg) if (nap.tv_sec != 0 || nap.tv_usec != 0) { + elog(DEBUG1, "[diskquota] naptime sec:%ld, usec:%d", nap.tv_sec, nap.tv_usec); rc = WaitLatch(&MyProc->procLatch, WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, (nap.tv_sec * 1000L) + (nap.tv_usec / 1000L)); ResetLatch(&MyProc->procLatch); - // wait at least one time slice, avoid 100% CPU usage - if (!diskquota_naptime) usleep(1); - /* Emergency bailout if postmaster has died */ if (rc & WL_POSTMASTER_DEATH) { @@ -716,6 +708,7 @@ disk_quota_launcher_main(Datum main_arg) /* process extension ddl message */ if (got_sigusr2) { + elog(DEBUG1, "[diskquota] got sigusr2"); got_sigusr2 = false; process_extension_ddl_message(); sigusr2 = true; @@ -724,6 +717,7 @@ disk_quota_launcher_main(Datum main_arg) /* in case of a SIGHUP, just reload the configuration. */ if (got_sighup) { + elog(DEBUG1, "[diskquota] got sighup"); got_sighup = false; ProcessConfigFile(PGC_SIGHUP); } @@ -735,10 +729,48 @@ disk_quota_launcher_main(Datum main_arg) */ if (got_sigusr1) { + elog(DEBUG1, "[diskquota] got sigusr1"); got_sigusr1 = false; sigusr1 = true; } + /* + * Try to starts a bgworker for the curDB + * + */ + + /* + * When db list is empty, curDB is NULL. + * When curDB->in_use is false means dbEtnry has been romoved + * When curDB->dbid doesn't equtal curDBId, it means the slot has + * been used by another db + * + * For the above conditions, we just skip this loop and try to fetch + * next db to run. + */ + if (curDB == NULL || !curDB->in_use || curDB->dbid != curDBId) + { + advance_one_db = true; + continue; + } + + /* + * Try to start a worker to run the db if has exceeded the next_run_time. + * if start_worker fails, advance_one_db will be set to false, so in the + * next loop will run the db again. + */ + if (TimestampDifferenceExceeds(curDB->next_run_time, GetCurrentTimestamp(), MIN_SLEEPTIME)) + { + bool ret = start_worker(curDB); + advance_one_db = ret; + /* has exceeded the next_run_time of current db */ + timeout = true; + } + else + { + advance_one_db = false; + } + loop_begin = loop_end; loop_end = time(NULL); if (isAbnormalLoopTime(loop_end - loop_begin)) @@ -1048,18 +1080,18 @@ do_process_extension_ddl_message(MessageResult *code, ExtensionDDLMessage local_ switch (local_extension_ddl_message.cmd) { case CMD_CREATE_EXTENSION: + if (num_db > diskquota_max_workers) DiskquotaLauncherShmem->isDynamicWorker = true; add_db_entry(dbid); /* TODO: how about this failed? 
*/ update_monitor_db_mpp(dbid, ADD_DB_TO_MONITOR, LAUNCHER_SCHEMA); - if (num_db > diskquota_max_workers) DiskquotaLauncherShmem->isDynamicWorker = true; break; case CMD_DROP_EXTENSION: + if (num_db <= diskquota_max_workers) DiskquotaLauncherShmem->isDynamicWorker = false; /* terminate bgworker in release_db_entry rountine */ release_db_entry(dbid); update_monitor_db_mpp(dbid, REMOVE_DB_FROM_BEING_MONITORED, LAUNCHER_SCHEMA); /* clear the out-of-quota rejectmap in shared memory */ invalidate_database_rejectmap(dbid); - if (num_db <= diskquota_max_workers) DiskquotaLauncherShmem->isDynamicWorker = false; break; default: ereport(LOG, (errmsg("[diskquota launcher]:received unsupported message cmd=%d", @@ -1240,27 +1272,28 @@ terminate_all_workers(void) */ static bool -start_worker() +start_worker(DiskquotaDBEntry *dbEntry) { BackgroundWorker worker; bool ret; DiskQuotaWorkerEntry *dq_worker; - DiskquotaDBEntry *dbEntry; MemoryContext old_ctx; char *dbname = NULL; dq_worker = next_worker(); - if (dq_worker == NULL) return false; - - /* pick a db run */ - LWLockAcquire(diskquota_locks.dblist_lock, LW_EXCLUSIVE); - dbEntry = next_db(); - if (dbEntry == DiskquotaLauncherShmem->dbArrayTail) goto Failed; - - dbEntry->workerId = dq_worker->id; - dq_worker->dbEntry = dbEntry; + if (dq_worker == NULL) + { + elog(DEBUG1, "[diskquota] no free workers"); + return false; + } /* free the BackgroundWorkerHandle used by last database */ free_bgworker_handle(dq_worker->id); + + dbEntry->workerId = dq_worker->id; + dq_worker->dbEntry = dbEntry; + dbEntry->last_run_time = GetCurrentTimestamp(); + + /* register a dynamic bgworker and wait for it to start */ memset(&worker, 0, sizeof(BackgroundWorker)); worker.bgw_flags = BGWORKER_SHMEM_ACCESS | BGWORKER_BACKEND_DATABASE_CONNECTION; worker.bgw_start_time = BgWorkerStart_RecoveryFinished; @@ -1291,7 +1324,6 @@ start_worker() elog(WARNING, "Create bgworker failed"); goto Failed; } - BgwHandleStatus status; pid_t pid; status = WaitForBackgroundWorkerStartup(bgworker_handles[dq_worker->id], &pid); @@ -1310,10 +1342,11 @@ start_worker() } Assert(status == BGWH_STARTED); - LWLockRelease(diskquota_locks.dblist_lock); + dbEntry->status = SLOT_RUNNING; return true; Failed: - LWLockRelease(diskquota_locks.dblist_lock); + + elog(DEBUG1, "[diskquota] diskquota, starts diskquota failed"); FreeWorker(dq_worker); return false; } @@ -1582,6 +1615,9 @@ FreeWorker(DiskQuotaWorkerEntry *worker) if (in_use && worker->dbEntry->workerId == worker->id) { worker->dbEntry->workerId = INVALID_WORKER_ID; + worker->dbEntry->status = SLOT_SLEEPING; + worker->dbEntry->next_run_time = + TimestampTzPlusMilliseconds(GetCurrentTimestamp(), diskquota_naptime * 1000L); } } LWLockRelease(diskquota_locks.dblist_lock); @@ -1589,7 +1625,7 @@ FreeWorker(DiskQuotaWorkerEntry *worker) dlist_delete(&worker->node); worker->dbEntry = NULL; dlist_push_head(&DiskquotaLauncherShmem->freeWorkers, &worker->node); - DiskquotaLauncherShmem->running_workers_num--; + elog(DEBUG1, "[diskquota] free worker %d", worker->id); LWLockRelease(diskquota_locks.workerlist_lock); } } @@ -1603,24 +1639,6 @@ FreeWorkerOnExit(int code, Datum arg) } } -static bool -CanLaunchWorker(void) -{ - if (dlist_is_empty(&DiskquotaLauncherShmem->freeWorkers)) - { - return false; - } - if (num_db <= 0) - { - return false; - } - if (DiskquotaLauncherShmem->running_workers_num >= num_db) - { - return false; - } - return true; -} - void init_launcher_shmem() { @@ -1648,7 +1666,6 @@ init_launcher_shmem() DiskquotaDBEntry *dbArrayTail = 
(DiskquotaDBEntry *)hidden_memory_prt; /* add all worker to the free worker list */ - DiskquotaLauncherShmem->running_workers_num = 0; for (int i = 0; i < diskquota_max_workers; i++) { memset(&worker[i], 0, sizeof(DiskQuotaWorkerEntry)); @@ -1664,27 +1681,36 @@ init_launcher_shmem() memset(&DiskquotaLauncherShmem->dbArray[i], 0, sizeof(DiskquotaDBEntry)); DiskquotaLauncherShmem->dbArray[i].id = i; DiskquotaLauncherShmem->dbArray[i].workerId = INVALID_WORKER_ID; + DiskquotaLauncherShmem->dbArray[i].status = SLOT_UNUSED; } } } /* * Look for an unused slot. If we find one, grab it. + * + * We always look for the slot from the lower-numbers slots + * firstly, so that we can recycle the slots instead of using + * the unused slots in order to recycle the shared memory + * allocated before. */ static DiskquotaDBEntry * add_db_entry(Oid dbid) { DiskquotaDBEntry *result = NULL; + LWLockAcquire(diskquota_locks.dblist_lock, LW_EXCLUSIVE); /* if there is already dbEntry's dbid equals dbid, returning the existing one */ for (int i = 0; i < MAX_NUM_MONITORED_DB; i++) { DiskquotaDBEntry *dbEntry = &DiskquotaLauncherShmem->dbArray[i]; if (!dbEntry->in_use && result == NULL) { - dbEntry->dbid = dbid; - dbEntry->in_use = true; - result = dbEntry; + dbEntry->dbid = dbid; + dbEntry->in_use = true; + dbEntry->next_run_time = GetCurrentTimestamp(); + dbEntry->status = SLOT_SLEEPING; + result = dbEntry; } else if (dbEntry->in_use && dbEntry->dbid == dbid) { @@ -1696,6 +1722,9 @@ add_db_entry(Oid dbid) ereport(WARNING, (errmsg("[diskquota launcher] diskquota monitored database limit is reached, database(oid:%u) " "will not enable diskquota", dbid))); + if (result != NULL) elog(DEBUG1, "[diskquota] add db entry: id: %d, %u", result->id, dbid); + + LWLockRelease(diskquota_locks.dblist_lock); return result; } @@ -1736,24 +1765,27 @@ release_db_entry(Oid dbid) * If the picked db is in running status, skip it, pick the next one to run. 
*/ static DiskquotaDBEntry * -next_db(void) +next_db(DiskquotaDBEntry *curDB) { - if (curDB == NULL || curDB == DiskquotaLauncherShmem->dbArrayTail) + DiskquotaDBEntry *result = NULL; + int nextSlot = 0; + if (curDB != NULL) { - curDB = DiskquotaLauncherShmem->dbArray; + nextSlot = curDB->id + 1; } - else - { - curDB++; - } - for (; curDB < DiskquotaLauncherShmem->dbArrayTail; curDB++) + + LWLockAcquire(diskquota_locks.dblist_lock, LW_SHARED); + for (int i = 0; i < MAX_NUM_MONITORED_DB; i++) { - if (!curDB->in_use) continue; - if (curDB->workerId != INVALID_WORKER_ID) continue; - if (curDB->dbid == InvalidOid) continue; + if (nextSlot >= MAX_NUM_MONITORED_DB) nextSlot = 0; + DiskquotaDBEntry *dbEntry = &DiskquotaLauncherShmem->dbArray[nextSlot]; + nextSlot++; + if (!dbEntry->in_use || dbEntry->workerId != INVALID_WORKER_ID || dbEntry->dbid == InvalidOid) continue; + result = dbEntry; break; } - return curDB; + LWLockRelease(diskquota_locks.dblist_lock); + return result; } static DiskQuotaWorkerEntry * @@ -1769,7 +1801,7 @@ next_worker(void) dq_worker = dlist_container(DiskQuotaWorkerEntry, node, wnode); reset_worker(dq_worker); dlist_push_head(&DiskquotaLauncherShmem->runningWorkers, &dq_worker->node); - DiskquotaLauncherShmem->running_workers_num++; + elog(DEBUG1, "[diskquota] gets a worker %d", dq_worker->id); out: LWLockRelease(diskquota_locks.workerlist_lock); return dq_worker; @@ -1812,6 +1844,7 @@ vacuum_db_entry(DiskquotaDBEntry *db) db->dbid = InvalidOid; db->inited = false; db->workerId = INVALID_WORKER_ID; + db->status = SLOT_UNUSED; db->in_use = false; } @@ -1841,7 +1874,37 @@ free_bgworker_handle(uint32 worker_id) BackgroundWorkerHandle **handle = &bgworker_handles[worker_id]; if (*handle != NULL) { + WaitForBackgroundWorkerShutdown(*handle); pfree(*handle); *handle = NULL; } } + +static BgwHandleStatus +WaitForBackgroundWorkerShutdown(BackgroundWorkerHandle *handle) +{ + BgwHandleStatus status; + int rc; + + for (;;) + { + pid_t pid; + + CHECK_FOR_INTERRUPTS(); + + status = GetBackgroundWorkerPid(handle, &pid); + if (status == BGWH_STOPPED) break; + + rc = WaitLatch(&MyProc->procLatch, WL_LATCH_SET | WL_POSTMASTER_DEATH, 0); + + if (rc & WL_POSTMASTER_DEATH) + { + status = BGWH_POSTMASTER_DIED; + break; + } + + ResetLatch(&MyProc->procLatch); + } + + return status; +} diff --git a/diskquota.h b/diskquota.h index bb4cd2a9b46..8207bf3e766 100644 --- a/diskquota.h +++ b/diskquota.h @@ -26,6 +26,7 @@ #include "utils/hsearch.h" #include "utils/relcache.h" +#include "utils/timestamp.h" #include @@ -155,13 +156,18 @@ struct DiskQuotaWorkerEntry DiskquotaDBEntry *dbEntry; // pointer to shared memory. 
DiskquotaLauncherShmem->dbArray }; +typedef enum +{ + SLOT_UNUSED = 0, + SLOT_SLEEPING, + SLOT_RUNNING +} DBSlotStatus; typedef struct { dlist_head freeWorkers; // a list of DiskQuotaWorkerEntry dlist_head runningWorkers; // a list of DiskQuotaWorkerEntry DiskquotaDBEntry *dbArray; // size == MAX_NUM_MONITORED_DB DiskquotaDBEntry *dbArrayTail; - int running_workers_num; volatile bool isDynamicWorker; /* DiskQuotaWorkerEntry worker[diskquota_max_workers]; // the hidden memory to store WorkerEntry @@ -169,6 +175,8 @@ typedef struct */ } DiskquotaLauncherShmemStruct; +DiskquotaLauncherShmemStruct *DiskquotaLauncherShmem; + /* In shmem, only used on master */ struct DiskquotaDBEntry { @@ -176,7 +184,11 @@ struct DiskquotaDBEntry Oid dbid; // the database oid in postgres catalog #define INVALID_WORKER_ID -1 - int workerId; // the id of the worker which is running for the, 0 means no worker for it. + int workerId; // the id of the worker which is running for the (current DB?), 0 means no worker for it. + TimestampTz next_run_time; + TimestampTz last_run_time; + int16 cost; // ms + DBSlotStatus status; bool inited; // this entry is inited, will set to true after the worker finish the frist run. bool in_use; // this slot is in using. AKA dbid != 0 diff --git a/diskquota_utility.c b/diskquota_utility.c index 8d601619c57..396ccf12e91 100644 --- a/diskquota_utility.c +++ b/diskquota_utility.c @@ -1655,3 +1655,76 @@ check_role(Oid roleoid, char *rolname, int64 quota_limit_mb) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("Can not set disk quota for system owner: %s", rolname))); } + +PG_FUNCTION_INFO_V1(db_status); +Datum +db_status(PG_FUNCTION_ARGS) +{ + FuncCallContext *funcctx; + struct StatusCtx + { + int slot; + } * status_ctx; + + if (SRF_IS_FIRSTCALL()) + { + TupleDesc tupdesc; + MemoryContext oldcontext; + + /* Create a function context for cross-call persistence. */ + funcctx = SRF_FIRSTCALL_INIT(); + + /* Switch to memory context appropriate for multiple function calls */ + oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx); + + tupdesc = CreateTemplateTupleDesc(8, false /*hasoid*/); + TupleDescInitEntry(tupdesc, (AttrNumber)1, "ID", INT2OID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber)2, "DBID", OIDOID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber)3, "WORKERID", INT2OID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber)4, "STATUS", INT2OID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber)5, "LAST_RUN_TIME", TIMESTAMPTZOID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber)6, "COST", INT2OID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber)7, "NEXT_RUN_TIME", TIMESTAMPTZOID, -1 /*typmod*/, 0 /*attdim*/); + TupleDescInitEntry(tupdesc, (AttrNumber)8, "EPOCH", INT8OID, -1 /*typmod*/, 0 /*attdim*/); + funcctx->tuple_desc = BlessTupleDesc(tupdesc); + + /* Create a local hash table and fill it with entries from shared memory. */ + status_ctx = (struct StatusCtx *)palloc(sizeof(struct StatusCtx)); + + /* Setup first calling context. 
*/ + status_ctx->slot = 0; + funcctx->user_fctx = (void *)status_ctx; + MemoryContextSwitchTo(oldcontext); + } + + funcctx = SRF_PERCALL_SETUP(); + status_ctx = (struct StatusCtx *)funcctx->user_fctx; + + while (status_ctx->slot != MAX_NUM_MONITORED_DB) + { + DiskquotaDBEntry *dbEntry = &DiskquotaLauncherShmem->dbArray[status_ctx->slot]; + status_ctx->slot++; + if (!dbEntry->in_use) continue; + Datum result; + Datum values[8]; + bool nulls[8]; + HeapTuple tuple; + + values[0] = Int16GetDatum(dbEntry->id); + values[1] = ObjectIdGetDatum(dbEntry->dbid); + values[2] = Int16GetDatum(dbEntry->workerId); + values[3] = Int16GetDatum(dbEntry->status); + values[4] = TimestampTzGetDatum(dbEntry->last_run_time); + values[5] = Int16GetDatum(dbEntry->cost); + values[6] = TimestampTzGetDatum(dbEntry->next_run_time); + values[7] = Int64GetDatum(worker_get_epoch(dbEntry->dbid)); + + memset(nulls, false, sizeof(nulls)); + tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls); + result = HeapTupleGetDatum(tuple); + + SRF_RETURN_NEXT(funcctx, result); + } + + SRF_RETURN_DONE(funcctx); +} diff --git a/test_util.sql b/test_util.sql new file mode 100644 index 00000000000..b92e48e4d7d --- /dev/null +++ b/test_util.sql @@ -0,0 +1,11 @@ +CREATE TYPE diskquota.db_status AS ( + "id" smallint, + "dbid" oid, + "workerid" smallint, + "status" smallint, + "last_run_time" timestamptz, + "cost" smallint, + "next_run_time" timestamptz, + "epoch" int8 +); +CREATE FUNCTION diskquota.db_status() RETURNS setof diskquota.db_status AS '$libdir/diskquota-2.1.so', 'db_status' LANGUAGE C VOLATILE; diff --git a/tests/regress/sql/config.sql b/tests/regress/sql/config.sql index 3861c3f63ff..f6755ab2122 100644 --- a/tests/regress/sql/config.sql +++ b/tests/regress/sql/config.sql @@ -6,6 +6,7 @@ CREATE DATABASE diskquota; \! gpconfig -c max_worker_processes -v 20 --skipvalidation \! gpconfig -c diskquota.hard_limit -v "off" --skipvalidation \! gpconfig -c diskquota.max_workers -v 1 --skipvalidation +\! gpconfig -c log_min_messages -v debug1 \! gpstop -raf --end_ignore From c86825c85dca89c65f6568fee3bc50b7828dd936 Mon Sep 17 00:00:00 2001 From: Chen Mulong Date: Tue, 8 Nov 2022 19:16:17 +0800 Subject: [PATCH 226/330] Remove PR pipeline base branch (#252) So that all PRs can trigger the build. --- concourse/pipeline/res_def.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/concourse/pipeline/res_def.yml b/concourse/pipeline/res_def.yml index 82278714c83..ab42312aaab 100644 --- a/concourse/pipeline/res_def.yml +++ b/concourse/pipeline/res_def.yml @@ -27,7 +27,6 @@ resources: disable_forks: false repository: greenplum-db/diskquota access_token: ((extension/github-access-token)) - base_branch: gpdb # Commit trigger - name: diskquota_commit type: git From ff9ef1af1900f65fb1635a23b23fc1c3f4333af9 Mon Sep 17 00:00:00 2001 From: Zhang Hao Date: Mon, 14 Nov 2022 11:15:17 +0800 Subject: [PATCH 227/330] Fix bug: relation_cache cannot isolate table info for each database (#241) Previously, users would see information about tables in db2 when accessing db1 and executing select diskquota.show_relation_cache(). Moreover, if db1's bgworker could see tables in db2, performance suffered, because the sizes of tables in db2 were calculated even though they are of no use to db1's diskquota. This PR adds a judgment to prevent this situation.
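The judgment the patch adds is the same at each scan site: skip any shared-memory entry whose RelFileNode belongs to a different database. A condensed sketch of the pattern (the entry layout here is a simplified assumption; the dynahash calls are the regular PostgreSQL API):

#include "postgres.h"
#include "miscadmin.h"          /* MyDatabaseId */
#include "utils/hsearch.h"      /* HTAB, HASH_SEQ_STATUS */

typedef struct RelCacheEntrySketch     /* simplified stand-in for the real entry */
{
	Oid		relid;          /* hash key */
	Oid		dbNode;         /* owning database */
} RelCacheEntrySketch;

static void
scan_only_current_db(HTAB *relation_cache)
{
	HASH_SEQ_STATUS      iter;
	RelCacheEntrySketch *entry;

	hash_seq_init(&iter, relation_cache);
	while ((entry = hash_seq_search(&iter)) != NULL)
	{
		/* The session of db1 should not see the table inside db2. */
		if (entry->dbNode != MyDatabaseId)
			continue;

		/* ... only entries of the current database get processed ... */
	}
}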
--- gp_activetable.c | 3 ++ quotamodel.c | 6 ++- relation_cache.c | 4 ++ .../expected/test_relation_cache.out | 50 +++++++++++++++++++ tests/isolation2/isolation2_schedule | 1 + tests/isolation2/sql/test_relation_cache.sql | 26 ++++++++++ 6 files changed, 88 insertions(+), 2 deletions(-) create mode 100644 tests/isolation2/expected/test_relation_cache.out create mode 100644 tests/isolation2/sql/test_relation_cache.sql diff --git a/gp_activetable.c b/gp_activetable.c index 3b13002153f..ae4ad190fb5 100644 --- a/gp_activetable.c +++ b/gp_activetable.c @@ -791,6 +791,9 @@ get_active_tables_oid(void) RelFileNode rnode; Oid prelid; + /* The session of db1 should not see the table inside db2. */ + if (active_table_file_entry->dbid != MyDatabaseId) continue; + rnode.dbNode = active_table_file_entry->dbid; rnode.relNode = active_table_file_entry->relfilenode; rnode.spcNode = active_table_file_entry->tablespaceoid; diff --git a/quotamodel.c b/quotamodel.c index d1a1b80ba2e..941e5b25972 100644 --- a/quotamodel.c +++ b/quotamodel.c @@ -811,7 +811,8 @@ merge_uncommitted_table_to_oidlist(List *oidlist) hash_seq_init(&iter, relation_cache); while ((entry = hash_seq_search(&iter)) != NULL) { - if (entry->primary_table_relid == entry->relid) + /* The session of db1 should not see the table inside db2. */ + if (entry->primary_table_relid == entry->relid && entry->rnode.node.dbNode == MyDatabaseId) { oidlist = lappend_oid(oidlist, entry->relid); } @@ -1875,7 +1876,8 @@ refresh_rejectmap(PG_FUNCTION_ARGS) bool found; LWLockAcquire(diskquota_locks.relation_cache_lock, LW_SHARED); relation_cache_entry = hash_search(relation_cache, &active_oid, HASH_FIND, &found); - if (found && relation_cache_entry) + /* The session of db1 should not see the table inside db2. */ + if (found && relation_cache_entry && relation_cache_entry->rnode.node.dbNode == MyDatabaseId) { Oid relnamespace = relation_cache_entry->namespaceoid; Oid reltablespace = relation_cache_entry->rnode.node.spcNode; diff --git a/relation_cache.c b/relation_cache.c index d7ae8c9390f..cabf1cd7fd1 100644 --- a/relation_cache.c +++ b/relation_cache.c @@ -284,6 +284,8 @@ remove_committed_relation_from_cache(void) hash_seq_init(&iter, relation_cache); while ((entry = hash_seq_search(&iter)) != NULL) { + /* The session of db1 should not see the table inside db2. */ + if (entry->rnode.node.dbNode != MyDatabaseId) continue; local_entry = hash_search(local_relation_cache, &entry->relid, HASH_ENTER, NULL); memcpy(local_entry, entry, sizeof(DiskQuotaRelationCacheEntry)); } @@ -360,6 +362,8 @@ show_relation_cache(PG_FUNCTION_ARGS) hash_seq_init(&hash_seq, relation_cache); while ((entry = (DiskQuotaRelationCacheEntry *)hash_seq_search(&hash_seq)) != NULL) { + /* The session of db1 should not see the table inside db2. 
*/ + if (entry->rnode.node.dbNode != MyDatabaseId) continue; DiskQuotaRelationCacheEntry *local_entry = hash_search(relation_cache_ctx->relation_cache, &entry->relid, HASH_ENTER_NULL, NULL); if (local_entry) diff --git a/tests/isolation2/expected/test_relation_cache.out b/tests/isolation2/expected/test_relation_cache.out new file mode 100644 index 00000000000..fd9d4906aa5 --- /dev/null +++ b/tests/isolation2/expected/test_relation_cache.out @@ -0,0 +1,50 @@ +CREATE DATABASE tempdb1; +CREATE +CREATE DATABASE tempdb2; +CREATE + +-- prepare extension +1:@db_name tempdb1: CREATE EXTENSION diskquota; +CREATE +1:@db_name tempdb1: SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +2:@db_name tempdb2: CREATE EXTENSION diskquota; +CREATE +2:@db_name tempdb2: SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- create a table in tempdb1 +1:@db_name tempdb1: BEGIN; +BEGIN +1:@db_name tempdb1: CREATE TABLE t(i int); +CREATE +1:@db_name tempdb1: INSERT INTO t select generate_series(1, 10000); +INSERT 10000 + +-- query relation_cache in tempdb2 +2:@db_name tempdb2: SELECT count(*) from diskquota.show_relation_cache(); + count +------- + 0 +(1 row) + +1:@db_name tempdb1: ABORT; +ABORT + +1:@db_name tempdb1: DROP EXTENSION diskquota; +DROP +2:@db_name tempdb2: DROP EXTENSION diskquota; +DROP +1q: ... +2q: ... + +DROP DATABASE tempdb1; +DROP +DROP DATABASE tempdb2; +DROP diff --git a/tests/isolation2/isolation2_schedule b/tests/isolation2/isolation2_schedule index bf4ca896373..090ea9ad073 100644 --- a/tests/isolation2/isolation2_schedule +++ b/tests/isolation2/isolation2_schedule @@ -8,5 +8,6 @@ test: test_truncate test: test_postmaster_restart test: test_worker_timeout test: test_per_segment_config +test: test_relation_cache test: test_drop_extension test: reset_config diff --git a/tests/isolation2/sql/test_relation_cache.sql b/tests/isolation2/sql/test_relation_cache.sql new file mode 100644 index 00000000000..59ebde9fea3 --- /dev/null +++ b/tests/isolation2/sql/test_relation_cache.sql @@ -0,0 +1,26 @@ +CREATE DATABASE tempdb1; +CREATE DATABASE tempdb2; + +-- prepare extension +1:@db_name tempdb1: CREATE EXTENSION diskquota; +1:@db_name tempdb1: SELECT diskquota.wait_for_worker_new_epoch(); +2:@db_name tempdb2: CREATE EXTENSION diskquota; +2:@db_name tempdb2: SELECT diskquota.wait_for_worker_new_epoch(); + +-- create a table in tempdb1 +1:@db_name tempdb1: BEGIN; +1:@db_name tempdb1: CREATE TABLE t(i int); +1:@db_name tempdb1: INSERT INTO t select generate_series(1, 10000); + +-- query relation_cache in tempdb2 +2:@db_name tempdb2: SELECT count(*) from diskquota.show_relation_cache(); + +1:@db_name tempdb1: ABORT; + +1:@db_name tempdb1: DROP EXTENSION diskquota; +2:@db_name tempdb2: DROP EXTENSION diskquota; +1q: +2q: + +DROP DATABASE tempdb1; +DROP DATABASE tempdb2; From 05615f808d2fa8fec8bec52db581240dd8263610 Mon Sep 17 00:00:00 2001 From: Xiaoran Wang Date: Thu, 17 Nov 2022 14:34:42 +0800 Subject: [PATCH 228/330] Fix flaky test (#250) When diskquota is in dynamic mode, it may take a long time to start a worker for a database (because the workers are busy), so some tests would fail after a fixed pg_sleep. Add the diskquota_test extension to resolve the issue. It provides a UDF diskquota_test.wait(sql): pass it any SQL whose SELECT result is true or false; it runs the SQL and waits for the result to become true for at most 10 seconds.
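Such a wait() helper boils down to a bounded poll loop. A minimal sketch of that shape follows; run_bool_sql() is an assumed helper (for example, executing the SQL text through SPI and reading one boolean), not a function from this patch:

#include "postgres.h"
#include "miscadmin.h"          /* CHECK_FOR_INTERRUPTS */
#include "storage/latch.h"
#include "storage/proc.h"       /* MyProc */
#include "utils/timestamp.h"    /* GetCurrentTimestamp, TimestampDifferenceExceeds */

extern bool run_bool_sql(const char *sql);  /* assumed helper, not part of the patch */

static bool
wait_until_true(const char *sql, int timeout_ms)
{
	TimestampTz start = GetCurrentTimestamp();

	for (;;)
	{
		CHECK_FOR_INTERRUPTS();
		if (run_bool_sql(sql))
			return true;
		if (TimestampDifferenceExceeds(start, GetCurrentTimestamp(), timeout_ms))
			return false;       /* give up (~10 s here) so the test fails visibly */
		/* poll every 100 ms instead of one long fixed pg_sleep */
		(void) WaitLatch(&MyProc->procLatch, WL_LATCH_SET | WL_TIMEOUT, 100 /* ms */);
		ResetLatch(&MyProc->procLatch);
	}
}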
diskquota_test.db_status returns all the databases monitored by diskquota together with their status and epoch. diskquota_test.cur_db_status() and diskquota_test.db_status() are just wrappers around it that are more convenient in the tests. In the regress tests, the sleeps are replaced with diskquota_test.wait or diskquota.wait_for_worker_new_epoch(). In addition, I have extracted the monitored_dbid_cache-related data and functions into the monitored_db.c file. The regress test test_init_table_size_table is disabled for now; a bug makes it fail, and I will fix it in another PR. --- CMakeLists.txt | 3 +- diskquota--2.1.sql | 2 +- diskquota.c | 180 ++-------- diskquota.h | 31 +- diskquota_enum.h | 8 + diskquota_test--1.0.sql | 37 ++ diskquota_test.control | 5 + diskquota_utility.c | 123 +------ gp_activetable.c | 1 - monitored_db.c | 335 ++++++++++++++++++ test_util.sql | 11 - tests/CMakeLists.txt | 10 + tests/regress/diskquota_schedule | 2 +- .../expected/test_activetable_limit.out | 2 - .../expected/test_ctas_no_preload_lib.out | 15 +- tests/regress/expected/test_extension.out | 54 +-- .../expected/test_insert_after_drop.out | 15 +- .../expected/test_readiness_logged.out | 17 +- tests/regress/expected/test_table_size.out | 21 +- .../regress/expected/test_update_db_cache.out | 14 +- .../expected/test_worker_not_ready.out | 9 +- tests/regress/sql/test_activetable_limit.sql | 3 - .../regress/sql/test_ctas_no_preload_lib.sql | 7 +- tests/regress/sql/test_extension.sql | 27 +- tests/regress/sql/test_insert_after_drop.sql | 5 +- tests/regress/sql/test_readiness_logged.sql | 5 +- tests/regress/sql/test_table_size.sql | 9 +- tests/regress/sql/test_update_db_cache.sql | 8 +- tests/regress/sql/test_worker_not_ready.sql | 3 +- 29 files changed, 499 insertions(+), 463 deletions(-) create mode 100644 diskquota_enum.h create mode 100644 diskquota_test--1.0.sql create mode 100644 diskquota_test.control create mode 100644 monitored_db.c delete mode 100644 test_util.sql diff --git a/CMakeLists.txt b/CMakeLists.txt index a16d6d6dca6..a13d08a447b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -72,7 +72,8 @@ list( enforcement.c gp_activetable.c quotamodel.c - relation_cache.c) + relation_cache.c + monitored_db.c) list( APPEND diff --git a/diskquota--2.1.sql b/diskquota--2.1.sql index de1f6f6f4e0..eb12606d69a 100644 --- a/diskquota--2.1.sql +++ b/diskquota--2.1.sql @@ -312,7 +312,7 @@ INSERT INTO diskquota.state SELECT (count(relname) = 0)::int FROM pg_class AS c, SELECT FROM diskquota.resume(); --- Starting the worker has to be the last step.
CREATE FUNCTION diskquota.diskquota_start_worker() RETURNS void STRICT AS '$libdir/diskquota-2.1.so' LANGUAGE C; SELECT diskquota.diskquota_start_worker(); DROP FUNCTION diskquota.diskquota_start_worker(); diff --git a/diskquota.c b/diskquota.c index bdc603d3560..3488e6fd41c 100644 --- a/diskquota.c +++ b/diskquota.c @@ -83,8 +83,7 @@ static DiskQuotaWorkerEntry *volatile MyWorkerInfo = NULL; // how many database diskquota are monitoring on static int num_db = 0; -// in shared memory, only for launcher process -DiskquotaLauncherShmemStruct *DiskquotaLauncherShmem; +static DiskquotaLauncherShmemStruct *DiskquotaLauncherShmem; #define MIN_SLEEPTIME 100 /* milliseconds */ @@ -97,46 +96,6 @@ DiskquotaLauncherShmemStruct *DiskquotaLauncherShmem; */ BackgroundWorkerHandle **bgworker_handles; -bool -diskquota_is_readiness_logged() -{ - Assert(MyDatabaseId != InvalidOid); - bool is_readiness_logged; - - LWLockAcquire(diskquota_locks.monitored_dbid_cache_lock, LW_SHARED); - { - MonitorDBEntry hash_entry; - bool found; - - hash_entry = (MonitorDBEntry)hash_search(monitored_dbid_cache, (void *)&MyDatabaseId, HASH_FIND, &found); - is_readiness_logged = found ? hash_entry->is_readiness_logged : false; - } - LWLockRelease(diskquota_locks.monitored_dbid_cache_lock); - - return is_readiness_logged; -} - -void -diskquota_set_readiness_logged() -{ - Assert(MyDatabaseId != InvalidOid); - - /* - * We actually need ROW EXCLUSIVE lock here. Given that the current worker - * is the the only process that modifies the entry, it is safe to only take - * the shared lock. - */ - LWLockAcquire(diskquota_locks.monitored_dbid_cache_lock, LW_SHARED); - { - MonitorDBEntry hash_entry; - bool found; - - hash_entry = (MonitorDBEntry)hash_search(monitored_dbid_cache, (void *)&MyDatabaseId, HASH_FIND, &found); - hash_entry->is_readiness_logged = true; - } - LWLockRelease(diskquota_locks.monitored_dbid_cache_lock); -} - /* functions of disk quota*/ void _PG_init(void); void _PG_fini(void); @@ -172,24 +131,6 @@ static BackgroundWorkerHandle *get_bgworker_handle(uint32 worker_id); static void free_bgworker_handle(uint32 worker_id); static BgwHandleStatus WaitForBackgroundWorkerShutdown(BackgroundWorkerHandle *handle); -bool -diskquota_is_paused() -{ - Assert(MyDatabaseId != InvalidOid); - bool paused = false; - bool found; - MonitorDBEntry entry; - - LWLockAcquire(diskquota_locks.monitored_dbid_cache_lock, LW_SHARED); - entry = hash_search(monitored_dbid_cache, &MyDatabaseId, HASH_FIND, &found); - if (found) - { - paused = entry->paused; - } - LWLockRelease(diskquota_locks.monitored_dbid_cache_lock); - return paused; -} - /* * diskquota_launcher_shmem_size * Compute space needed for diskquota launcher related shared memory @@ -458,6 +399,8 @@ disk_quota_worker_main(Datum main_arg) */ init_ps_display("bgworker:", "[diskquota]", dbname, ""); + /* suppose the database is ready, if not, then set it to false */ + bool is_ready = true; /* Waiting for diskquota state become ready */ while (!got_sigterm) { @@ -475,8 +418,14 @@ disk_quota_worker_main(Datum main_arg) */ if (check_diskquota_state_is_ready()) { + is_ready = true; break; } + if (is_ready) + { + update_monitordb_status(MyWorkerInfo->dbEntry->dbid, DB_UNREADY); + is_ready = false; + } rc = WaitLatch(&MyProc->procLatch, WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, diskquota_naptime * 1000L); ResetLatch(&MyProc->procLatch); @@ -498,6 +447,7 @@ disk_quota_worker_main(Datum main_arg) } } + if (!MyWorkerInfo->dbEntry->inited) update_monitordb_status(MyWorkerInfo->dbEntry->dbid, 
DB_RUNNING); bool is_gang_destroyed = false; while (!got_sigterm) { @@ -558,10 +508,12 @@ disk_quota_worker_main(Datum main_arg) if (got_sigterm) ereport(LOG, (errmsg("[diskquota] stop disk quota worker process to monitor database:%s", dbname))); ereport(DEBUG1, (errmsg("[diskquota] stop disk quota worker process to monitor database:%s", dbname))); +#if DISKQUOTA_DEBUG long secs; int usecs; TimestampDifference(MyWorkerInfo->dbEntry->last_run_time, GetCurrentTimestamp(), &secs, &usecs); MyWorkerInfo->dbEntry->cost = secs * 1000L + usecs / 1000L; +#endif proc_exit(0); } @@ -664,7 +616,11 @@ disk_quota_launcher_main(Datum main_arg) else { TimestampTz curTime = GetCurrentTimestamp(); - TimestampDifference(curTime, curDB->next_run_time, &nap.tv_sec, &nap.tv_usec); + long sec; + int usec; + TimestampDifference(curTime, curDB->next_run_time, &sec, &usec); + nap.tv_sec = sec; + nap.tv_usec = usec; /* if the sleep time is too short, just skip the sleeping */ if (nap.tv_sec == 0 && nap.tv_usec < MIN_SLEEPTIME * 1000L) @@ -1289,9 +1245,12 @@ start_worker(DiskquotaDBEntry *dbEntry) /* free the BackgroundWorkerHandle used by last database */ free_bgworker_handle(dq_worker->id); - dbEntry->workerId = dq_worker->id; - dq_worker->dbEntry = dbEntry; + dbEntry->workerId = dq_worker->id; + dq_worker->dbEntry = dbEntry; + +#if DISKQUOTA_DEBUG dbEntry->last_run_time = GetCurrentTimestamp(); +#endif /* register a dynamic bgworker and wait for it to start */ memset(&worker, 0, sizeof(BackgroundWorker)); @@ -1342,7 +1301,6 @@ start_worker(DiskquotaDBEntry *dbEntry) } Assert(status == BGWH_STARTED); - dbEntry->status = SLOT_RUNNING; return true; Failed: @@ -1366,54 +1324,6 @@ is_valid_dbid(Oid dbid) return true; } -bool -worker_increase_epoch(Oid dbid) -{ - bool found = false; - MonitorDBEntry entry; - LWLockAcquire(diskquota_locks.monitored_dbid_cache_lock, LW_SHARED); - entry = hash_search(monitored_dbid_cache, &dbid, HASH_FIND, &found); - - if (found) - { - pg_atomic_fetch_add_u32(&(entry->epoch), 1); - } - LWLockRelease(diskquota_locks.monitored_dbid_cache_lock); - return found; -} - -uint32 -worker_get_epoch(Oid dbid) -{ - bool found = false; - uint32 epoch = 0; - MonitorDBEntry entry; - LWLockAcquire(diskquota_locks.monitored_dbid_cache_lock, LW_SHARED); - entry = hash_search(monitored_dbid_cache, &dbid, HASH_FIND, &found); - if (found) - { - epoch = pg_atomic_read_u32(&(entry->epoch)); - } - LWLockRelease(diskquota_locks.monitored_dbid_cache_lock); - if (!found) - { - ereport(WARNING, (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("[diskquota] worker not found for database \"%s\"", get_database_name(dbid)))); - } - return epoch; -} - -// Returns the worker epoch for the current database. -// An epoch marks a new iteration of refreshing quota usage by a bgworker. -// An epoch is a 32-bit unsigned integer and there is NO invalid value. -// Therefore, the UDF must throw an error if something unexpected occurs. 
-PG_FUNCTION_INFO_V1(show_worker_epoch); -Datum -show_worker_epoch(PG_FUNCTION_ARGS) -{ - PG_RETURN_UINT32(worker_get_epoch(MyDatabaseId)); -} - static const char * diskquota_status_check_soft_limit() { @@ -1561,48 +1471,6 @@ diskquota_status(PG_FUNCTION_ARGS) SRF_RETURN_NEXT(funcctx, HeapTupleGetDatum(tuple)); } -static bool -check_for_timeout(TimestampTz start_time) -{ - long diff_secs = 0; - int diff_usecs = 0; - TimestampDifference(start_time, GetCurrentTimestamp(), &diff_secs, &diff_usecs); - if (diff_secs >= diskquota_worker_timeout) - { - ereport(NOTICE, (errmsg("[diskquota] timeout when waiting for worker"), - errhint("please check if the bgworker is still alive."))); - return true; - } - return false; -} - -// Checks if the bgworker for the current database works as expected. -// 1. If it returns successfully in `diskquota.naptime`, the bgworker works as expected. -// 2. If it does not terminate, there must be some issues with the bgworker. -// In this case, we must ensure this UDF can be interrupted by the user. -PG_FUNCTION_INFO_V1(wait_for_worker_new_epoch); -Datum -wait_for_worker_new_epoch(PG_FUNCTION_ARGS) -{ - TimestampTz start_time = GetCurrentTimestamp(); - uint32 current_epoch = worker_get_epoch(MyDatabaseId); - for (;;) - { - CHECK_FOR_INTERRUPTS(); - if (check_for_timeout(start_time)) start_time = GetCurrentTimestamp(); - uint32 new_epoch = worker_get_epoch(MyDatabaseId); - /* Unsigned integer underflow is OK */ - if (new_epoch - current_epoch >= 2u) - { - PG_RETURN_BOOL(true); - } - /* Sleep for naptime to reduce CPU usage */ - (void)WaitLatch(&MyProc->procLatch, WL_LATCH_SET | WL_TIMEOUT, diskquota_naptime ? diskquota_naptime : 1); - ResetLatch(&MyProc->procLatch); - } - PG_RETURN_BOOL(false); -} - static void FreeWorker(DiskQuotaWorkerEntry *worker) { @@ -1615,7 +1483,6 @@ FreeWorker(DiskQuotaWorkerEntry *worker) if (in_use && worker->dbEntry->workerId == worker->id) { worker->dbEntry->workerId = INVALID_WORKER_ID; - worker->dbEntry->status = SLOT_SLEEPING; worker->dbEntry->next_run_time = TimestampTzPlusMilliseconds(GetCurrentTimestamp(), diskquota_naptime * 1000L); } @@ -1681,7 +1548,6 @@ init_launcher_shmem() memset(&DiskquotaLauncherShmem->dbArray[i], 0, sizeof(DiskquotaDBEntry)); DiskquotaLauncherShmem->dbArray[i].id = i; DiskquotaLauncherShmem->dbArray[i].workerId = INVALID_WORKER_ID; - DiskquotaLauncherShmem->dbArray[i].status = SLOT_UNUSED; } } } @@ -1709,7 +1575,6 @@ add_db_entry(Oid dbid) dbEntry->dbid = dbid; dbEntry->in_use = true; dbEntry->next_run_time = GetCurrentTimestamp(); - dbEntry->status = SLOT_SLEEPING; result = dbEntry; } else if (dbEntry->in_use && dbEntry->dbid == dbid) @@ -1844,7 +1709,6 @@ vacuum_db_entry(DiskquotaDBEntry *db) db->dbid = InvalidOid; db->inited = false; db->workerId = INVALID_WORKER_ID; - db->status = SLOT_UNUSED; db->in_use = false; } diff --git a/diskquota.h b/diskquota.h index 8207bf3e766..86735183b70 100644 --- a/diskquota.h +++ b/diskquota.h @@ -156,12 +156,6 @@ struct DiskQuotaWorkerEntry DiskquotaDBEntry *dbEntry; // pointer to shared memory. 
DiskquotaLauncherShmem->dbArray }; -typedef enum -{ - SLOT_UNUSED = 0, - SLOT_SLEEPING, - SLOT_RUNNING -} DBSlotStatus; typedef struct { dlist_head freeWorkers; // a list of DiskQuotaWorkerEntry @@ -175,8 +169,6 @@ typedef struct */ } DiskquotaLauncherShmemStruct; -DiskquotaLauncherShmemStruct *DiskquotaLauncherShmem; - /* In shmem, only used on master */ struct DiskquotaDBEntry { @@ -184,27 +176,33 @@ struct DiskquotaDBEntry Oid dbid; // the database oid in postgres catalog #define INVALID_WORKER_ID -1 - int workerId; // the id of the worker which is running for the (current DB?), 0 means no worker for it. - TimestampTz next_run_time; - TimestampTz last_run_time; - int16 cost; // ms - DBSlotStatus status; + int workerId; // the id of the worker which is running for the (current DB?), 0 means no worker for it. + TimestampTz next_run_time; + TimestampTz last_run_time; + int16 cost; // ms bool inited; // this entry is inited, will set to true after the worker finish the frist run. bool in_use; // this slot is in using. AKA dbid != 0 }; +typedef enum MonitorDBStatus +{ +#define DB_STATUS(id, str) id, +#include "diskquota_enum.h" +#undef DB_STATUS + DB_STATUS_MAX +} MonitorDBStatus; /* used in monitored_dbid_cache, in shmem, both on master and segments */ + typedef struct MonitorDBEntryStruct *MonitorDBEntry; struct MonitorDBEntryStruct { - Oid dbid; // the key - + Oid dbid; // the key + pg_atomic_uint32 status; // enum MonitorDBStatus bool paused; bool is_readiness_logged; /* true if we have logged the error message for not ready */ pg_atomic_uint32 epoch; /* this counter will be increased after each worker loop */ }; - extern HTAB *disk_quota_worker_map; /* drop extension hook */ @@ -252,4 +250,5 @@ extern void vacuum_disk_quota_model(uint32 id); extern void update_monitor_db(Oid dbid, FetchTableStatType action); extern void update_monitor_db_mpp(Oid dbid, FetchTableStatType action, const char *schema); extern void diskquota_stop_worker(void); +extern void update_monitordb_status(Oid dbid, uint32 status); #endif diff --git a/diskquota_enum.h b/diskquota_enum.h new file mode 100644 index 00000000000..28923b9833a --- /dev/null +++ b/diskquota_enum.h @@ -0,0 +1,8 @@ +#ifdef DB_STATUS +DB_STATUS(DB_STATUS_UNKNOWN = 0, "UNKNOWN") +DB_STATUS(DB_INIT, "INIT") +DB_STATUS(DB_ERROR, "ERROR") +DB_STATUS(DB_UNREADY, "UNREADY") +DB_STATUS(DB_PAUSED, "PAUSED") +DB_STATUS(DB_RUNNING, "RUNNING") +#endif diff --git a/diskquota_test--1.0.sql b/diskquota_test--1.0.sql new file mode 100644 index 00000000000..9ef874d502a --- /dev/null +++ b/diskquota_test--1.0.sql @@ -0,0 +1,37 @@ +CREATE SCHEMA diskquota_test; + +-- test function +CREATE FUNCTION diskquota_test.wait(sql text) RETURNS bool +AS $$ +DECLARE +res bool := false; +count integer := 10; +BEGIN + WHILE count > 0 LOOP + EXECUTE sql into res; + IF res THEN + RETURN res; + ELSE + count = count - 1; + EXECUTE 'select pg_sleep(1);'; + END IF; + END LOOP; + RETURN res; +END; +$$ LANGUAGE plpgsql; + +CREATE TYPE diskquota_test.db_status AS ( + "dbid" oid, + "datname" text, + "status" text, + "epoch" int8, + "paused" bool +); +CREATE FUNCTION diskquota_test.db_status() RETURNS setof diskquota_test.db_status AS '$libdir/diskquota-2.1.so', 'db_status' LANGUAGE C VOLATILE; +CREATE FUNCTION diskquota_test.cur_db_status() RETURNS diskquota_test.db_status AS $$ +SELECT * from diskquota_test.db_status() where datname = current_database(); +$$ LANGUAGE SQL; + +CREATE FUNCTION diskquota_test.check_cur_db_status(text) RETURNS boolean AS $$ +SELECT $1 = db.status from 
diskquota_test.db_status() as db where db.datname = current_database(); +$$ LANGUAGE SQL; diff --git a/diskquota_test.control b/diskquota_test.control new file mode 100644 index 00000000000..11a91927fc0 --- /dev/null +++ b/diskquota_test.control @@ -0,0 +1,5 @@ +# diskquota test extension +comment = 'extension to test diskquota' +default_version = '1.0' +module_pathname = 'do-not-use-this' +relocatable = true diff --git a/diskquota_utility.c b/diskquota_utility.c index 396ccf12e91..badb1356dd9 100644 --- a/diskquota_utility.c +++ b/diskquota_utility.c @@ -484,7 +484,8 @@ is_database_empty(void) "FROM " " pg_class AS c, " " pg_namespace AS n " - "WHERE c.oid > 16384 and relnamespace = n.oid and nspname != 'diskquota'", + /* Fileter relkind c = composite type */ + "WHERE c.oid > 16384 and relnamespace = n.oid and nspname != 'diskquota' and relkind != 'c'", true, 0); if (ret != SPI_OK_SELECT) elog(ERROR, "cannot select pg_class and pg_namespace table, reason: %s.", strerror(errno)); @@ -1127,53 +1128,6 @@ get_size_in_mb(char *str) return result; } -/* - * Function to update the db list on each segment - * Will print a WARNING to log if out of memory - */ -void -update_monitor_db(Oid dbid, FetchTableStatType action) -{ - bool found = false; - - // add/remove the dbid to monitoring database cache to filter out table not under - // monitoring in hook functions - - LWLockAcquire(diskquota_locks.monitored_dbid_cache_lock, LW_EXCLUSIVE); - if (action == ADD_DB_TO_MONITOR) - { - MonitorDBEntry entry = hash_search(monitored_dbid_cache, &dbid, HASH_ENTER_NULL, &found); - if (entry == NULL) - { - ereport(WARNING, (errmsg("can't alloc memory on dbid cache, there ary too many databases to monitor"))); - } - entry->paused = false; - pg_atomic_init_u32(&(entry->epoch), 0); - } - else if (action == REMOVE_DB_FROM_BEING_MONITORED) - { - hash_search(monitored_dbid_cache, &dbid, HASH_REMOVE, &found); - } - else if (action == PAUSE_DB_TO_MONITOR) - { - MonitorDBEntry entry = hash_search(monitored_dbid_cache, &dbid, HASH_FIND, &found); - if (found) - { - entry->paused = true; - } - } - else if (action == RESUME_DB_TO_MONITOR) - { - MonitorDBEntry entry = hash_search(monitored_dbid_cache, &dbid, HASH_FIND, &found); - - if (found) - { - entry->paused = false; - } - } - LWLockRelease(diskquota_locks.monitored_dbid_cache_lock); -} - /* * Function to set disk quota ratio for per-segment */ @@ -1655,76 +1609,3 @@ check_role(Oid roleoid, char *rolname, int64 quota_limit_mb) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("Can not set disk quota for system owner: %s", rolname))); } - -PG_FUNCTION_INFO_V1(db_status); -Datum -db_status(PG_FUNCTION_ARGS) -{ - FuncCallContext *funcctx; - struct StatusCtx - { - int slot; - } * status_ctx; - - if (SRF_IS_FIRSTCALL()) - { - TupleDesc tupdesc; - MemoryContext oldcontext; - - /* Create a function context for cross-call persistence. 
*/ - funcctx = SRF_FIRSTCALL_INIT(); - - /* Switch to memory context appropriate for multiple function calls */ - oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx); - - tupdesc = CreateTemplateTupleDesc(8, false /*hasoid*/); - TupleDescInitEntry(tupdesc, (AttrNumber)1, "ID", INT2OID, -1 /*typmod*/, 0 /*attdim*/); - TupleDescInitEntry(tupdesc, (AttrNumber)2, "DBID", OIDOID, -1 /*typmod*/, 0 /*attdim*/); - TupleDescInitEntry(tupdesc, (AttrNumber)3, "WORKERID", INT2OID, -1 /*typmod*/, 0 /*attdim*/); - TupleDescInitEntry(tupdesc, (AttrNumber)4, "STATUS", INT2OID, -1 /*typmod*/, 0 /*attdim*/); - TupleDescInitEntry(tupdesc, (AttrNumber)5, "LAST_RUN_TIME", TIMESTAMPTZOID, -1 /*typmod*/, 0 /*attdim*/); - TupleDescInitEntry(tupdesc, (AttrNumber)6, "COST", INT2OID, -1 /*typmod*/, 0 /*attdim*/); - TupleDescInitEntry(tupdesc, (AttrNumber)7, "NEXT_RUN_TIME", TIMESTAMPTZOID, -1 /*typmod*/, 0 /*attdim*/); - TupleDescInitEntry(tupdesc, (AttrNumber)8, "EPOCH", INT8OID, -1 /*typmod*/, 0 /*attdim*/); - funcctx->tuple_desc = BlessTupleDesc(tupdesc); - - /* Create a local hash table and fill it with entries from shared memory. */ - status_ctx = (struct StatusCtx *)palloc(sizeof(struct StatusCtx)); - - /* Setup first calling context. */ - status_ctx->slot = 0; - funcctx->user_fctx = (void *)status_ctx; - MemoryContextSwitchTo(oldcontext); - } - - funcctx = SRF_PERCALL_SETUP(); - status_ctx = (struct StatusCtx *)funcctx->user_fctx; - - while (status_ctx->slot != MAX_NUM_MONITORED_DB) - { - DiskquotaDBEntry *dbEntry = &DiskquotaLauncherShmem->dbArray[status_ctx->slot]; - status_ctx->slot++; - if (!dbEntry->in_use) continue; - Datum result; - Datum values[8]; - bool nulls[8]; - HeapTuple tuple; - - values[0] = Int16GetDatum(dbEntry->id); - values[1] = ObjectIdGetDatum(dbEntry->dbid); - values[2] = Int16GetDatum(dbEntry->workerId); - values[3] = Int16GetDatum(dbEntry->status); - values[4] = TimestampTzGetDatum(dbEntry->last_run_time); - values[5] = Int16GetDatum(dbEntry->cost); - values[6] = TimestampTzGetDatum(dbEntry->next_run_time); - values[7] = Int64GetDatum(worker_get_epoch(dbEntry->dbid)); - - memset(nulls, false, sizeof(nulls)); - tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls); - result = HeapTupleGetDatum(tuple); - - SRF_RETURN_NEXT(funcctx, result); - } - - SRF_RETURN_DONE(funcctx); -} diff --git a/gp_activetable.c b/gp_activetable.c index ae4ad190fb5..61474cf0bbc 100644 --- a/gp_activetable.c +++ b/gp_activetable.c @@ -56,7 +56,6 @@ HTAB *active_tables_map = NULL; // Set * dbid will be added to it when creating diskquota extension * dbid will be removed from it when droping diskquota extension */ -HTAB *monitored_dbid_cache = NULL; // Map HTAB *altered_reloid_cache = NULL; // Set /* active table hooks which detect the disk file size change. 
*/
diff --git a/monitored_db.c b/monitored_db.c
new file mode 100644
index 00000000000..e7af1877161
--- /dev/null
+++ b/monitored_db.c
@@ -0,0 +1,335 @@
+#include "diskquota.h"
+#include "postgres.h"
+
+#include "funcapi.h"
+#include "port/atomics.h"
+#include "commands/dbcommands.h"
+#include "storage/proc.h"
+#include "utils/builtins.h"
+
+PG_FUNCTION_INFO_V1(show_worker_epoch);
+PG_FUNCTION_INFO_V1(db_status);
+PG_FUNCTION_INFO_V1(wait_for_worker_new_epoch);
+
+HTAB *monitored_dbid_cache = NULL; // Map
+/* Name table for MonitorDBStatus, expanded from diskquota_enum.h via the DB_STATUS X-macro */
+const char *MonitorDBStatusToString[] = {
+#define DB_STATUS(id, str) str,
+#include "diskquota_enum.h"
+#undef DB_STATUS
+};
+
+static bool check_for_timeout(TimestampTz start_time);
+static MonitorDBEntry dump_monitored_dbid_cache(long *nitems);
+// Returns the worker epoch for the current database.
+// An epoch marks a new iteration of refreshing quota usage by a bgworker.
+// An epoch is a 32-bit unsigned integer and there is NO invalid value.
+// Therefore, the UDF must throw an error if something unexpected occurs.
+Datum
+show_worker_epoch(PG_FUNCTION_ARGS)
+{
+	PG_RETURN_UINT32(worker_get_epoch(MyDatabaseId));
+}
+
+Datum
+db_status(PG_FUNCTION_ARGS)
+{
+	FuncCallContext *funcctx;
+	struct StatusCtx
+	{
+		MonitorDBEntry entries;
+		long           nitems;
+		int            index;
+	} * status_ctx;
+
+	if (SRF_IS_FIRSTCALL())
+	{
+		MemoryContext oldcontext;
+		TupleDesc     tupdesc;
+
+		/* Create a function context for cross-call persistence. */
+		funcctx = SRF_FIRSTCALL_INIT();
+
+		/* Switch to memory context appropriate for multiple function calls */
+		oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
+
+		tupdesc = CreateTemplateTupleDesc(5, false /*hasoid*/);
+		TupleDescInitEntry(tupdesc, (AttrNumber)1, "DBID", OIDOID, -1 /*typmod*/, 0 /*attdim*/);
+		TupleDescInitEntry(tupdesc, (AttrNumber)2, "DATNAME", TEXTOID, -1 /*typmod*/, 0 /*attdim*/);
+		TupleDescInitEntry(tupdesc, (AttrNumber)3, "STATUS", TEXTOID, -1 /*typmod*/, 0 /*attdim*/);
+		TupleDescInitEntry(tupdesc, (AttrNumber)4, "EPOCH", INT8OID, -1 /*typmod*/, 0 /*attdim*/);
+		TupleDescInitEntry(tupdesc, (AttrNumber)5, "PAUSED", BOOLOID, -1 /*typmod*/, 0 /*attdim*/);
+		funcctx->tuple_desc = BlessTupleDesc(tupdesc);
+
+		status_ctx = (struct StatusCtx *)palloc(sizeof(struct StatusCtx));
+
+		/* Setup first calling context. */
+		funcctx->user_fctx = (void *)status_ctx;
+		LWLockAcquire(diskquota_locks.monitored_dbid_cache_lock, LW_SHARED);
+		status_ctx->nitems = hash_get_num_entries(monitored_dbid_cache);
+		/*
+		 * We need to acquire monitored_dbid_cache_lock to access the
+		 * monitored_dbid_cache hash table, but it is unsafe to hold the lock
+		 * across the per-call invocations of this function: if the function
+		 * fails midway, the lock cannot be released correctly. So dump the
+		 * hash table into an array in local memory. The hash table is small,
+		 * so it does not consume much memory.
+		 */
+		status_ctx->entries = dump_monitored_dbid_cache(&status_ctx->nitems);
+		status_ctx->index   = 0;
+		LWLockRelease(diskquota_locks.monitored_dbid_cache_lock);
+		MemoryContextSwitchTo(oldcontext);
+	}
+
+	funcctx    = SRF_PERCALL_SETUP();
+	status_ctx = (struct StatusCtx *)funcctx->user_fctx;
+
+	while (status_ctx->index < status_ctx->nitems)
+	{
+		MonitorDBEntry entry = &status_ctx->entries[status_ctx->index];
+		status_ctx->index++;
+		Datum     result;
+		Datum     values[5];
+		bool      nulls[5];
+		HeapTuple tuple;
+
+		values[0]  = ObjectIdGetDatum(entry->dbid);
+		values[1]  = CStringGetTextDatum(get_database_name(entry->dbid));
+		int status = (int)pg_atomic_read_u32(&(entry->status));
+		status     = status >= DB_STATUS_MAX ? DB_STATUS_UNKNOWN : status;
+		values[2]  = CStringGetTextDatum(MonitorDBStatusToString[status]);
+		values[3]  = Int64GetDatum((int64)pg_atomic_read_u32(&(entry->epoch)));
+		values[4]  = BoolGetDatum(entry->paused);
+
+		memset(nulls, false, sizeof(nulls));
+		tuple  = heap_form_tuple(funcctx->tuple_desc, values, nulls);
+		result = HeapTupleGetDatum(tuple);
+
+		SRF_RETURN_NEXT(funcctx, result);
+	}
+	pfree(status_ctx->entries);
+	SRF_RETURN_DONE(funcctx);
+}
+
+// Checks if the bgworker for the current database works as expected.
+// 1. If it returns successfully in `diskquota.naptime`, the bgworker works as expected.
+// 2. If it does not terminate, there must be some issues with the bgworker.
+//    In this case, we must ensure this UDF can be interrupted by the user.
+Datum
+wait_for_worker_new_epoch(PG_FUNCTION_ARGS)
+{
+	TimestampTz start_time    = GetCurrentTimestamp();
+	uint32      current_epoch = worker_get_epoch(MyDatabaseId);
+	for (;;)
+	{
+		CHECK_FOR_INTERRUPTS();
+		if (check_for_timeout(start_time)) start_time = GetCurrentTimestamp();
+		uint32 new_epoch = worker_get_epoch(MyDatabaseId);
+		/* Unsigned integer underflow is OK */
+		if (new_epoch - current_epoch >= 2u)
+		{
+			PG_RETURN_BOOL(true);
+		}
+		/* Sleep for naptime to reduce CPU usage */
+		(void)WaitLatch(&MyProc->procLatch, WL_LATCH_SET | WL_TIMEOUT, diskquota_naptime ? diskquota_naptime : 1);
+		ResetLatch(&MyProc->procLatch);
+	}
+	PG_RETURN_BOOL(false);
+}
+
+bool
+diskquota_is_paused()
+{
+	Assert(MyDatabaseId != InvalidOid);
+	bool           paused = false;
+	bool           found;
+	MonitorDBEntry entry;
+
+	LWLockAcquire(diskquota_locks.monitored_dbid_cache_lock, LW_SHARED);
+	entry = hash_search(monitored_dbid_cache, &MyDatabaseId, HASH_FIND, &found);
+	if (found)
+	{
+		paused = entry->paused;
+	}
+	LWLockRelease(diskquota_locks.monitored_dbid_cache_lock);
+	return paused;
+}
+
+bool
+diskquota_is_readiness_logged()
+{
+	Assert(MyDatabaseId != InvalidOid);
+	bool is_readiness_logged;
+
+	LWLockAcquire(diskquota_locks.monitored_dbid_cache_lock, LW_SHARED);
+	{
+		MonitorDBEntry hash_entry;
+		bool           found;
+
+		hash_entry = (MonitorDBEntry)hash_search(monitored_dbid_cache, (void *)&MyDatabaseId, HASH_FIND, &found);
+		is_readiness_logged = found ? hash_entry->is_readiness_logged : false;
+	}
+	LWLockRelease(diskquota_locks.monitored_dbid_cache_lock);
+
+	return is_readiness_logged;
+}
+
+void
+diskquota_set_readiness_logged()
+{
+	Assert(MyDatabaseId != InvalidOid);
+
+	/*
+	 * We actually need ROW EXCLUSIVE lock here. Given that the current worker
+	 * is the only process that modifies the entry, it is safe to only take
+	 * the shared lock.
+	 */
+	LWLockAcquire(diskquota_locks.monitored_dbid_cache_lock, LW_SHARED);
+	{
+		MonitorDBEntry hash_entry;
+		bool           found;
+
+		hash_entry = (MonitorDBEntry)hash_search(monitored_dbid_cache, (void *)&MyDatabaseId, HASH_FIND, &found);
+		hash_entry->is_readiness_logged = true;
+	}
+	LWLockRelease(diskquota_locks.monitored_dbid_cache_lock);
+}
+
+bool
+worker_increase_epoch(Oid dbid)
+{
+	bool           found = false;
+	MonitorDBEntry entry;
+	LWLockAcquire(diskquota_locks.monitored_dbid_cache_lock, LW_SHARED);
+	entry = hash_search(monitored_dbid_cache, &dbid, HASH_FIND, &found);
+
+	if (found)
+	{
+		pg_atomic_fetch_add_u32(&(entry->epoch), 1);
+	}
+	LWLockRelease(diskquota_locks.monitored_dbid_cache_lock);
+	return found;
+}
+
+uint32
+worker_get_epoch(Oid dbid)
+{
+	bool           found = false;
+	uint32         epoch = 0;
+	MonitorDBEntry entry;
+	LWLockAcquire(diskquota_locks.monitored_dbid_cache_lock, LW_SHARED);
+	entry = hash_search(monitored_dbid_cache, &dbid, HASH_FIND, &found);
+	if (found)
+	{
+		epoch = pg_atomic_read_u32(&(entry->epoch));
+	}
+	LWLockRelease(diskquota_locks.monitored_dbid_cache_lock);
+	if (!found)
+	{
+		ereport(WARNING, (errcode(ERRCODE_INTERNAL_ERROR),
+		                  errmsg("[diskquota] database \"%s\" not found", get_database_name(dbid))));
+	}
+	return epoch;
+}
+
+/*
+ * Function to update the db list on each segment.
+ * Will print a WARNING to the log if out of memory.
+ */
+void
+update_monitor_db(Oid dbid, FetchTableStatType action)
+{
+	bool found = false;
+
+	// add/remove the dbid to the monitoring database cache to filter out tables
+	// not under monitoring in hook functions
+
+	LWLockAcquire(diskquota_locks.monitored_dbid_cache_lock, LW_EXCLUSIVE);
+	if (action == ADD_DB_TO_MONITOR)
+	{
+		MonitorDBEntry entry = hash_search(monitored_dbid_cache, &dbid, HASH_ENTER_NULL, &found);
+		/* HASH_ENTER_NULL returns NULL when shared memory is exhausted, so do not dereference it */
+		if (entry == NULL)
+		{
+			ereport(WARNING, (errmsg("can't alloc memory on dbid cache, there are too many databases to monitor")));
+		}
+		else
+		{
+			entry->paused = false;
+			pg_atomic_init_u32(&(entry->epoch), 0);
+			pg_atomic_init_u32(&(entry->status), DB_INIT);
+		}
+	}
+	else if (action == REMOVE_DB_FROM_BEING_MONITORED)
+	{
+		hash_search(monitored_dbid_cache, &dbid, HASH_REMOVE, &found);
+	}
+	else if (action == PAUSE_DB_TO_MONITOR)
+	{
+		MonitorDBEntry entry = hash_search(monitored_dbid_cache, &dbid, HASH_FIND, &found);
+		if (found)
+		{
+			entry->paused = true;
+		}
+	}
+	else if (action == RESUME_DB_TO_MONITOR)
+	{
+		MonitorDBEntry entry = hash_search(monitored_dbid_cache, &dbid, HASH_FIND, &found);
+
+		if (found)
+		{
+			entry->paused = false;
+		}
+	}
+	LWLockRelease(diskquota_locks.monitored_dbid_cache_lock);
+}
+
+void
+update_monitordb_status(Oid dbid, uint32 status)
+{
+	MonitorDBEntry entry;
+	bool           found;
+	LWLockAcquire(diskquota_locks.monitored_dbid_cache_lock, LW_SHARED);
+	{
+		entry = hash_search(monitored_dbid_cache, &dbid, HASH_FIND, &found);
+	}
+	if (found)
+	{
+		Assert(status < DB_STATUS_MAX);
+		pg_atomic_write_u32(&(entry->status), status);
+	}
+	else
+		ereport(WARNING, (errcode(ERRCODE_INTERNAL_ERROR), errmsg("[diskquota] database %u not found", dbid)));
+	LWLockRelease(diskquota_locks.monitored_dbid_cache_lock);
+}
+
+static bool
+check_for_timeout(TimestampTz start_time)
+{
+	long diff_secs  = 0;
+	int  diff_usecs = 0;
+	TimestampDifference(start_time, GetCurrentTimestamp(), &diff_secs, &diff_usecs);
+	if (diff_secs >= 60)
+	{
+		ereport(NOTICE, (errmsg("[diskquota] timeout when waiting for worker"),
+		                 errhint("please check if the bgworker is still alive.")));
+		return true;
+	}
+	return false;
+}
+
+static MonitorDBEntry
+dump_monitored_dbid_cache(long *nitems) +{ + HASH_SEQ_STATUS seq; + MonitorDBEntry curEntry; + int count = *nitems = hash_get_num_entries(monitored_dbid_cache); + MonitorDBEntry entries = curEntry = (MonitorDBEntry)palloc(sizeof(struct MonitorDBEntryStruct) * count); + + hash_seq_init(&seq, monitored_dbid_cache); + MonitorDBEntry entry; + while ((entry = hash_seq_search(&seq)) != NULL) + { + Assert(count > 0); + memcpy(curEntry, entry, sizeof(struct MonitorDBEntryStruct)); + curEntry++; + count--; + } + Assert(count == 0); + return entries; +} diff --git a/test_util.sql b/test_util.sql deleted file mode 100644 index b92e48e4d7d..00000000000 --- a/test_util.sql +++ /dev/null @@ -1,11 +0,0 @@ -CREATE TYPE diskquota.db_status AS ( - "id" smallint, - "dbid" oid, - "workerid" smallint, - "status" smallint, - "last_run_time" timestamptz, - "cost" smallint, - "next_run_time" timestamptz, - "epoch" int8 -); -CREATE FUNCTION diskquota.db_status() RETURNS setof diskquota.db_status AS '$libdir/diskquota-2.1.so', 'db_status' LANGUAGE C VOLATILE; diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 099eda3c15a..42ae25e3491 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -10,6 +10,7 @@ RegressTarget_Add(regress SCHEDULE_FILE ${CMAKE_CURRENT_SOURCE_DIR}/regress/diskquota_schedule REGRESS_OPTS --load-extension=gp_inject_fault + --load-extension=diskquota_test --dbname=contrib_regression) RegressTarget_Add(isolation2 @@ -26,7 +27,16 @@ RegressTarget_Add(isolation2 --load-extension=gp_inject_fault --dbname=isolation2test) +add_custom_target(install_test_extension + COMMAND + cmake -E copy ${CMAKE_SOURCE_DIR}/diskquota_test.control ${PG_HOME}/share/postgresql/extension + COMMAND + cmake -E copy ${CMAKE_SOURCE_DIR}/diskquota_test--1.0.sql ${PG_HOME}/share/postgresql/extension + ) + add_custom_target(installcheck) +add_dependencies(isolation2 install_test_extension) +add_dependencies(regress install_test_extension) add_dependencies(installcheck isolation2 regress) # Example to run test_truncate infinite times diff --git a/tests/regress/diskquota_schedule b/tests/regress/diskquota_schedule index ee2fe947b7b..c3292e1e5ee 100644 --- a/tests/regress/diskquota_schedule +++ b/tests/regress/diskquota_schedule @@ -1,7 +1,7 @@ test: config test: test_create_extension test: test_readiness_logged -test: test_init_table_size_table +#test: test_init_table_size_table test: test_relation_size test: test_relation_cache test: test_uncommitted_table_size diff --git a/tests/regress/expected/test_activetable_limit.out b/tests/regress/expected/test_activetable_limit.out index b5cc0fae4f8..c556f32bb38 100644 --- a/tests/regress/expected/test_activetable_limit.out +++ b/tests/regress/expected/test_activetable_limit.out @@ -49,8 +49,6 @@ ERROR: schema's disk space quota exceeded with name: s -- this test case is useless, remove this if anyone dislike it. -- but the hash capacity is smaller than 6, so the test case works for issue 51 DROP EXTENSION diskquota; --- wait worker exit -\! sleep 1 \c contrib_regression DROP DATABASE test_tablenum_limit_01; DROP DATABASE test_tablenum_limit_02; diff --git a/tests/regress/expected/test_ctas_no_preload_lib.out b/tests/regress/expected/test_ctas_no_preload_lib.out index 172ab45fdb7..b85a18ac92b 100644 --- a/tests/regress/expected/test_ctas_no_preload_lib.out +++ b/tests/regress/expected/test_ctas_no_preload_lib.out @@ -9,15 +9,6 @@ DISTRIBUTED BY (i); \! gpconfig -c shared_preload_libraries -v $(./data/current_binary_name) > /dev/null \! 
gpstop -far > /dev/null \c --- Make sure that the worker has started. --- We cannot use wait_for_worker_new_epoch() here because the worker might not --- have started yet. -SELECT pg_sleep(1); - pg_sleep ----------- - -(1 row) - SET ROLE test; -- Init table_size to include the table SELECT diskquota.init_table_size_table(); @@ -30,6 +21,12 @@ SELECT diskquota.init_table_size_table(); \! gpstop -far > /dev/null \c SET ROLE test; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 't_without_diskquota'::regclass ORDER BY segid; tableid | size | segid diff --git a/tests/regress/expected/test_extension.out b/tests/regress/expected/test_extension.out index 4d53bccbc06..25b4c7a4cd3 100644 --- a/tests/regress/expected/test_extension.out +++ b/tests/regress/expected/test_extension.out @@ -59,14 +59,14 @@ show diskquota.max_workers; 20 (1 row) -\! sleep 3; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l -2 --- FIXME: We need to sleep for a while each time after CREATE EXTENSION and --- DROP EXTENSION to wait for the bgworker to start or to exit. +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + \c dbx0 CREATE EXTENSION diskquota; -\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l -3 SELECT diskquota.wait_for_worker_new_epoch(); wait_for_worker_new_epoch --------------------------- @@ -122,8 +122,6 @@ SELECT diskquota.wait_for_worker_new_epoch(); t (1 row) -\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l -4 INSERT INTO SX.a values(generate_series(0, 10)); ERROR: schema's disk space quota exceeded with name: sx DROP TABLE SX.a; @@ -135,8 +133,6 @@ SELECT diskquota.wait_for_worker_new_epoch(); t (1 row) -\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l -5 CREATE SCHEMA SX; CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); SELECT diskquota.set_schema_quota('SX', '1MB'); @@ -157,8 +153,6 @@ ERROR: schema's disk space quota exceeded with name: sx DROP TABLE SX.a; \c dbx3 CREATE EXTENSION diskquota; -\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l -6 SELECT diskquota.wait_for_worker_new_epoch(); wait_for_worker_new_epoch --------------------------- @@ -185,8 +179,6 @@ ERROR: schema's disk space quota exceeded with name: sx DROP TABLE SX.a; \c dbx4 CREATE EXTENSION diskquota; -\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l -7 SELECT diskquota.wait_for_worker_new_epoch(); wait_for_worker_new_epoch --------------------------- @@ -213,8 +205,6 @@ ERROR: schema's disk space quota exceeded with name: sx DROP TABLE SX.a; \c dbx5 CREATE EXTENSION diskquota; -\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l -8 SELECT diskquota.wait_for_worker_new_epoch(); wait_for_worker_new_epoch --------------------------- @@ -241,8 +231,6 @@ ERROR: schema's disk space quota exceeded with name: sx DROP TABLE SX.a; \c dbx6 CREATE EXTENSION diskquota; -\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l -9 SELECT diskquota.wait_for_worker_new_epoch(); wait_for_worker_new_epoch --------------------------- @@ -269,8 +257,6 @@ ERROR: schema's disk space quota exceeded with name: sx DROP TABLE SX.a; \c dbx7 CREATE EXTENSION diskquota; -\! 
sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l -10 SELECT diskquota.wait_for_worker_new_epoch(); wait_for_worker_new_epoch --------------------------- @@ -297,8 +283,6 @@ ERROR: schema's disk space quota exceeded with name: sx DROP TABLE SX.a; \c dbx8 CREATE EXTENSION diskquota; -\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l -11 SELECT diskquota.wait_for_worker_new_epoch(); wait_for_worker_new_epoch --------------------------- @@ -325,8 +309,6 @@ ERROR: schema's disk space quota exceeded with name: sx DROP TABLE SX.a; \c dbx9 CREATE EXTENSION diskquota; -\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l -12 SELECT diskquota.wait_for_worker_new_epoch(); wait_for_worker_new_epoch --------------------------- @@ -335,8 +317,6 @@ SELECT diskquota.wait_for_worker_new_epoch(); \c dbx10 CREATE EXTENSION diskquota; -\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l -13 SELECT diskquota.wait_for_worker_new_epoch(); wait_for_worker_new_epoch --------------------------- @@ -357,8 +337,6 @@ SELECT diskquota.wait_for_worker_new_epoch(); (1 row) DROP EXTENSION diskquota; -\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l -12 \c dbx1 SELECT diskquota.pause(); pause @@ -373,8 +351,6 @@ SELECT diskquota.wait_for_worker_new_epoch(); (1 row) DROP EXTENSION diskquota; -\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l -11 \c dbx2 SELECT diskquota.pause(); pause @@ -389,8 +365,6 @@ SELECT diskquota.wait_for_worker_new_epoch(); (1 row) DROP EXTENSION diskquota; -\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l -10 \c dbx3 SELECT diskquota.pause(); pause @@ -405,8 +379,6 @@ SELECT diskquota.wait_for_worker_new_epoch(); (1 row) DROP EXTENSION diskquota; -\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l -9 \c dbx4 SELECT diskquota.pause(); pause @@ -421,8 +393,6 @@ SELECT diskquota.wait_for_worker_new_epoch(); (1 row) DROP EXTENSION diskquota; -\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l -8 \c dbx5 SELECT diskquota.pause(); pause @@ -437,8 +407,6 @@ SELECT diskquota.wait_for_worker_new_epoch(); (1 row) DROP EXTENSION diskquota; -\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l -7 \c dbx6 SELECT diskquota.pause(); pause @@ -453,8 +421,6 @@ SELECT diskquota.wait_for_worker_new_epoch(); (1 row) DROP EXTENSION diskquota; -\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l -6 \c dbx7 SELECT diskquota.pause(); pause @@ -469,8 +435,6 @@ SELECT diskquota.wait_for_worker_new_epoch(); (1 row) DROP EXTENSION diskquota; -\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l -5 \c dbx8 SELECT diskquota.pause(); pause @@ -485,8 +449,6 @@ SELECT diskquota.wait_for_worker_new_epoch(); (1 row) DROP EXTENSION diskquota; -\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l -4 \c dbx9 SELECT diskquota.pause(); pause @@ -501,8 +463,6 @@ SELECT diskquota.wait_for_worker_new_epoch(); (1 row) DROP EXTENSION diskquota; -\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l -3 \c dbx10 SELECT diskquota.pause(); pause @@ -517,8 +477,6 @@ SELECT diskquota.wait_for_worker_new_epoch(); (1 row) DROP EXTENSION diskquota; -\! 
sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l -2 \c contrib_regression DROP DATABASE dbx0 ; DROP DATABASE dbx1 ; diff --git a/tests/regress/expected/test_insert_after_drop.out b/tests/regress/expected/test_insert_after_drop.out index a1e154a7812..4d80cfeaf82 100644 --- a/tests/regress/expected/test_insert_after_drop.out +++ b/tests/regress/expected/test_insert_after_drop.out @@ -16,22 +16,15 @@ HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sur INSERT INTO a SELECT generate_series(1,100); -- expect insert fail INSERT INTO a SELECT generate_series(1,100000); -SELECT pg_sleep(10); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) INSERT INTO a SELECT generate_series(1,100); ERROR: schema's disk space quota exceeded with name: sdrtbl DROP EXTENSION diskquota; --- sleep 1 second in case of system slow -SELECT pg_sleep(1); - pg_sleep ----------- - -(1 row) - INSERT INTO a SELECT generate_series(1,100); DROP TABLE a; \c postgres diff --git a/tests/regress/expected/test_readiness_logged.out b/tests/regress/expected/test_readiness_logged.out index bd2bfae966e..c798f08b0ee 100644 --- a/tests/regress/expected/test_readiness_logged.out +++ b/tests/regress/expected/test_readiness_logged.out @@ -4,10 +4,11 @@ CREATE TABLE t (i int) DISTRIBUTED BY (i); CREATE EXTENSION diskquota; WARNING: [diskquota] diskquota is not ready because current database is not empty HINT: please run 'SELECT diskquota.init_table_size_table();' to initialize diskquota -SELECT pg_sleep(5); --Wait for the check completes - pg_sleep ----------- - +CREATE EXTENSION diskquota_test; +SELECT diskquota_test.wait('SELECT diskquota_test.check_cur_db_status(''UNREADY'');'); + wait +------ + t (1 row) SELECT count(*) FROM gp_toolkit.gp_log_database @@ -19,10 +20,10 @@ WHERE logmessage = '[diskquota] diskquota is not ready'; \! gpstop -raf > /dev/null \c -SELECT pg_sleep(1); --Wait for the check completes - pg_sleep ----------- - +SELECT diskquota_test.wait('SELECT diskquota_test.check_cur_db_status(''UNREADY'');'); + wait +------ + t (1 row) SELECT count(*) FROM gp_toolkit.gp_log_database diff --git a/tests/regress/expected/test_table_size.out b/tests/regress/expected/test_table_size.out index e953b445d1e..27b076725bd 100644 --- a/tests/regress/expected/test_table_size.out +++ b/tests/regress/expected/test_table_size.out @@ -3,17 +3,20 @@ create table a(i text) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. insert into a select * from generate_series(1,10000); -select pg_sleep(2); - pg_sleep ----------- - +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t (1 row) -create table buffer(oid oid, relname name, size bigint) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'oid' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
-insert into buffer select oid, relname, sum(pg_table_size(oid)) from gp_dist_random('pg_class') where oid > 16384 and (relkind='r' or relkind='m') and relname = 'a' group by oid, relname; -select sum(buffer.size) = diskquota.table_size.size from buffer, diskquota.table_size where buffer.oid = diskquota.table_size.tableid group by diskquota.table_size.size; +select pg_table_size('a') as table_size; + table_size +------------ + 819200 +(1 row) + +\gset +select :table_size = diskquota.table_size.size from diskquota.table_size where tableid = 'a'::regclass and segid=-1; ?column? ---------- t diff --git a/tests/regress/expected/test_update_db_cache.out b/tests/regress/expected/test_update_db_cache.out index f2e1fc60b18..785c8bff409 100644 --- a/tests/regress/expected/test_update_db_cache.out +++ b/tests/regress/expected/test_update_db_cache.out @@ -3,6 +3,7 @@ CREATE DATABASE test_db_cache; --end_ignore \c test_db_cache CREATE EXTENSION diskquota; +CREATE EXTENSION diskquota_test; -- Wait until the db cache gets updated SELECT diskquota.wait_for_worker_new_epoch(); wait_for_worker_new_epoch @@ -37,15 +38,10 @@ DISTRIBUTED BY (i); CREATE EXTENSION diskquota; WARNING: [diskquota] diskquota is not ready because current database is not empty HINT: please run 'SELECT diskquota.init_table_size_table();' to initialize diskquota --- Sleep until the worker adds the current db to cache so that it can be found --- when DROP EXTENSION. --- FIXME: We cannot use wait_for_worker_new_epoch() here because --- diskquota.state is not clean. Change sleep() to wait() after removing --- diskquota.state -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota_test.wait('SELECT diskquota_test.check_cur_db_status(''UNREADY'');'); + wait +------ + t (1 row) -- Should find nothing since t_no_extension is not recorded. diff --git a/tests/regress/expected/test_worker_not_ready.out b/tests/regress/expected/test_worker_not_ready.out index 6e916b4791d..8d61fb6255b 100644 --- a/tests/regress/expected/test_worker_not_ready.out +++ b/tests/regress/expected/test_worker_not_ready.out @@ -4,6 +4,7 @@ CREATE TABLE t (i int) DISTRIBUTED BY (i); CREATE EXTENSION diskquota; WARNING: [diskquota] diskquota is not ready because current database is not empty HINT: please run 'SELECT diskquota.init_table_size_table();' to initialize diskquota +CREATE EXTENSION diskquota_test; SELECT diskquota.set_role_quota(CURRENT_ROLE, '1 MB'); ERROR: Can not set disk quota for system owner: gpadmin SELECT diskquota.pause(); @@ -14,10 +15,10 @@ SELECT diskquota.pause(); -- diskquota.wait_for_worker_new_epoch() cannot be used here because -- diskquota.state is not clean. -SELECT pg_sleep(5); - pg_sleep ----------- - +SELECT diskquota_test.wait('SELECT diskquota_test.check_cur_db_status(''UNREADY'');'); + wait +------ + t (1 row) DROP EXTENSION diskquota; diff --git a/tests/regress/sql/test_activetable_limit.sql b/tests/regress/sql/test_activetable_limit.sql index 7c7bf9ded5e..9ab6666a0e0 100644 --- a/tests/regress/sql/test_activetable_limit.sql +++ b/tests/regress/sql/test_activetable_limit.sql @@ -45,9 +45,6 @@ INSERT INTO s.t3 SELECT generate_series(1, 10); -- expect failed DROP EXTENSION diskquota; --- wait worker exit -\! 
sleep 1 - \c contrib_regression DROP DATABASE test_tablenum_limit_01; DROP DATABASE test_tablenum_limit_02; diff --git a/tests/regress/sql/test_ctas_no_preload_lib.sql b/tests/regress/sql/test_ctas_no_preload_lib.sql index 38b4478df52..9af257b905c 100644 --- a/tests/regress/sql/test_ctas_no_preload_lib.sql +++ b/tests/regress/sql/test_ctas_no_preload_lib.sql @@ -14,11 +14,6 @@ DISTRIBUTED BY (i); \! gpstop -far > /dev/null \c --- Make sure that the worker has started. --- We cannot use wait_for_worker_new_epoch() here because the worker might not --- have started yet. -SELECT pg_sleep(1); - SET ROLE test; -- Init table_size to include the table @@ -28,7 +23,7 @@ SELECT diskquota.init_table_size_table(); \! gpstop -far > /dev/null \c SET ROLE test; - +SELECT diskquota.wait_for_worker_new_epoch(); SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 't_without_diskquota'::regclass ORDER BY segid; diff --git a/tests/regress/sql/test_extension.sql b/tests/regress/sql/test_extension.sql index 141936b3f10..7ba3c34c1b9 100644 --- a/tests/regress/sql/test_extension.sql +++ b/tests/regress/sql/test_extension.sql @@ -20,14 +20,10 @@ CREATE DATABASE dbx10 ; show max_worker_processes; show diskquota.max_workers; -\! sleep 3; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l - --- FIXME: We need to sleep for a while each time after CREATE EXTENSION and --- DROP EXTENSION to wait for the bgworker to start or to exit. +SELECT diskquota.wait_for_worker_new_epoch(); \c dbx0 CREATE EXTENSION diskquota; -\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l SELECT diskquota.wait_for_worker_new_epoch(); CREATE SCHEMA SX; CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); @@ -46,14 +42,12 @@ SELECT diskquota.init_table_size_table(); SELECT diskquota.wait_for_worker_new_epoch(); SELECT diskquota.set_schema_quota('SX', '1MB'); SELECT diskquota.wait_for_worker_new_epoch(); -\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l INSERT INTO SX.a values(generate_series(0, 10)); DROP TABLE SX.a; \c dbx2 CREATE EXTENSION diskquota; SELECT diskquota.wait_for_worker_new_epoch(); -\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l CREATE SCHEMA SX; CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); SELECT diskquota.set_schema_quota('SX', '1MB'); @@ -64,7 +58,6 @@ DROP TABLE SX.a; \c dbx3 CREATE EXTENSION diskquota; -\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l SELECT diskquota.wait_for_worker_new_epoch(); CREATE SCHEMA SX; CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); @@ -76,7 +69,6 @@ DROP TABLE SX.a; \c dbx4 CREATE EXTENSION diskquota; -\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l SELECT diskquota.wait_for_worker_new_epoch(); CREATE SCHEMA SX; CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); @@ -88,7 +80,6 @@ DROP TABLE SX.a; \c dbx5 CREATE EXTENSION diskquota; -\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l SELECT diskquota.wait_for_worker_new_epoch(); CREATE SCHEMA SX; CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); @@ -100,7 +91,6 @@ DROP TABLE SX.a; \c dbx6 CREATE EXTENSION diskquota; -\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l SELECT diskquota.wait_for_worker_new_epoch(); CREATE SCHEMA SX; CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); @@ -112,7 +102,6 @@ DROP TABLE SX.a; \c dbx7 CREATE EXTENSION diskquota; -\! 
sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l SELECT diskquota.wait_for_worker_new_epoch(); CREATE SCHEMA SX; CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); @@ -124,7 +113,6 @@ DROP TABLE SX.a; \c dbx8 CREATE EXTENSION diskquota; -\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l SELECT diskquota.wait_for_worker_new_epoch(); CREATE SCHEMA SX; CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); @@ -136,79 +124,66 @@ DROP TABLE SX.a; \c dbx9 CREATE EXTENSION diskquota; -\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l SELECT diskquota.wait_for_worker_new_epoch(); \c dbx10 CREATE EXTENSION diskquota; -\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l SELECT diskquota.wait_for_worker_new_epoch(); \c dbx0 SELECT diskquota.pause(); SELECT diskquota.wait_for_worker_new_epoch(); DROP EXTENSION diskquota; -\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l \c dbx1 SELECT diskquota.pause(); SELECT diskquota.wait_for_worker_new_epoch(); DROP EXTENSION diskquota; -\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l \c dbx2 SELECT diskquota.pause(); SELECT diskquota.wait_for_worker_new_epoch(); DROP EXTENSION diskquota; -\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l \c dbx3 SELECT diskquota.pause(); SELECT diskquota.wait_for_worker_new_epoch(); DROP EXTENSION diskquota; -\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l \c dbx4 SELECT diskquota.pause(); SELECT diskquota.wait_for_worker_new_epoch(); DROP EXTENSION diskquota; -\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l \c dbx5 SELECT diskquota.pause(); SELECT diskquota.wait_for_worker_new_epoch(); DROP EXTENSION diskquota; -\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l \c dbx6 SELECT diskquota.pause(); SELECT diskquota.wait_for_worker_new_epoch(); DROP EXTENSION diskquota; -\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l \c dbx7 SELECT diskquota.pause(); SELECT diskquota.wait_for_worker_new_epoch(); DROP EXTENSION diskquota; -\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l \c dbx8 SELECT diskquota.pause(); SELECT diskquota.wait_for_worker_new_epoch(); DROP EXTENSION diskquota; -\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l \c dbx9 SELECT diskquota.pause(); SELECT diskquota.wait_for_worker_new_epoch(); DROP EXTENSION diskquota; -\! sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l \c dbx10 SELECT diskquota.pause(); SELECT diskquota.wait_for_worker_new_epoch(); DROP EXTENSION diskquota; -\! 
sleep 0.5; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l \c contrib_regression diff --git a/tests/regress/sql/test_insert_after_drop.sql b/tests/regress/sql/test_insert_after_drop.sql index c0ae2928a8a..d744fd7c552 100644 --- a/tests/regress/sql/test_insert_after_drop.sql +++ b/tests/regress/sql/test_insert_after_drop.sql @@ -1,7 +1,6 @@ CREATE DATABASE db_insert_after_drop; \c db_insert_after_drop CREATE EXTENSION diskquota; -SELECT diskquota.wait_for_worker_new_epoch(); -- Test Drop Extension CREATE SCHEMA sdrtbl; SELECT diskquota.set_schema_quota('sdrtbl', '1 MB'); @@ -10,11 +9,9 @@ CREATE TABLE a(i int) DISTRIBUTED BY (i); INSERT INTO a SELECT generate_series(1,100); -- expect insert fail INSERT INTO a SELECT generate_series(1,100000); -SELECT pg_sleep(10); +SELECT diskquota.wait_for_worker_new_epoch(); INSERT INTO a SELECT generate_series(1,100); DROP EXTENSION diskquota; --- sleep 1 second in case of system slow -SELECT pg_sleep(1); INSERT INTO a SELECT generate_series(1,100); DROP TABLE a; diff --git a/tests/regress/sql/test_readiness_logged.sql b/tests/regress/sql/test_readiness_logged.sql index 84f75b55b7d..3151393cb0c 100644 --- a/tests/regress/sql/test_readiness_logged.sql +++ b/tests/regress/sql/test_readiness_logged.sql @@ -4,14 +4,15 @@ CREATE DATABASE test_readiness_logged; CREATE TABLE t (i int) DISTRIBUTED BY (i); CREATE EXTENSION diskquota; -SELECT pg_sleep(5); --Wait for the check completes +CREATE EXTENSION diskquota_test; +SELECT diskquota_test.wait('SELECT diskquota_test.check_cur_db_status(''UNREADY'');'); SELECT count(*) FROM gp_toolkit.gp_log_database WHERE logmessage = '[diskquota] diskquota is not ready'; \! gpstop -raf > /dev/null \c -SELECT pg_sleep(1); --Wait for the check completes +SELECT diskquota_test.wait('SELECT diskquota_test.check_cur_db_status(''UNREADY'');'); SELECT count(*) FROM gp_toolkit.gp_log_database WHERE logmessage = '[diskquota] diskquota is not ready'; diff --git a/tests/regress/sql/test_table_size.sql b/tests/regress/sql/test_table_size.sql index 3db880bf27d..334ecc2e8e5 100644 --- a/tests/regress/sql/test_table_size.sql +++ b/tests/regress/sql/test_table_size.sql @@ -4,9 +4,8 @@ create table a(i text) DISTRIBUTED BY (i); insert into a select * from generate_series(1,10000); -select pg_sleep(2); -create table buffer(oid oid, relname name, size bigint) DISTRIBUTED BY (oid); +SELECT diskquota.wait_for_worker_new_epoch(); +select pg_table_size('a') as table_size; +\gset +select :table_size = diskquota.table_size.size from diskquota.table_size where tableid = 'a'::regclass and segid=-1; -insert into buffer select oid, relname, sum(pg_table_size(oid)) from gp_dist_random('pg_class') where oid > 16384 and (relkind='r' or relkind='m') and relname = 'a' group by oid, relname; - -select sum(buffer.size) = diskquota.table_size.size from buffer, diskquota.table_size where buffer.oid = diskquota.table_size.tableid group by diskquota.table_size.size; diff --git a/tests/regress/sql/test_update_db_cache.sql b/tests/regress/sql/test_update_db_cache.sql index 01d7179d684..c426d1183f2 100644 --- a/tests/regress/sql/test_update_db_cache.sql +++ b/tests/regress/sql/test_update_db_cache.sql @@ -4,6 +4,7 @@ CREATE DATABASE test_db_cache; \c test_db_cache CREATE EXTENSION diskquota; +CREATE EXTENSION diskquota_test; -- Wait until the db cache gets updated SELECT diskquota.wait_for_worker_new_epoch(); @@ -26,12 +27,7 @@ DISTRIBUTED BY (i); CREATE EXTENSION diskquota; --- Sleep until the worker adds the current db to cache so that it can be 
found
--- when DROP EXTENSION.
--- FIXME: We cannot use wait_for_worker_new_epoch() here because
--- diskquota.state is not clean. Change sleep() to wait() after removing
--- diskquota.state
-SELECT pg_sleep(5);
+SELECT diskquota_test.wait('SELECT diskquota_test.check_cur_db_status(''UNREADY'');');
 
 -- Should find nothing since t_no_extension is not recorded.
 SELECT diskquota.diskquota_fetch_table_stat(0, ARRAY[]::oid[])
diff --git a/tests/regress/sql/test_worker_not_ready.sql b/tests/regress/sql/test_worker_not_ready.sql
index e095e4b8a82..5185fc86791 100644
--- a/tests/regress/sql/test_worker_not_ready.sql
+++ b/tests/regress/sql/test_worker_not_ready.sql
@@ -4,6 +4,7 @@ CREATE DATABASE db_not_ready;
 CREATE TABLE t (i int) DISTRIBUTED BY (i);
 
 CREATE EXTENSION diskquota;
+CREATE EXTENSION diskquota_test;
 
 SELECT diskquota.set_role_quota(CURRENT_ROLE, '1 MB');
 
@@ -11,7 +12,7 @@ SELECT diskquota.pause();
 
 -- diskquota.wait_for_worker_new_epoch() cannot be used here because
 -- diskquota.state is not clean.
-SELECT pg_sleep(5);
+SELECT diskquota_test.wait('SELECT diskquota_test.check_cur_db_status(''UNREADY'');');
 
 DROP EXTENSION diskquota;

From 7dfca8c116c139b93641e10dc1d452c43f067c49 Mon Sep 17 00:00:00 2001
From: Zhang Hao
Date: Thu, 17 Nov 2022 16:11:56 +0800
Subject: [PATCH 229/330] Filter the useless relation type. (#254)

Co-authored-by: Xiaoran Wang
---
 diskquota_utility.c                     |  4 +-
 gp_activetable.c                        |  9 +++-
 tests/regress/expected/test_relkind.out | 42 +++++++++++++++++++
 .../sql/test_init_table_size_table.sql  |  1 +
 tests/regress/sql/test_relkind.sql      | 21 ++++++++++
 5 files changed, 73 insertions(+), 4 deletions(-)
 create mode 100644 tests/regress/expected/test_relkind.out
 create mode 100644 tests/regress/sql/test_relkind.sql

diff --git a/diskquota_utility.c b/diskquota_utility.c
index badb1356dd9..93fcabb3d7a 100644
--- a/diskquota_utility.c
+++ b/diskquota_utility.c
@@ -484,8 +484,8 @@ is_database_empty(void)
 	        "FROM "
 	        "  pg_class AS c, "
 	        "  pg_namespace AS n "
-	        /* Fileter relkind c = composite type */
-	        "WHERE c.oid > 16384 and relnamespace = n.oid and nspname != 'diskquota' and relkind != 'c'",
+	        "WHERE c.oid > 16384 and relnamespace = n.oid and nspname != 'diskquota'"
+	        " and relkind not in ('v', 'c', 'f')",
 	        true, 0);
 	if (ret != SPI_OK_SELECT) elog(ERROR, "cannot select pg_class and pg_namespace table, reason: %s.", strerror(errno));
 
diff --git a/gp_activetable.c b/gp_activetable.c
index 61474cf0bbc..ee66aa0ad76 100644
--- a/gp_activetable.c
+++ b/gp_activetable.c
@@ -242,7 +242,8 @@ report_altered_reloid(Oid reloid)
 static void
 report_relation_cache_helper(Oid relid)
 {
-	bool found;
+	bool     found;
+	Relation rel;
 
 	/* We do not collect the active table in mirror segments */
 	if (IsRoleMirror())
@@ -263,7 +264,11 @@ report_relation_cache_helper(Oid relid)
 		return;
 	}
 
-	update_relation_cache(relid);
+	rel = diskquota_relation_open(relid, NoLock);
+	/* only track relations whose relkind has storage of its own */
+	if (rel->rd_rel->relkind != RELKIND_FOREIGN_TABLE && rel->rd_rel->relkind != RELKIND_COMPOSITE_TYPE &&
+	    rel->rd_rel->relkind != RELKIND_VIEW)
+		update_relation_cache(relid);
+	relation_close(rel, NoLock);
 }
 
 /*
diff --git a/tests/regress/expected/test_relkind.out b/tests/regress/expected/test_relkind.out
new file mode 100644
index 00000000000..54a1c76b632
--- /dev/null
+++ b/tests/regress/expected/test_relkind.out
@@ -0,0 +1,42 @@
+CREATE DATABASE test_relkind;
+\c test_relkind
+CREATE TYPE test_type AS (
+    "dbid" oid,
+    "datname" text
+);
+CREATE VIEW v AS select * from pg_class;
+CREATE EXTENSION diskquota;
+CREATE table test(a int);
+NOTICE: Table
doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +SELECT diskquota.init_table_size_table(); + init_table_size_table +----------------------- + +(1 row) + +-- diskquota.table_size should not change after creating a new type +SELECT tableid::regclass, size, segid +FROM diskquota.table_size +WHERE segid = -1 AND tableid::regclass::name NOT LIKE '%.%' +ORDER BY tableid; + tableid | size | segid +---------+------+------- + test | 0 | -1 +(1 row) + +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +\c contrib_regression +DROP DATABASE test_relkind; diff --git a/tests/regress/sql/test_init_table_size_table.sql b/tests/regress/sql/test_init_table_size_table.sql index 9acc928f995..4c871889b92 100644 --- a/tests/regress/sql/test_init_table_size_table.sql +++ b/tests/regress/sql/test_init_table_size_table.sql @@ -43,6 +43,7 @@ FROM diskquota.table_size WHERE segid = -1 AND tableid::regclass::name NOT LIKE '%.%' ORDER BY tableid; + DROP TABLE t; DROP TABLE toast; DROP TABLE ao; diff --git a/tests/regress/sql/test_relkind.sql b/tests/regress/sql/test_relkind.sql new file mode 100644 index 00000000000..2764a55f4cc --- /dev/null +++ b/tests/regress/sql/test_relkind.sql @@ -0,0 +1,21 @@ +CREATE DATABASE test_relkind; +\c test_relkind +CREATE TYPE test_type AS ( + "dbid" oid, + "datname" text +); +CREATE VIEW v AS select * from pg_class; +CREATE EXTENSION diskquota; +CREATE table test(a int); +SELECT diskquota.init_table_size_table(); +-- diskquota.table_size should not change after creating a new type +SELECT tableid::regclass, size, segid +FROM diskquota.table_size +WHERE segid = -1 AND tableid::regclass::name NOT LIKE '%.%' +ORDER BY tableid; + +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); +DROP EXTENSION diskquota; +\c contrib_regression +DROP DATABASE test_relkind; From 2798f933bcfe6b15cc6f30aa5b689e32fc71a8dc Mon Sep 17 00:00:00 2001 From: Xiaoran Wang Date: Fri, 18 Nov 2022 10:57:22 +0800 Subject: [PATCH 230/330] Fix worker time out and diskquota.max_workers (#255) --- diskquota.h | 1 + monitored_db.c | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/diskquota.h b/diskquota.h index 86735183b70..06b0bace476 100644 --- a/diskquota.h +++ b/diskquota.h @@ -34,6 +34,7 @@ #define MAX_NUM_MONITORED_DB 50 #define LAUNCHER_SCHEMA "diskquota_utility" #define EXTENSION_SCHEMA "diskquota" +extern int diskquota_worker_timeout; typedef enum { diff --git a/monitored_db.c b/monitored_db.c index e7af1877161..b34f4b16861 100644 --- a/monitored_db.c +++ b/monitored_db.c @@ -304,7 +304,7 @@ check_for_timeout(TimestampTz start_time) long diff_secs = 0; int diff_usecs = 0; TimestampDifference(start_time, GetCurrentTimestamp(), &diff_secs, &diff_usecs); - if (diff_secs >= 60) + if (diff_secs >= diskquota_worker_timeout) { ereport(NOTICE, (errmsg("[diskquota] timeout when waiting for worker"), errhint("please check if the bgworker is still alive."))); From 23472fb42a7662d6f7c20d936c7fed6877423a6b Mon Sep 17 00:00:00 2001 From: Chen Mulong Date: Fri, 18 Nov 2022 18:55:55 +0800 Subject: [PATCH 231/330] Fix flaky "database not found" (#256) --- monitored_db.c | 5 +++-- tests/init_file | 1 + 2 files 
changed, 4 insertions(+), 2 deletions(-) diff --git a/monitored_db.c b/monitored_db.c index b34f4b16861..1e374fc123c 100644 --- a/monitored_db.c +++ b/monitored_db.c @@ -226,7 +226,7 @@ worker_get_epoch(Oid dbid) if (!found) { ereport(WARNING, (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("[diskquota] database \"%s\" not found", get_database_name(dbid)))); + errmsg("[diskquota] database \"%s\" not found for getting epoch", get_database_name(dbid)))); } return epoch; } @@ -294,7 +294,8 @@ update_monitordb_status(Oid dbid, uint32 status) pg_atomic_write_u32(&(entry->status), status); } else - ereport(WARNING, (errcode(ERRCODE_INTERNAL_ERROR), errmsg("[diskquota] database %u not found", dbid))); + ereport(WARNING, (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("[diskquota] database %u not found for updating monitor db", dbid))); LWLockRelease(diskquota_locks.monitored_dbid_cache_lock); } diff --git a/tests/init_file b/tests/init_file index bc67331cfa5..2134886486b 100644 --- a/tests/init_file +++ b/tests/init_file @@ -6,6 +6,7 @@ # This pattern is extracted from gpdb/src/test/regress/init_file m/^(?:HINT|NOTICE):\s+.+\'DISTRIBUTED BY\' clause.*/ m/WARNING: \[diskquota\] worker not found for database.*/ +m/WARNING: \[diskquota\] database .* not found for getting epoch .*/ -- end_matchignore -- start_matchsubs From 3ee0fc310b782cf009a377561aea7bf07d739a9c Mon Sep 17 00:00:00 2001 From: Xiaoran Wang Date: Mon, 21 Nov 2022 11:19:24 +0800 Subject: [PATCH 232/330] Change the max value of diskquota.max_workers to 20 (#257) * Change the max value of diskquota.max_workers to 20 Previously, the max value of diskquota.max_workers was max_worker_processes; if max_worker_processes was less than 10 and diskquota.max_workers was set to a value greater than max_worker_processes, the cluster would crash. With the max value fixed at 20, diskquota still works when max_worker_processes is less than diskquota.max_workers; only some of the databases cannot be monitored, because diskquota cannot start bgworkers for them. A minimal sketch of this pattern follows below. * Modify the diskquota worker schedule test Test the case where diskquota.max_workers is greater than the number of available bgworkers.
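As a minimal sketch of the pattern (the GUC name quota.max_workers and the helper effective_worker_count() here are hypothetical illustrations, not the actual diskquota code): register the GUC with a hard-coded upper bound instead of max_worker_processes, and clamp the number of workers actually requested at runtime.

#include "postgres.h"
#include "miscadmin.h"
#include "utils/guc.h"

static int quota_max_workers; /* hypothetical GUC variable */

static void
define_worker_guc(void)
{
	/* Bound the GUC by a constant instead of max_worker_processes:
	 * max_worker_processes may itself be smaller than the default (10),
	 * and using it as the max can crash the cluster at startup. */
	DefineCustomIntVariable("quota.max_workers",
	                        "Max number of background workers.",
	                        NULL, &quota_max_workers,
	                        10, 1, 20, /* default, min, fixed max */
	                        PGC_POSTMASTER, 0, NULL, NULL, NULL);
}

static int
effective_worker_count(void)
{
	/* Never schedule more workers than the postmaster can actually start;
	 * any surplus databases simply go unmonitored. */
	return Min(quota_max_workers, max_worker_processes);
}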
Co-authored-by: Zhang Hao --- diskquota.c | 4 +-- .../regress/expected/test_worker_schedule.out | 29 ++++++++++--------- tests/regress/sql/test_worker_schedule.sql | 23 +++++++++------ 3 files changed, 31 insertions(+), 25 deletions(-) diff --git a/diskquota.c b/diskquota.c index 3488e6fd41c..cc388e57fa5 100644 --- a/diskquota.c +++ b/diskquota.c @@ -289,7 +289,7 @@ define_guc_variables(void) DefineCustomIntVariable( "diskquota.max_workers", "Max number of backgroud workers to run diskquota extension, should be less than max_worker_processes.", - NULL, &diskquota_max_workers, 10, 1, max_worker_processes, PGC_POSTMASTER, 0, NULL, NULL, NULL); + NULL, &diskquota_max_workers, 10, 1, 20, PGC_POSTMASTER, 0, NULL, NULL, NULL); } /* ---- Functions for disk quota worker process ---- */ @@ -595,7 +595,7 @@ disk_quota_launcher_main(Datum main_arg) if (curDB != NULL) { curDBId = curDB->dbid; - elog(DEBUG1, "[diskquota] next db to run:%d", curDB->id); + elog(DEBUG1, "[diskquota] next db to run:%u", curDBId); } else elog(DEBUG1, "[diskquota] no db to run"); diff --git a/tests/regress/expected/test_worker_schedule.out b/tests/regress/expected/test_worker_schedule.out index 8003a4e230c..5a3991fc1a6 100644 --- a/tests/regress/expected/test_worker_schedule.out +++ b/tests/regress/expected/test_worker_schedule.out @@ -131,9 +131,22 @@ SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = f3 | 98304 | -1 (1 row) +\c t4 +CREATE EXTENSION diskquota; +\c t5 +CREATE EXTENSION diskquota; +\c t6 +CREATE EXTENSION diskquota; +\c t7 +CREATE EXTENSION diskquota; +\c t8 +CREATE EXTENSION diskquota; --start_ignore -\! gpconfig -c diskquota.max_workers -v 11; -20220727:14:23:23:025074 gpconfig:wxiaoran-a01:xiwang-[INFO]:-completed successfully with parameters '-c diskquota.max_workers -v 11' +\! gpconfig -c diskquota.max_workers -v 7; +20221118:19:46:57:088045 gpconfig:wxiaoran-a02:wxiaoran-[INFO]:-completed successfully with parameters '-c diskquota.max_workers -v 7' +-- available workers is 6 +\! gpconfig -c max_worker_processes -v 10; +20221118:19:46:59:088207 gpconfig:wxiaoran-a02:wxiaoran-[INFO]:-completed successfully with parameters '-c max_worker_processes -v 10' \! gpstop -arf; 20220719:17:38:28:030945 gpstop:wxiaoran-a01:xiwang-[INFO]:-Starting gpstop with args: -arf 20220719:17:38:28:030945 gpstop:wxiaoran-a01:xiwang-[INFO]:-Gathering information and validating the environment... @@ -166,15 +179,7 @@ SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 20220719:17:38:36:030945 gpstop:wxiaoran-a01:xiwang-[INFO]:-Cleaning up leftover shared memory 20220719:17:38:38:030945 gpstop:wxiaoran-a01:xiwang-[INFO]:-Restarting System... --end_ignore -\c -SHOW diskquota.max_workers; - diskquota.max_workers ------------------------ - 11 -(1 row) - \c t4 -CREATE EXTENSION diskquota; CREATE TABLE f4(a int); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. @@ -192,7 +197,6 @@ SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = (1 row) \c t5 -CREATE EXTENSION diskquota; CREATE TABLE f5(a int); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. 
Make sure column(s) chosen are the optimal data distribution key to minimize skew. @@ -210,7 +214,6 @@ SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = (1 row) \c t6 -CREATE EXTENSION diskquota; CREATE TABLE f6(a int); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. @@ -228,7 +231,6 @@ SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = (1 row) \c t7 -CREATE EXTENSION diskquota; CREATE TABLE f7(a int); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. @@ -246,7 +248,6 @@ SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = (1 row) \c t8 -CREATE EXTENSION diskquota; CREATE TABLE f8(a int); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. diff --git a/tests/regress/sql/test_worker_schedule.sql b/tests/regress/sql/test_worker_schedule.sql index f63e02f4ec2..4ebb61728e6 100644 --- a/tests/regress/sql/test_worker_schedule.sql +++ b/tests/regress/sql/test_worker_schedule.sql @@ -54,44 +54,48 @@ INSERT into f3 SELECT generate_series(0,1000); SELECT diskquota.wait_for_worker_new_epoch(); SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f3'::regclass and segid = -1; +\c t4 +CREATE EXTENSION diskquota; +\c t5 +CREATE EXTENSION diskquota; +\c t6 +CREATE EXTENSION diskquota; +\c t7 +CREATE EXTENSION diskquota; +\c t8 +CREATE EXTENSION diskquota; --start_ignore -\! gpconfig -c diskquota.max_workers -v 11; +\! gpconfig -c diskquota.max_workers -v 7; +-- available workers is 6 +\! gpconfig -c max_worker_processes -v 10; \! 
gpstop -arf; --end_ignore -\c -SHOW diskquota.max_workers; - \c t4 -CREATE EXTENSION diskquota; CREATE TABLE f4(a int); INSERT into f4 SELECT generate_series(0,1000); SELECT diskquota.wait_for_worker_new_epoch(); SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f4'::regclass and segid = -1; \c t5 -CREATE EXTENSION diskquota; CREATE TABLE f5(a int); INSERT into f5 SELECT generate_series(0,1000); SELECT diskquota.wait_for_worker_new_epoch(); SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f5'::regclass and segid = -1; \c t6 -CREATE EXTENSION diskquota; CREATE TABLE f6(a int); INSERT into f6 SELECT generate_series(0,1000); SELECT diskquota.wait_for_worker_new_epoch(); SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f6'::regclass and segid = -1; \c t7 -CREATE EXTENSION diskquota; CREATE TABLE f7(a int); INSERT into f7 SELECT generate_series(0,1000); SELECT diskquota.wait_for_worker_new_epoch(); SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f7'::regclass and segid = -1; \c t8 -CREATE EXTENSION diskquota; CREATE TABLE f8(a int); INSERT into f8 SELECT generate_series(0,1000); SELECT diskquota.wait_for_worker_new_epoch(); @@ -221,5 +225,6 @@ DROP DATABASE t11; DROP DATABASE t12; \! gpconfig -r diskquota.worker_timeout; \! gpconfig -r diskquota.max_workers; +\! gpconfig -r max_worker_processes; \! gpstop -arf; --end_ignore From 6748837d9411c1cb39ddc8586e355a30badbd7c8 Mon Sep 17 00:00:00 2001 From: Xiaoran Wang Date: Mon, 21 Nov 2022 16:16:35 +0800 Subject: [PATCH 233/330] Revert change of worker_schedule test because of flaky test (#260) --- .../regress/expected/test_worker_schedule.out | 29 +++++++++---------- tests/regress/sql/test_worker_schedule.sql | 23 ++++++--------- 2 files changed, 23 insertions(+), 29 deletions(-) diff --git a/tests/regress/expected/test_worker_schedule.out b/tests/regress/expected/test_worker_schedule.out index 5a3991fc1a6..8003a4e230c 100644 --- a/tests/regress/expected/test_worker_schedule.out +++ b/tests/regress/expected/test_worker_schedule.out @@ -131,22 +131,9 @@ SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = f3 | 98304 | -1 (1 row) -\c t4 -CREATE EXTENSION diskquota; -\c t5 -CREATE EXTENSION diskquota; -\c t6 -CREATE EXTENSION diskquota; -\c t7 -CREATE EXTENSION diskquota; -\c t8 -CREATE EXTENSION diskquota; --start_ignore -\! gpconfig -c diskquota.max_workers -v 7; -20221118:19:46:57:088045 gpconfig:wxiaoran-a02:wxiaoran-[INFO]:-completed successfully with parameters '-c diskquota.max_workers -v 7' --- available workers is 6 -\! gpconfig -c max_worker_processes -v 10; -20221118:19:46:59:088207 gpconfig:wxiaoran-a02:wxiaoran-[INFO]:-completed successfully with parameters '-c max_worker_processes -v 10' +\! gpconfig -c diskquota.max_workers -v 11; +20220727:14:23:23:025074 gpconfig:wxiaoran-a01:xiwang-[INFO]:-completed successfully with parameters '-c diskquota.max_workers -v 11' \! gpstop -arf; 20220719:17:38:28:030945 gpstop:wxiaoran-a01:xiwang-[INFO]:-Starting gpstop with args: -arf 20220719:17:38:28:030945 gpstop:wxiaoran-a01:xiwang-[INFO]:-Gathering information and validating the environment... @@ -179,7 +166,15 @@ CREATE EXTENSION diskquota; 20220719:17:38:36:030945 gpstop:wxiaoran-a01:xiwang-[INFO]:-Cleaning up leftover shared memory 20220719:17:38:38:030945 gpstop:wxiaoran-a01:xiwang-[INFO]:-Restarting System...
--end_ignore +\c +SHOW diskquota.max_workers; + diskquota.max_workers +----------------------- + 11 +(1 row) + \c t4 +CREATE EXTENSION diskquota; CREATE TABLE f4(a int); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. @@ -197,6 +192,7 @@ SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = (1 row) \c t5 +CREATE EXTENSION diskquota; CREATE TABLE f5(a int); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. @@ -214,6 +210,7 @@ SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = (1 row) \c t6 +CREATE EXTENSION diskquota; CREATE TABLE f6(a int); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. @@ -231,6 +228,7 @@ SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = (1 row) \c t7 +CREATE EXTENSION diskquota; CREATE TABLE f7(a int); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. @@ -248,6 +246,7 @@ SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = (1 row) \c t8 +CREATE EXTENSION diskquota; CREATE TABLE f8(a int); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. diff --git a/tests/regress/sql/test_worker_schedule.sql b/tests/regress/sql/test_worker_schedule.sql index 4ebb61728e6..f63e02f4ec2 100644 --- a/tests/regress/sql/test_worker_schedule.sql +++ b/tests/regress/sql/test_worker_schedule.sql @@ -54,48 +54,44 @@ INSERT into f3 SELECT generate_series(0,1000); SELECT diskquota.wait_for_worker_new_epoch(); SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f3'::regclass and segid = -1; -\c t4 -CREATE EXTENSION diskquota; -\c t5 -CREATE EXTENSION diskquota; -\c t6 -CREATE EXTENSION diskquota; -\c t7 -CREATE EXTENSION diskquota; -\c t8 -CREATE EXTENSION diskquota; --start_ignore -\! gpconfig -c diskquota.max_workers -v 7; --- available workers is 6 -\! gpconfig -c max_worker_processes -v 10; +\! gpconfig -c diskquota.max_workers -v 11; \! 
gpstop -arf; --end_ignore +\c +SHOW diskquota.max_workers; + \c t4 +CREATE EXTENSION diskquota; CREATE TABLE f4(a int); INSERT into f4 SELECT generate_series(0,1000); SELECT diskquota.wait_for_worker_new_epoch(); SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f4'::regclass and segid = -1; \c t5 +CREATE EXTENSION diskquota; CREATE TABLE f5(a int); INSERT into f5 SELECT generate_series(0,1000); SELECT diskquota.wait_for_worker_new_epoch(); SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f5'::regclass and segid = -1; \c t6 +CREATE EXTENSION diskquota; CREATE TABLE f6(a int); INSERT into f6 SELECT generate_series(0,1000); SELECT diskquota.wait_for_worker_new_epoch(); SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f6'::regclass and segid = -1; \c t7 +CREATE EXTENSION diskquota; CREATE TABLE f7(a int); INSERT into f7 SELECT generate_series(0,1000); SELECT diskquota.wait_for_worker_new_epoch(); SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f7'::regclass and segid = -1; \c t8 +CREATE EXTENSION diskquota; CREATE TABLE f8(a int); INSERT into f8 SELECT generate_series(0,1000); SELECT diskquota.wait_for_worker_new_epoch(); @@ -225,6 +221,5 @@ DROP DATABASE t11; DROP DATABASE t12; \! gpconfig -r diskquota.worker_timeout; \! gpconfig -r diskquota.max_workers; -\! gpconfig -r max_worker_processes; \! gpstop -arf; --end_ignore From 96a23f4210e7a909082da3d02aa7296f40082cd3 Mon Sep 17 00:00:00 2001 From: Chen Mulong Date: Mon, 21 Nov 2022 17:54:45 +0800 Subject: [PATCH 234/330] Missing pause causes deadlock flaky (#258) --- .../expected/test_relation_cache.out | 20 +++++++++++++++++++ tests/isolation2/sql/test_relation_cache.sql | 4 ++++ 2 files changed, 24 insertions(+) diff --git a/tests/isolation2/expected/test_relation_cache.out b/tests/isolation2/expected/test_relation_cache.out index fd9d4906aa5..df61fdb810f 100644 --- a/tests/isolation2/expected/test_relation_cache.out +++ b/tests/isolation2/expected/test_relation_cache.out @@ -37,8 +37,28 @@ INSERT 10000 1:@db_name tempdb1: ABORT; ABORT +1:@db_name tempdb1: SELECT diskquota.pause(); + pause +------- + +(1 row) +1:@db_name tempdb1: SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) 1:@db_name tempdb1: DROP EXTENSION diskquota; DROP +2:@db_name tempdb2: SELECT diskquota.pause(); + pause +------- + +(1 row) +2:@db_name tempdb2: SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) 2:@db_name tempdb2: DROP EXTENSION diskquota; DROP 1q: ... 
diff --git a/tests/isolation2/sql/test_relation_cache.sql b/tests/isolation2/sql/test_relation_cache.sql index 59ebde9fea3..941e4c7614c 100644 --- a/tests/isolation2/sql/test_relation_cache.sql +++ b/tests/isolation2/sql/test_relation_cache.sql @@ -17,7 +17,11 @@ CREATE DATABASE tempdb2; 1:@db_name tempdb1: ABORT; +1:@db_name tempdb1: SELECT diskquota.pause(); +1:@db_name tempdb1: SELECT diskquota.wait_for_worker_new_epoch(); 1:@db_name tempdb1: DROP EXTENSION diskquota; +2:@db_name tempdb2: SELECT diskquota.pause(); +2:@db_name tempdb2: SELECT diskquota.wait_for_worker_new_epoch(); 2:@db_name tempdb2: DROP EXTENSION diskquota; 1q: 2q: From 81f9f7743c02ab0d094fdfda8662f9dc80b9eeb8 Mon Sep 17 00:00:00 2001 From: Xiaoran Wang Date: Thu, 24 Nov 2022 16:14:59 +0800 Subject: [PATCH 235/330] Fix memory leak when database is not ready (#262) Co-authored-by: Zhang Hao --- diskquota.c | 4 ++++ quotamodel.c | 1 - 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/diskquota.c b/diskquota.c index cc388e57fa5..b51b8b93496 100644 --- a/diskquota.c +++ b/diskquota.c @@ -357,6 +357,7 @@ disk_quota_worker_main(Datum main_arg) int has_error = worker_spi_get_extension_version(&major, &minor) != 0; if (major == DISKQUOTA_MAJOR_VERSION && minor == DISKQUOTA_MINOR_VERSION) break; + MemoryAccounting_Reset(); if (has_error) { @@ -421,6 +422,8 @@ disk_quota_worker_main(Datum main_arg) is_ready = true; break; } + + MemoryAccounting_Reset(); if (is_ready) { update_monitordb_status(MyWorkerInfo->dbEntry->dbid, DB_UNREADY); @@ -937,6 +940,7 @@ process_extension_ddl_message() (errmsg("[diskquota launcher]: received create/drop extension diskquota message, extension launcher"))); do_process_extension_ddl_message(&code, local_extension_ddl_message); + MemoryAccounting_Reset(); /* Send createdrop extension diskquota result back to QD */ LWLockAcquire(diskquota_locks.extension_ddl_message_lock, LW_EXCLUSIVE); diff --git a/quotamodel.c b/quotamodel.c index 941e5b25972..9387bd90166 100644 --- a/quotamodel.c +++ b/quotamodel.c @@ -649,7 +649,6 @@ check_diskquota_state_is_ready() CommitTransactionCommand(); else AbortCurrentTransaction(); - return is_ready; } From 30a0b4c7472f79ba41ee30380efb7ff217e10cf7 Mon Sep 17 00:00:00 2001 From: Zhang Hao Date: Thu, 1 Dec 2022 09:52:47 +0800 Subject: [PATCH 236/330] Change the default value of GUC to reduce default memory cost. (#266) Change the default value of `diskquota.max_active_tables` from 1M to 300K; the memory usage related to it is reduced from 300MB to 90MB.
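A back-of-envelope sketch of where these numbers come from, assuming roughly 300 bytes of shared memory per active-table entry once hash-table overhead is included (the helper below is illustrative, not the actual diskquota accounting): the shared-memory request grows linearly with the GUC value, so 1M entries costs about 300MB while 300K entries costs about 90MB. Note the diff below also moves the GUC from PGC_SIGHUP to PGC_POSTMASTER, consistent with shared memory being sized once at postmaster start.

#include "postgres.h"
#include "utils/hsearch.h"

extern int diskquota_max_active_tables; /* the GUC whose default changes here */

/* Illustrative: estimate the shared-memory cost of the active-table map.
 * hash_estimate_size() also accounts for buckets and segment headers,
 * which is why the effective per-entry cost exceeds sizeof(entry). */
static Size
active_table_map_size(Size entry_size)
{
	return hash_estimate_size(diskquota_max_active_tables, entry_size);
}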
--- diskquota.c | 2 +- tests/isolation2/expected/config.out | 2 +- tests/regress/expected/config.out | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/diskquota.c b/diskquota.c index b51b8b93496..e9a58d3bcec 100644 --- a/diskquota.c +++ b/diskquota.c @@ -280,7 +280,7 @@ define_guc_variables(void) 2, min_naptime, INT_MAX, PGC_SIGHUP, 0, NULL, NULL, NULL); DefineCustomIntVariable("diskquota.max_active_tables", "Max number of active tables monitored by disk-quota.", NULL, - &diskquota_max_active_tables, 1 * 1024 * 1024, 1, INT_MAX, PGC_SIGHUP, 0, NULL, NULL, NULL); + &diskquota_max_active_tables, 300 * 1024, 1, INT_MAX, PGC_POSTMASTER, 0, NULL, NULL, NULL); DefineCustomIntVariable("diskquota.worker_timeout", "Duration between each check (in seconds).", NULL, &diskquota_worker_timeout, 60, 1, INT_MAX, PGC_SIGHUP, 0, NULL, NULL, NULL); diff --git a/tests/isolation2/expected/config.out b/tests/isolation2/expected/config.out index 1fdc9c57b39..8ad8cbd0d08 100644 --- a/tests/isolation2/expected/config.out +++ b/tests/isolation2/expected/config.out @@ -21,7 +21,7 @@ 1: SHOW diskquota.max_active_tables; diskquota.max_active_tables ----------------------------- - 1048576 + 307200 (1 row) 1: SHOW diskquota.worker_timeout; diskquota.worker_timeout diff --git a/tests/regress/expected/config.out b/tests/regress/expected/config.out index 3b1d9761772..afeaa6b1d03 100644 --- a/tests/regress/expected/config.out +++ b/tests/regress/expected/config.out @@ -11,7 +11,7 @@ SHOW diskquota.naptime; SHOW diskquota.max_active_tables; diskquota.max_active_tables ----------------------------- - 1048576 + 307200 (1 row) SHOW diskquota.worker_timeout; From 46b6151b25427fb75d44dc475d561638416dada6 Mon Sep 17 00:00:00 2001 From: Zhang Hao Date: Fri, 2 Dec 2022 12:23:59 +0800 Subject: [PATCH 237/330] Refactor TableSizeEntry to reduce memory usage. (#264) Refactor the structure of TableSizeEntry to reduce memory usage. Previously, the size of each table on each segment was maintained in its own TableSizeEntry, which wasted lots of memory. In this PR, we refactor the TableSizeEntry to: struct TableSizeEntry { Oid reloid; int id; Oid tablespaceoid; Oid namespaceoid; Oid owneroid; uint32 flag; int64 totalsize[SEGMENT_SIZE_ARRAY_LENGTH]; }; In this way, we can maintain multiple segment sizes in one TableSizeEntry and significantly reduce memory usage. For 50 segments: reduced by 65%. For 100 segments: reduced by 82.5%. For 101 segments: reduced by 65.3%. For 1000 segments: reduced by 82.5%.
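To make the packing concrete, here is a small standalone sketch of the indexing and flag scheme (semantics inferred from the macros in the diff below; this is illustrative, not a drop-in excerpt). Segment id -1, the cluster-wide total, maps to slot 0 of the entry whose id is 0, and the top bit of each slot doubles as that segment's flush flag. The patch itself keeps int64 slots and masks with 1ul << 63; this sketch uses uint64_t to sidestep signedness.

#include <assert.h>
#include <stdint.h>

#define SEGMENT_SIZE_ARRAY_LENGTH 100
#define FLUSH_FLAG (UINT64_C(1) << 63)
#define SIZE_MASK  (FLUSH_FLAG - 1)

/* Entry `id` covers segids [id * LEN - 1, (id + 1) * LEN - 1). */
static int entry_id(int segid)    { return (segid + 1) / SEGMENT_SIZE_ARRAY_LENGTH; }
static int entry_index(int segid) { return (segid + 1) % SEGMENT_SIZE_ARRAY_LENGTH; }

int
main(void)
{
	uint64_t totalsize[SEGMENT_SIZE_ARRAY_LENGTH] = {0};

	/* segid -1 (the cluster-wide size) lands in slot 0 of entry 0. */
	assert(entry_id(-1) == 0 && entry_index(-1) == 0);

	/* Store a per-segment size, then mark that slot dirty in place. */
	totalsize[entry_index(7)] = 123456;
	totalsize[entry_index(7)] |= FLUSH_FLAG;

	assert((totalsize[entry_index(7)] & SIZE_MASK) == 123456); /* size survives */
	assert(totalsize[entry_index(7)] & FLUSH_FLAG);            /* needs flushing */
	return 0;
}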
--- gp_activetable.c | 3 +- quotamodel.c | 176 +++++++++++++++++++++++++++++++---------------- 2 files changed, 117 insertions(+), 62 deletions(-) diff --git a/gp_activetable.c b/gp_activetable.c index ee66aa0ad76..0cdecab6f4d 100644 --- a/gp_activetable.c +++ b/gp_activetable.c @@ -1133,8 +1133,7 @@ pull_active_table_size_from_seg(HTAB *local_table_stats_map, char *active_oid_ar /* get the segid, tablesize for each table */ segId = atoi(PQgetvalue(pgresult, j, 2)); key.segid = segId; - - entry = (DiskQuotaActiveTableEntry *)hash_search(local_table_stats_map, &key, HASH_ENTER, &found); + entry = (DiskQuotaActiveTableEntry *)hash_search(local_table_stats_map, &key, HASH_ENTER, &found); if (!found) { diff --git a/quotamodel.c b/quotamodel.c index 9387bd90166..a900bd9542d 100644 --- a/quotamodel.c +++ b/quotamodel.c @@ -45,9 +45,9 @@ /* cluster level max size of rejectmap */ #define MAX_DISK_QUOTA_REJECT_ENTRIES (1024 * 1024) /* init size of table_size_map */ -#define INIT_TABLES (20 * 1024) +#define INIT_TABLES (1 * 1024) /* max size of table_size_map */ -#define MAX_TABLES (200 * 1024) +#define MAX_TABLES (4 * 1024) /* cluster level init size of rejectmap */ #define INIT_DISK_QUOTA_REJECT_ENTRIES 8192 /* per database level max size of rejectmap */ @@ -55,6 +55,26 @@ #define MAX_NUM_KEYS_QUOTA_MAP 8 /* Number of attributes in quota configuration records. */ #define NUM_QUOTA_CONFIG_ATTRS 6 +#define SEGMENT_SIZE_ARRAY_LENGTH 100 + +/* TableSizeEntry macro function */ +/* Use the top bit of totalsize as a flush flag. If this bit is set, the size should be flushed into + * diskquota.table_size_table. */ +#define TableSizeEntryFlushFlag (1ul << 63) +#define TableSizeEntrySizeMask (TableSizeEntryFlushFlag - 1) +#define TableSizeEntryId(segid) ((segid + 1) / SEGMENT_SIZE_ARRAY_LENGTH) +#define TableSizeEntryIndex(segid) ((segid + 1) % SEGMENT_SIZE_ARRAY_LENGTH) +#define TableSizeEntryGetFlushFlag(entry, segid) \ + (entry->totalsize[TableSizeEntryIndex(segid)] & TableSizeEntryFlushFlag) +#define TableSizeEntrySetFlushFlag(entry, segid) entry->totalsize[TableSizeEntryIndex(segid)] |= TableSizeEntryFlushFlag +#define TableSizeEntryResetFlushFlag(entry, segid) \ + entry->totalsize[TableSizeEntryIndex(segid)] &= TableSizeEntrySizeMask +#define TableSizeEntryGetSize(entry, segid) (entry->totalsize[TableSizeEntryIndex(segid)] & TableSizeEntrySizeMask) +#define TableSizeEntrySetSize(entry, segid, size) entry->totalsize[TableSizeEntryIndex(segid)] = size +#define TableSizeEntrySegidStart(entry) (entry->id * SEGMENT_SIZE_ARRAY_LENGTH - 1) +#define TableSizeEntrySegidEnd(entry) \ + (((entry->id + 1) * SEGMENT_SIZE_ARRAY_LENGTH - 1) < SEGCOUNT ? ((entry->id + 1) * SEGMENT_SIZE_ARRAY_LENGTH - 1) \ + : SEGCOUNT) typedef struct TableSizeEntry TableSizeEntry; typedef struct NamespaceSizeEntry NamespaceSizeEntry; @@ -67,26 +87,37 @@ typedef struct LocalRejectMapEntry LocalRejectMapEntry; int SEGCOUNT = 0; /* - * local cache of table disk size and corresponding schema and owner + * local cache of table disk size and corresponding schema and owner. + * + * When id is 0, this TableSizeEntry stores the table size in the (-1 ~ + * SEGMENT_SIZE_ARRAY_LENGTH - 2)th segment, and so on. 
+ * |---------|--------------------------------------------------------------------------| + * | id | segment index | + * |---------|--------------------------------------------------------------------------| + * | 0 | [-1, SEGMENT_SIZE_ARRAY_LENGTH - 1) | + * | 1 | [SEGMENT_SIZE_ARRAY_LENGTH - 1, 2 * SEGMENT_SIZE_ARRAY_LENGTH - 1) | + * | 2 | [2 * SEGMENT_SIZE_ARRAY_LENGTH - 1, 3 * SEGMENT_SIZE_ARRAY_LENGTH - 1) | + * -------------------------------------------------------------------------------------- + * + * flag's each bit is used to show the table's status, which is described in TableSizeEntryFlag. + * + * totalsize contains tables' size on segments. When id is 0, totalsize[0] is the sum of all segments' table size. + * table size including fsm, visibility map etc. */ struct TableSizeEntry { Oid reloid; - int segid; + int id; Oid tablespaceoid; Oid namespaceoid; Oid owneroid; - uint32 flag; /* flag's each bit is used to show the table's status, - * which is described in TableSizeEntryFlag. - */ - int64 totalsize; /* table size including fsm, visibility map - * etc. */ + uint32 flag; + int64 totalsize[SEGMENT_SIZE_ARRAY_LENGTH]; }; typedef enum { - TABLE_EXIST = (1 << 0), /* whether table is already dropped */ - TABLE_NEED_FLUSH = (1 << 1) /* whether need to flush to table table_size */ + TABLE_EXIST = (1 << 0), /* whether table is already dropped */ } TableSizeEntryFlag; /* @@ -548,6 +579,7 @@ vacuum_disk_quota_model(uint32 id) TableSizeEntry *tsentry = NULL; LocalRejectMapEntry *localrejectentry; struct QuotaMapEntry *qentry; + TableEntryKey key; HASHCTL hash_ctl; StringInfoData str; @@ -564,7 +596,9 @@ vacuum_disk_quota_model(uint32 id) hash_seq_init(&iter, table_size_map); while ((tsentry = hash_seq_search(&iter)) != NULL) { - hash_search(table_size_map, &tsentry->reloid, HASH_REMOVE, NULL); + key.reloid = tsentry->reloid; + key.segid = tsentry->id; + hash_search(table_size_map, &key, HASH_REMOVE, NULL); } /* localrejectmap */ @@ -842,6 +876,7 @@ calculate_table_disk_usage(bool is_init, HTAB *local_active_table_stat_map) HASH_SEQ_STATUS iter; DiskQuotaActiveTableEntry *active_table_entry; TableEntryKey key; + TableEntryKey active_table_key; List *oidlist; ListCell *l; @@ -909,31 +944,35 @@ calculate_table_disk_usage(bool is_init, HTAB *local_active_table_stat_map) */ for (int i = -1; i < SEGCOUNT; i++) { - key.segid = i; key.reloid = relOid; - tsentry = (TableSizeEntry *)hash_search(table_size_map, &key, HASH_ENTER, &table_size_map_found); + key.segid = TableSizeEntryId(i); + tsentry = (TableSizeEntry *)hash_search(table_size_map, &key, HASH_ENTER, &table_size_map_found); if (!table_size_map_found) { - tsentry->reloid = relOid; - tsentry->segid = key.segid; - tsentry->totalsize = 0; + tsentry->reloid = relOid; + tsentry->id = key.segid; + memset(tsentry->totalsize, 0, sizeof(tsentry->totalsize)); tsentry->owneroid = InvalidOid; tsentry->namespaceoid = InvalidOid; tsentry->tablespaceoid = InvalidOid; tsentry->flag = 0; - set_table_size_entry_flag(tsentry, TABLE_NEED_FLUSH); + int seg_st = TableSizeEntrySegidStart(tsentry); + int seg_ed = TableSizeEntrySegidEnd(tsentry); + for (int j = seg_st; j < seg_ed; j++) TableSizeEntrySetFlushFlag(tsentry, j); } /* mark tsentry is_exist */ if (tsentry) set_table_size_entry_flag(tsentry, TABLE_EXIST); - active_table_entry = (DiskQuotaActiveTableEntry *)hash_search(local_active_table_stat_map, &key, HASH_FIND, - &active_tbl_found); + active_table_key.reloid = relOid; + active_table_key.segid = i; + active_table_entry = (DiskQuotaActiveTableEntry 
*)hash_search( + local_active_table_stat_map, &active_table_key, HASH_FIND, &active_tbl_found); /* skip to recalculate the tables which are not in active list */ if (active_tbl_found) { - if (key.segid == -1) + if (i == -1) { /* pretend process as utility mode, and append the table size on master */ Gp_role = GP_ROLE_UTILITY; @@ -943,56 +982,56 @@ calculate_table_disk_usage(bool is_init, HTAB *local_active_table_stat_map) Gp_role = GP_ROLE_DISPATCH; } /* firstly calculate the updated total size of a table */ - updated_total_size = active_table_entry->tablesize - tsentry->totalsize; + updated_total_size = active_table_entry->tablesize - TableSizeEntryGetSize(tsentry, i); /* update the table_size entry */ - tsentry->totalsize = (int64)active_table_entry->tablesize; - set_table_size_entry_flag(tsentry, TABLE_NEED_FLUSH); + TableSizeEntrySetSize(tsentry, i, active_table_entry->tablesize); + TableSizeEntrySetFlushFlag(tsentry, i); /* update the disk usage, there may be entries in the map whose keys are InvlidOid as the tsentry does * not exist in the table_size_map */ - update_size_for_quota(updated_total_size, NAMESPACE_QUOTA, (Oid[]){tsentry->namespaceoid}, key.segid); - update_size_for_quota(updated_total_size, ROLE_QUOTA, (Oid[]){tsentry->owneroid}, key.segid); + update_size_for_quota(updated_total_size, NAMESPACE_QUOTA, (Oid[]){tsentry->namespaceoid}, i); + update_size_for_quota(updated_total_size, ROLE_QUOTA, (Oid[]){tsentry->owneroid}, i); update_size_for_quota(updated_total_size, ROLE_TABLESPACE_QUOTA, - (Oid[]){tsentry->owneroid, tsentry->tablespaceoid}, key.segid); + (Oid[]){tsentry->owneroid, tsentry->tablespaceoid}, i); update_size_for_quota(updated_total_size, NAMESPACE_TABLESPACE_QUOTA, - (Oid[]){tsentry->namespaceoid, tsentry->tablespaceoid}, key.segid); + (Oid[]){tsentry->namespaceoid, tsentry->tablespaceoid}, i); } /* table size info doesn't need to flush at init quota model stage */ if (is_init) { - reset_table_size_entry_flag(tsentry, TABLE_NEED_FLUSH); + TableSizeEntryResetFlushFlag(tsentry, i); } /* if schema change, transfer the file size */ if (tsentry->namespaceoid != relnamespace) { - transfer_table_for_quota(tsentry->totalsize, NAMESPACE_QUOTA, (Oid[]){tsentry->namespaceoid}, - (Oid[]){relnamespace}, key.segid); - transfer_table_for_quota(tsentry->totalsize, NAMESPACE_TABLESPACE_QUOTA, + transfer_table_for_quota(TableSizeEntryGetSize(tsentry, i), NAMESPACE_QUOTA, + (Oid[]){tsentry->namespaceoid}, (Oid[]){relnamespace}, i); + transfer_table_for_quota(TableSizeEntryGetSize(tsentry, i), NAMESPACE_TABLESPACE_QUOTA, (Oid[]){tsentry->namespaceoid, tsentry->tablespaceoid}, - (Oid[]){relnamespace, tsentry->tablespaceoid}, key.segid); + (Oid[]){relnamespace, tsentry->tablespaceoid}, i); tsentry->namespaceoid = relnamespace; } /* if owner change, transfer the file size */ if (tsentry->owneroid != relowner) { - transfer_table_for_quota(tsentry->totalsize, ROLE_QUOTA, (Oid[]){tsentry->owneroid}, (Oid[]){relowner}, - key.segid); - transfer_table_for_quota(tsentry->totalsize, ROLE_TABLESPACE_QUOTA, + transfer_table_for_quota(TableSizeEntryGetSize(tsentry, i), ROLE_QUOTA, (Oid[]){tsentry->owneroid}, + (Oid[]){relowner}, i); + transfer_table_for_quota(TableSizeEntryGetSize(tsentry, i), ROLE_TABLESPACE_QUOTA, (Oid[]){tsentry->owneroid, tsentry->tablespaceoid}, - (Oid[]){relowner, tsentry->tablespaceoid}, key.segid); + (Oid[]){relowner, tsentry->tablespaceoid}, i); tsentry->owneroid = relowner; } if (tsentry->tablespaceoid != reltablespace) { - 
transfer_table_for_quota(tsentry->totalsize, NAMESPACE_TABLESPACE_QUOTA, + transfer_table_for_quota(TableSizeEntryGetSize(tsentry, i), NAMESPACE_TABLESPACE_QUOTA, (Oid[]){tsentry->namespaceoid, tsentry->tablespaceoid}, - (Oid[]){tsentry->namespaceoid, reltablespace}, key.segid); - transfer_table_for_quota(tsentry->totalsize, ROLE_TABLESPACE_QUOTA, + (Oid[]){tsentry->namespaceoid, reltablespace}, i); + transfer_table_for_quota(TableSizeEntryGetSize(tsentry, i), ROLE_TABLESPACE_QUOTA, (Oid[]){tsentry->owneroid, tsentry->tablespaceoid}, - (Oid[]){tsentry->owneroid, reltablespace}, key.segid); + (Oid[]){tsentry->owneroid, reltablespace}, i); tsentry->tablespaceoid = reltablespace; } } @@ -1013,12 +1052,18 @@ calculate_table_disk_usage(bool is_init, HTAB *local_active_table_stat_map) { if (!get_table_size_entry_flag(tsentry, TABLE_EXIST)) { - update_size_for_quota(-tsentry->totalsize, NAMESPACE_QUOTA, (Oid[]){tsentry->namespaceoid}, tsentry->segid); - update_size_for_quota(-tsentry->totalsize, ROLE_QUOTA, (Oid[]){tsentry->owneroid}, tsentry->segid); - update_size_for_quota(-tsentry->totalsize, ROLE_TABLESPACE_QUOTA, - (Oid[]){tsentry->owneroid, tsentry->tablespaceoid}, tsentry->segid); - update_size_for_quota(-tsentry->totalsize, NAMESPACE_TABLESPACE_QUOTA, - (Oid[]){tsentry->namespaceoid, tsentry->tablespaceoid}, tsentry->segid); + int seg_st = TableSizeEntrySegidStart(tsentry); + int seg_ed = TableSizeEntrySegidEnd(tsentry); + for (int i = seg_st; i < seg_ed; i++) + { + update_size_for_quota(-TableSizeEntryGetSize(tsentry, i), NAMESPACE_QUOTA, + (Oid[]){tsentry->namespaceoid}, i); + update_size_for_quota(-TableSizeEntryGetSize(tsentry, i), ROLE_QUOTA, (Oid[]){tsentry->owneroid}, i); + update_size_for_quota(-TableSizeEntryGetSize(tsentry, i), ROLE_TABLESPACE_QUOTA, + (Oid[]){tsentry->owneroid, tsentry->tablespaceoid}, i); + update_size_for_quota(-TableSizeEntryGetSize(tsentry, i), NAMESPACE_TABLESPACE_QUOTA, + (Oid[]){tsentry->namespaceoid, tsentry->tablespaceoid}, i); + } } } } @@ -1034,6 +1079,7 @@ flush_to_table_size(void) { HASH_SEQ_STATUS iter; TableSizeEntry *tsentry = NULL; + TableEntryKey key; StringInfoData delete_statement; StringInfoData insert_statement; StringInfoData deleted_table_expr; @@ -1058,22 +1104,32 @@ flush_to_table_size(void) hash_seq_init(&iter, table_size_map); while ((tsentry = hash_seq_search(&iter)) != NULL) { - /* delete dropped table from both table_size_map and table table_size */ - if (!get_table_size_entry_flag(tsentry, TABLE_EXIST)) + int seg_st = TableSizeEntrySegidStart(tsentry); + int seg_ed = TableSizeEntrySegidEnd(tsentry); + for (int i = seg_st; i < seg_ed; i++) { - appendStringInfo(&deleted_table_expr, "(%u,%d), ", tsentry->reloid, tsentry->segid); - delete_statement_flag = true; - - hash_search(table_size_map, &tsentry->reloid, HASH_REMOVE, NULL); + /* delete dropped table from both table_size_map and table table_size */ + if (!get_table_size_entry_flag(tsentry, TABLE_EXIST)) + { + appendStringInfo(&deleted_table_expr, "(%u,%d), ", tsentry->reloid, i); + delete_statement_flag = true; + } + /* update the table size by delete+insert in table table_size */ + else if (TableSizeEntryGetFlushFlag(tsentry, i)) + { + appendStringInfo(&deleted_table_expr, "(%u,%d), ", tsentry->reloid, i); + appendStringInfo(&insert_statement, "(%u,%ld,%d), ", tsentry->reloid, TableSizeEntryGetSize(tsentry, i), + i); + delete_statement_flag = true; + insert_statement_flag = true; + TableSizeEntryResetFlushFlag(tsentry, i); + } } - /* update the table size by delete+insert in 
table table_size */ - else if (get_table_size_entry_flag(tsentry, TABLE_NEED_FLUSH)) + if (!get_table_size_entry_flag(tsentry, TABLE_EXIST)) { - reset_table_size_entry_flag(tsentry, TABLE_NEED_FLUSH); - appendStringInfo(&deleted_table_expr, "(%u,%d), ", tsentry->reloid, tsentry->segid); - appendStringInfo(&insert_statement, "(%u,%ld,%d), ", tsentry->reloid, tsentry->totalsize, tsentry->segid); - delete_statement_flag = true; - insert_statement_flag = true; + key.reloid = tsentry->reloid; + key.segid = tsentry->id; + hash_search(table_size_map, &key, HASH_REMOVE, NULL); } } truncateStringInfo(&deleted_table_expr, deleted_table_expr.len - strlen(", ")); From ff39ffff93c0d48216a7e1ef7228aea75437c8ef Mon Sep 17 00:00:00 2001 From: Xiaoran Wang Date: Mon, 5 Dec 2022 14:12:08 +0800 Subject: [PATCH 238/330] Correct table_size_entry key (#268) There is a bug: a TableSizeEntry is removed from table_size_map by oid alone. Actually, the hash map key is TableEntryKey. Fix it. --- quotamodel.c | 63 ++++++++++++++++++++++++++--------------------------- 1 file changed, 32 insertions(+), 31 deletions(-) diff --git a/quotamodel.c b/quotamodel.c index a900bd9542d..a3eeee7e0a8 100644 --- a/quotamodel.c +++ b/quotamodel.c @@ -71,10 +71,11 @@ entry->totalsize[TableSizeEntryIndex(segid)] &= TableSizeEntrySizeMask #define TableSizeEntryGetSize(entry, segid) (entry->totalsize[TableSizeEntryIndex(segid)] & TableSizeEntrySizeMask) #define TableSizeEntrySetSize(entry, segid, size) entry->totalsize[TableSizeEntryIndex(segid)] = size -#define TableSizeEntrySegidStart(entry) (entry->id * SEGMENT_SIZE_ARRAY_LENGTH - 1) -#define TableSizeEntrySegidEnd(entry) \ (((entry->id + 1) * SEGMENT_SIZE_ARRAY_LENGTH - 1) < SEGCOUNT ? ((entry->id + 1) * SEGMENT_SIZE_ARRAY_LENGTH - 1) \ : SEGCOUNT) +#define TableSizeEntrySegidStart(entry) (entry->key.id * SEGMENT_SIZE_ARRAY_LENGTH - 1) +#define TableSizeEntrySegidEnd(entry) \ (((entry->key.id + 1) * SEGMENT_SIZE_ARRAY_LENGTH - 1) < SEGCOUNT \ ? ((entry->key.id + 1) * SEGMENT_SIZE_ARRAY_LENGTH - 1) \ : SEGCOUNT) typedef struct TableSizeEntry TableSizeEntry; typedef struct NamespaceSizeEntry NamespaceSizeEntry; @@ -104,15 +105,20 @@ int SEGCOUNT = 0; /* - * local cache of table disk size and corresponding schema and owner + * local cache of table disk size and corresponding schema and owner. + * + * When id is 0, this TableSizeEntry stores the table size in the (-1 ~ + * SEGMENT_SIZE_ARRAY_LENGTH - 2)th segment, and so on. * totalsize contains tables' size on segments. When id is 0, totalsize[0] is the sum of all segments' table size. * table size including fsm, visibility map etc.
*/ +typedef struct TableSizeEntryKey +{ + Oid reloid; + int id; +} TableSizeEntryKey; + struct TableSizeEntry { - Oid reloid; - int id; - Oid tablespaceoid; - Oid namespaceoid; - Oid owneroid; - uint32 flag; - int64 totalsize[SEGMENT_SIZE_ARRAY_LENGTH]; + TableSizeEntryKey key; + Oid tablespaceoid; + Oid namespaceoid; + Oid owneroid; + uint32 flag; + int64 totalsize[SEGMENT_SIZE_ARRAY_LENGTH]; }; typedef enum @@ -528,7 +534,7 @@ init_disk_quota_model(uint32 id) initStringInfo(&str); memset(&hash_ctl, 0, sizeof(hash_ctl)); - hash_ctl.keysize = sizeof(TableEntryKey); + hash_ctl.keysize = sizeof(TableSizeEntryKey); hash_ctl.entrysize = sizeof(TableSizeEntry); hash_ctl.hash = tag_hash; @@ -579,7 +585,6 @@ vacuum_disk_quota_model(uint32 id) TableSizeEntry *tsentry = NULL; LocalRejectMapEntry *localrejectentry; struct QuotaMapEntry *qentry; - TableEntryKey key; HASHCTL hash_ctl; StringInfoData str; @@ -587,7 +592,7 @@ vacuum_disk_quota_model(uint32 id) /* table_size_map */ memset(&hash_ctl, 0, sizeof(hash_ctl)); - hash_ctl.keysize = sizeof(TableEntryKey); + hash_ctl.keysize = sizeof(TableSizeEntryKey); hash_ctl.entrysize = sizeof(TableSizeEntry); hash_ctl.hash = tag_hash; @@ -596,9 +601,7 @@ vacuum_disk_quota_model(uint32 id) hash_seq_init(&iter, table_size_map); while ((tsentry = hash_seq_search(&iter)) != NULL) { - key.reloid = tsentry->reloid; - key.segid = tsentry->id; - hash_search(table_size_map, &key, HASH_REMOVE, NULL); + hash_search(table_size_map, &tsentry->key, HASH_REMOVE, NULL); } /* localrejectmap */ @@ -875,7 +878,7 @@ calculate_table_disk_usage(bool is_init, HTAB *local_active_table_stat_map) Oid relOid; HASH_SEQ_STATUS iter; DiskQuotaActiveTableEntry *active_table_entry; - TableEntryKey key; + TableSizeEntryKey key; TableEntryKey active_table_key; List *oidlist; ListCell *l; @@ -945,20 +948,21 @@ calculate_table_disk_usage(bool is_init, HTAB *local_active_table_stat_map) for (int i = -1; i < SEGCOUNT; i++) { key.reloid = relOid; - key.segid = TableSizeEntryId(i); + key.id = TableSizeEntryId(i); tsentry = (TableSizeEntry *)hash_search(table_size_map, &key, HASH_ENTER, &table_size_map_found); if (!table_size_map_found) { - tsentry->reloid = relOid; - tsentry->id = key.segid; + tsentry->key.reloid = relOid; + tsentry->key.id = key.id; memset(tsentry->totalsize, 0, sizeof(tsentry->totalsize)); tsentry->owneroid = InvalidOid; tsentry->namespaceoid = InvalidOid; tsentry->tablespaceoid = InvalidOid; tsentry->flag = 0; - int seg_st = TableSizeEntrySegidStart(tsentry); - int seg_ed = TableSizeEntrySegidEnd(tsentry); + + int seg_st = TableSizeEntrySegidStart(tsentry); + int seg_ed = TableSizeEntrySegidEnd(tsentry); for (int j = seg_st; j < seg_ed; j++) TableSizeEntrySetFlushFlag(tsentry, j); } @@ -1079,7 +1083,6 @@ flush_to_table_size(void) { HASH_SEQ_STATUS iter; TableSizeEntry *tsentry = NULL; - TableEntryKey key; StringInfoData delete_statement; StringInfoData insert_statement; StringInfoData deleted_table_expr; @@ -1111,15 +1114,15 @@ flush_to_table_size(void) /* delete dropped table from both table_size_map and table table_size */ if (!get_table_size_entry_flag(tsentry, TABLE_EXIST)) { - appendStringInfo(&deleted_table_expr, "(%u,%d), ", tsentry->reloid, i); + appendStringInfo(&deleted_table_expr, "(%u,%d), ", tsentry->key.reloid, i); delete_statement_flag = true; } /* update the table size by delete+insert in table table_size */ else if (TableSizeEntryGetFlushFlag(tsentry, i)) { - appendStringInfo(&deleted_table_expr, "(%u,%d), ", tsentry->reloid, i); - 
appendStringInfo(&insert_statement, "(%u,%ld,%d), ", tsentry->reloid, TableSizeEntryGetSize(tsentry, i), - i); + appendStringInfo(&deleted_table_expr, "(%u,%d), ", tsentry->key.reloid, i); + appendStringInfo(&insert_statement, "(%u,%ld,%d), ", tsentry->key.reloid, + TableSizeEntryGetSize(tsentry, i), i); delete_statement_flag = true; insert_statement_flag = true; TableSizeEntryResetFlushFlag(tsentry, i); @@ -1127,9 +1130,7 @@ flush_to_table_size(void) } if (!get_table_size_entry_flag(tsentry, TABLE_EXIST)) { - key.reloid = tsentry->reloid; - key.segid = tsentry->id; - hash_search(table_size_map, &key, HASH_REMOVE, NULL); + hash_search(table_size_map, &tsentry->key, HASH_REMOVE, NULL); } } truncateStringInfo(&deleted_table_expr, deleted_table_expr.len - strlen(", ")); From 083464133bb410de813f8228c76cc945ab7b9f36 Mon Sep 17 00:00:00 2001 From: Chen Mulong Date: Mon, 5 Dec 2022 14:21:57 +0800 Subject: [PATCH 239/330] Add cmake opt DISKQUOTA_DDL_CHANGE_CHECK (#270) --- diskquota.c | 2 +- upgrade_test/CMakeLists.txt | 7 ++++++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/diskquota.c b/diskquota.c index e9a58d3bcec..aab7c6121e3 100644 --- a/diskquota.c +++ b/diskquota.c @@ -652,7 +652,7 @@ disk_quota_launcher_main(Datum main_arg) if (nap.tv_sec != 0 || nap.tv_usec != 0) { - elog(DEBUG1, "[diskquota] naptime sec:%ld, usec:%d", nap.tv_sec, nap.tv_usec); + elog(DEBUG1, "[diskquota] naptime sec:%ld, usec:%ld", nap.tv_sec, nap.tv_usec); rc = WaitLatch(&MyProc->procLatch, WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, (nap.tv_sec * 1000L) + (nap.tv_usec / 1000L)); ResetLatch(&MyProc->procLatch); diff --git a/upgrade_test/CMakeLists.txt b/upgrade_test/CMakeLists.txt index 63ef32df102..32fbf81345c 100644 --- a/upgrade_test/CMakeLists.txt +++ b/upgrade_test/CMakeLists.txt @@ -1,5 +1,10 @@ include(${CMAKE_SOURCE_DIR}/cmake/Regress.cmake) +if(NOT DEFINED DISKQUOTA_DDL_CHANGE_CHECK) + set(DISKQUOTA_DDL_CHANGE_CHECK ON CACHE + STRING "Skip the DDL updates check. Should not be disabled on CI" FORCE) +endif() + regresstarget_add( upgradecheck INIT_FILE @@ -44,7 +49,7 @@ foreach(ddl IN LISTS ddl_files) endforeach() # if DDL file modified, insure the last release file passed in -if(DISKQUOTA_DDL_MODIFIED AND NOT DEFINED DISKQUOTA_LAST_RELEASE_PATH) +if(DISKQUOTA_DDL_CHANGE_CHECK AND DISKQUOTA_DDL_MODIFIED AND NOT DEFINED DISKQUOTA_LAST_RELEASE_PATH) message( FATAL_ERROR "DDL file modify detected, upgrade test is required. Add -DDISKQUOTA_LAST_RELEASE_PATH=//diskquota--_.tar.gz. And re-try the generation" From 8c09af41b8d8d502df8c38454c3b3353eb182c1b Mon Sep 17 00:00:00 2001 From: Chen Mulong Date: Mon, 5 Dec 2022 16:40:44 +0800 Subject: [PATCH 240/330] Fix regression caused by #264 (#272) #264 caused some segment ratio tests to fail. The entry's relevant fields need to be set at the end of the iteration. Otherwise, only the first seg will pass the condition check. --- quotamodel.c | 61 +++++++++++++++++++++++++++------------------------- 1 file changed, 32 insertions(+), 29 deletions(-) diff --git a/quotamodel.c b/quotamodel.c index a3eeee7e0a8..52bb5c0d821 100644 --- a/quotamodel.c +++ b/quotamodel.c @@ -945,16 +945,18 @@ calculate_table_disk_usage(bool is_init, HTAB *local_active_table_stat_map) * and the content id is continuous, so it's safe to use SEGCOUNT * to get segid.
*/ - for (int i = -1; i < SEGCOUNT; i++) + for (int cur_segid = -1; cur_segid < SEGCOUNT; cur_segid++) { key.reloid = relOid; - key.id = TableSizeEntryId(i); + key.id = TableSizeEntryId(cur_segid); tsentry = (TableSizeEntry *)hash_search(table_size_map, &key, HASH_ENTER, &table_size_map_found); if (!table_size_map_found) { tsentry->key.reloid = relOid; tsentry->key.id = key.id; + Assert(TableSizeEntrySegidStart(tsentry) == cur_segid); + memset(tsentry->totalsize, 0, sizeof(tsentry->totalsize)); tsentry->owneroid = InvalidOid; tsentry->namespaceoid = InvalidOid; @@ -969,14 +971,14 @@ calculate_table_disk_usage(bool is_init, HTAB *local_active_table_stat_map) /* mark tsentry is_exist */ if (tsentry) set_table_size_entry_flag(tsentry, TABLE_EXIST); active_table_key.reloid = relOid; - active_table_key.segid = i; + active_table_key.segid = cur_segid; active_table_entry = (DiskQuotaActiveTableEntry *)hash_search( local_active_table_stat_map, &active_table_key, HASH_FIND, &active_tbl_found); /* skip to recalculate the tables which are not in active list */ if (active_tbl_found) { - if (i == -1) + if (cur_segid == -1) { /* pretend process as utility mode, and append the table size on master */ Gp_role = GP_ROLE_UTILITY; @@ -986,56 +988,57 @@ calculate_table_disk_usage(bool is_init, HTAB *local_active_table_stat_map) Gp_role = GP_ROLE_DISPATCH; } /* firstly calculate the updated total size of a table */ - updated_total_size = active_table_entry->tablesize - TableSizeEntryGetSize(tsentry, i); + updated_total_size = active_table_entry->tablesize - TableSizeEntryGetSize(tsentry, cur_segid); /* update the table_size entry */ - TableSizeEntrySetSize(tsentry, i, active_table_entry->tablesize); - TableSizeEntrySetFlushFlag(tsentry, i); + TableSizeEntrySetSize(tsentry, cur_segid, active_table_entry->tablesize); + TableSizeEntrySetFlushFlag(tsentry, cur_segid); /* update the disk usage, there may be entries in the map whose keys are InvlidOid as the tsentry does * not exist in the table_size_map */ - update_size_for_quota(updated_total_size, NAMESPACE_QUOTA, (Oid[]){tsentry->namespaceoid}, i); - update_size_for_quota(updated_total_size, ROLE_QUOTA, (Oid[]){tsentry->owneroid}, i); + update_size_for_quota(updated_total_size, NAMESPACE_QUOTA, (Oid[]){tsentry->namespaceoid}, cur_segid); + update_size_for_quota(updated_total_size, ROLE_QUOTA, (Oid[]){tsentry->owneroid}, cur_segid); update_size_for_quota(updated_total_size, ROLE_TABLESPACE_QUOTA, - (Oid[]){tsentry->owneroid, tsentry->tablespaceoid}, i); + (Oid[]){tsentry->owneroid, tsentry->tablespaceoid}, cur_segid); update_size_for_quota(updated_total_size, NAMESPACE_TABLESPACE_QUOTA, - (Oid[]){tsentry->namespaceoid, tsentry->tablespaceoid}, i); + (Oid[]){tsentry->namespaceoid, tsentry->tablespaceoid}, cur_segid); } /* table size info doesn't need to flush at init quota model stage */ if (is_init) { - TableSizeEntryResetFlushFlag(tsentry, i); + TableSizeEntryResetFlushFlag(tsentry, cur_segid); } /* if schema change, transfer the file size */ if (tsentry->namespaceoid != relnamespace) { - transfer_table_for_quota(TableSizeEntryGetSize(tsentry, i), NAMESPACE_QUOTA, - (Oid[]){tsentry->namespaceoid}, (Oid[]){relnamespace}, i); - transfer_table_for_quota(TableSizeEntryGetSize(tsentry, i), NAMESPACE_TABLESPACE_QUOTA, - (Oid[]){tsentry->namespaceoid, tsentry->tablespaceoid}, - (Oid[]){relnamespace, tsentry->tablespaceoid}, i); - tsentry->namespaceoid = relnamespace; + transfer_table_for_quota(TableSizeEntryGetSize(tsentry, cur_segid), NAMESPACE_QUOTA, + 
(Oid[]){tsentry->namespaceoid}, (Oid[]){relnamespace}, cur_segid); } /* if owner change, transfer the file size */ if (tsentry->owneroid != relowner) { - transfer_table_for_quota(TableSizeEntryGetSize(tsentry, i), ROLE_QUOTA, (Oid[]){tsentry->owneroid}, - (Oid[]){relowner}, i); - transfer_table_for_quota(TableSizeEntryGetSize(tsentry, i), ROLE_TABLESPACE_QUOTA, - (Oid[]){tsentry->owneroid, tsentry->tablespaceoid}, - (Oid[]){relowner, tsentry->tablespaceoid}, i); - tsentry->owneroid = relowner; + transfer_table_for_quota(TableSizeEntryGetSize(tsentry, cur_segid), ROLE_QUOTA, + (Oid[]){tsentry->owneroid}, (Oid[]){relowner}, cur_segid); } - if (tsentry->tablespaceoid != reltablespace) + if (tsentry->tablespaceoid != reltablespace || tsentry->namespaceoid != relnamespace) { - transfer_table_for_quota(TableSizeEntryGetSize(tsentry, i), NAMESPACE_TABLESPACE_QUOTA, + transfer_table_for_quota(TableSizeEntryGetSize(tsentry, cur_segid), NAMESPACE_TABLESPACE_QUOTA, (Oid[]){tsentry->namespaceoid, tsentry->tablespaceoid}, - (Oid[]){tsentry->namespaceoid, reltablespace}, i); - transfer_table_for_quota(TableSizeEntryGetSize(tsentry, i), ROLE_TABLESPACE_QUOTA, + (Oid[]){relnamespace, reltablespace}, cur_segid); + } + if (tsentry->tablespaceoid != reltablespace || tsentry->owneroid != relowner) + { + transfer_table_for_quota(TableSizeEntryGetSize(tsentry, cur_segid), ROLE_TABLESPACE_QUOTA, (Oid[]){tsentry->owneroid, tsentry->tablespaceoid}, - (Oid[]){tsentry->owneroid, reltablespace}, i); + (Oid[]){relowner, reltablespace}, cur_segid); + } + + if (cur_segid == (TableSizeEntrySegidEnd(tsentry) - 1)) + { + tsentry->namespaceoid = relnamespace; + tsentry->owneroid = relowner; tsentry->tablespaceoid = reltablespace; } } From 6f933f90c24ba86d64660b618f3a35843beab6fe Mon Sep 17 00:00:00 2001 From: Zhang Hao Date: Mon, 5 Dec 2022 20:51:52 +0800 Subject: [PATCH 241/330] Add a GUC `diskquota.max_table_segments`. (#271) Use diskquota.max_table_segments to define the max number of table segments in the cluster. The value equals (segment_number + 1) * max_table_number. Since a hashmap in shared memory can take over other structures' memory space even when it exceeds its limit, a counter is added to count how many tables have been added to the table_size_map, to prevent too many entries from being created (see the sketch below).
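The guard added in this commit boils down to the following hedged sketch (simplified from the diff; bounded_hash_enter() and entry_counter are illustrative names, not the actual diskquota code): consult a shared atomic counter before HASH_ENTER, because a shared-memory HTAB can keep allocating past its declared max_size by borrowing from the rest of shared memory.

#include "postgres.h"
#include "port/atomics.h"
#include "utils/hsearch.h"

extern pg_atomic_uint32 *entry_counter; /* shared, initialized to 0 elsewhere */

static void *
bounded_hash_enter(HTAB *map, const void *key, uint32 max_entries, bool *found)
{
	void *entry;

	/* Over the limit: only look up existing entries, never create new ones. */
	if (pg_atomic_read_u32(entry_counter) > max_entries)
		return hash_search(map, key, HASH_FIND, found);

	entry = hash_search(map, key, HASH_ENTER, found);
	if (!*found)
		(void) pg_atomic_add_fetch_u32(entry_counter, 1);
	return entry;
}

Every HASH_REMOVE must decrement the same counter, as vacuum_disk_quota_model() and flush_to_table_size() do in the diff; otherwise the limit would eventually lock out all new entries.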
Co-authored-by: Xiaoran Wang Co-authored-by: Chen Mulong --- diskquota.c | 22 ++++++++++++---- diskquota.h | 7 +++++ quotamodel.c | 73 +++++++++++++++++++++++++++++++++++----------------- 3 files changed, 73 insertions(+), 29 deletions(-) diff --git a/diskquota.c b/diskquota.c index aab7c6121e3..2740f2768f6 100644 --- a/diskquota.c +++ b/diskquota.c @@ -67,11 +67,12 @@ static volatile sig_atomic_t got_sigusr1 = false; static volatile sig_atomic_t got_sigusr2 = false; /* GUC variables */ -int diskquota_naptime = 0; -int diskquota_max_active_tables = 0; -int diskquota_worker_timeout = 60; /* default timeout is 60 seconds */ -bool diskquota_hardlimit = false; -int diskquota_max_workers = 10; +int diskquota_naptime = 0; +int diskquota_max_active_tables = 0; +int diskquota_worker_timeout = 60; /* default timeout is 60 seconds */ +bool diskquota_hardlimit = false; +int diskquota_max_workers = 10; +int diskquota_max_table_segments = 0; DiskQuotaLocks diskquota_locks; ExtensionDDLMessage *extension_ddl_message = NULL; @@ -83,6 +84,9 @@ static DiskQuotaWorkerEntry *volatile MyWorkerInfo = NULL; // how many database diskquota are monitoring on static int num_db = 0; +/* how many TableSizeEntry entries are maintained in all the table_size_maps in shared memory */ +pg_atomic_uint32 *diskquota_table_size_entry_num; + static DiskquotaLauncherShmemStruct *DiskquotaLauncherShmem; #define MIN_SLEEPTIME 100 /* milliseconds */ @@ -290,6 +294,10 @@ define_guc_variables(void) "diskquota.max_workers", "Max number of backgroud workers to run diskquota extension, should be less than max_worker_processes.", NULL, &diskquota_max_workers, 10, 1, 20, PGC_POSTMASTER, 0, NULL, NULL, NULL); + DefineCustomIntVariable("diskquota.max_table_segments", "Max number of table segments on the cluster.", NULL, + &diskquota_max_table_segments, 10 * 1024 * 1024, + INIT_NUM_TABLE_SIZE_ENTRIES * MAX_NUM_MONITORED_DB, INT_MAX, PGC_POSTMASTER, 0, NULL, NULL, + NULL); } /* ---- Functions for disk quota worker process ---- */ @@ -1554,6 +1562,10 @@ init_launcher_shmem() DiskquotaLauncherShmem->dbArray[i].workerId = INVALID_WORKER_ID; } } + /* init TableSizeEntry counter */ + diskquota_table_size_entry_num = + ShmemInitStruct("diskquota TableSizeEntry counter", sizeof(pg_atomic_uint32), &found); + if (!found) pg_atomic_init_u32(diskquota_table_size_entry_num, 0); } /* diff --git a/diskquota.h b/diskquota.h index 06b0bace476..28008fc1cc3 100644 --- a/diskquota.h +++ b/diskquota.h @@ -30,6 +30,13 @@ #include +/* init number of TableSizeEntry in table_size_map */ +#define INIT_NUM_TABLE_SIZE_ENTRIES 128 +/* max number of TableSizeEntry in table_size_map */ +#define MAX_NUM_TABLE_SIZE_ENTRIES (diskquota_max_table_segments / SEGMENT_SIZE_ARRAY_LENGTH) +/* length of segment size array in TableSizeEntry */ +#define SEGMENT_SIZE_ARRAY_LENGTH 100 + /* max number of monitored database with diskquota enabled */ #define MAX_NUM_MONITORED_DB 50 #define LAUNCHER_SCHEMA "diskquota_utility" #define EXTENSION_SCHEMA "diskquota" diff --git a/quotamodel.c b/quotamodel.c index 52bb5c0d821..85c1e97d30a 100644 --- a/quotamodel.c +++ b/quotamodel.c @@ -44,10 +44,6 @@ /* cluster level max size of rejectmap */ #define MAX_DISK_QUOTA_REJECT_ENTRIES (1024 * 1024) -/* init size of table_size_map */ -#define INIT_TABLES (1 * 1024) -/* max size of table_size_map */ -#define MAX_TABLES (4 * 1024) /* cluster level init size of rejectmap */ #define INIT_DISK_QUOTA_REJECT_ENTRIES 8192 /* per database level max size of rejectmap */ #define MAX_NUM_KEYS_QUOTA_MAP 8 /* Number of attributes in quota
configuration records. */ #define NUM_QUOTA_CONFIG_ATTRS 6 -#define SEGMENT_SIZE_ARRAY_LENGTH 100 /* TableSizeEntry macro function */ /* Use the top bit of totalsize as a flush flag. If this bit is set, the size should be flushed into @@ -85,7 +80,9 @@ typedef struct RejectMapEntry RejectMapEntry; typedef struct GlobalRejectMapEntry GlobalRejectMapEntry; typedef struct LocalRejectMapEntry LocalRejectMapEntry; -int SEGCOUNT = 0; +int SEGCOUNT = 0; +extern int diskquota_max_table_segments; +extern pg_atomic_uint32 *diskquota_table_size_entry_num; /* * local cache of table disk size and corresponding schema and owner. @@ -490,7 +487,7 @@ static Size diskquota_worker_shmem_size() { Size size; - size = hash_estimate_size(MAX_TABLES, sizeof(TableSizeEntry)); + size = hash_estimate_size(MAX_NUM_TABLE_SIZE_ENTRIES / MAX_NUM_MONITORED_DB + 100, sizeof(TableSizeEntry)); size = add_size(size, hash_estimate_size(MAX_LOCAL_DISK_QUOTA_REJECT_ENTRIES, sizeof(LocalRejectMapEntry))); size = add_size(size, hash_estimate_size(1024L, sizeof(struct QuotaMapEntry)) * NUM_QUOTA_TYPES); return size; @@ -516,6 +513,7 @@ DiskQuotaShmemSize(void) if (IS_QUERY_DISPATCHER()) { size = add_size(size, diskquota_launcher_shmem_size()); + size = add_size(size, sizeof(pg_atomic_uint32)); size = add_size(size, diskquota_worker_shmem_size() * MAX_NUM_MONITORED_DB); } @@ -539,7 +537,8 @@ init_disk_quota_model(uint32 id) hash_ctl.hash = tag_hash; format_name("TableSizeEntrymap", id, &str); - table_size_map = ShmemInitHash(str.data, INIT_TABLES, MAX_TABLES, &hash_ctl, HASH_ELEM | HASH_FUNCTION); + table_size_map = ShmemInitHash(str.data, INIT_NUM_TABLE_SIZE_ENTRIES, MAX_NUM_TABLE_SIZE_ENTRIES, &hash_ctl, + HASH_ELEM | HASH_FUNCTION); /* for localrejectmap */ memset(&hash_ctl, 0, sizeof(hash_ctl)); @@ -597,11 +596,13 @@ vacuum_disk_quota_model(uint32 id) hash_ctl.hash = tag_hash; format_name("TableSizeEntrymap", id, &str); - table_size_map = ShmemInitHash(str.data, INIT_TABLES, MAX_TABLES, &hash_ctl, HASH_ELEM | HASH_FUNCTION); + table_size_map = ShmemInitHash(str.data, INIT_NUM_TABLE_SIZE_ENTRIES, MAX_NUM_TABLE_SIZE_ENTRIES, &hash_ctl, + HASH_ELEM | HASH_FUNCTION); hash_seq_init(&iter, table_size_map); while ((tsentry = hash_seq_search(&iter)) != NULL) { hash_search(table_size_map, &tsentry->key, HASH_REMOVE, NULL); + pg_atomic_fetch_sub_u32(diskquota_table_size_entry_num, 1); } /* localrejectmap */ @@ -950,22 +951,45 @@ calculate_table_disk_usage(bool is_init, HTAB *local_active_table_stat_map) key.reloid = relOid; key.id = TableSizeEntryId(cur_segid); - tsentry = (TableSizeEntry *)hash_search(table_size_map, &key, HASH_ENTER, &table_size_map_found); - if (!table_size_map_found) + uint32 counter = pg_atomic_read_u32(diskquota_table_size_entry_num); + if (counter > MAX_NUM_TABLE_SIZE_ENTRIES) { - tsentry->key.reloid = relOid; - tsentry->key.id = key.id; - Assert(TableSizeEntrySegidStart(tsentry) == cur_segid); - - memset(tsentry->totalsize, 0, sizeof(tsentry->totalsize)); - tsentry->owneroid = InvalidOid; - tsentry->namespaceoid = InvalidOid; - tsentry->tablespaceoid = InvalidOid; - tsentry->flag = 0; - - int seg_st = TableSizeEntrySegidStart(tsentry); - int seg_ed = TableSizeEntrySegidEnd(tsentry); - for (int j = seg_st; j < seg_ed; j++) TableSizeEntrySetFlushFlag(tsentry, j); + tsentry = (TableSizeEntry *)hash_search(table_size_map, &key, HASH_FIND, &table_size_map_found); + /* Too many tables have been added to the table_size_map, to avoid diskquota using + too much share memory, just quit the loop. 
The diskquota won't work correctly + anymore. */ + if (!table_size_map_found) + { + break; + } + } + else + { + tsentry = (TableSizeEntry *)hash_search(table_size_map, &key, HASH_ENTER, &table_size_map_found); + + if (!table_size_map_found) + { + counter = pg_atomic_add_fetch_u32(diskquota_table_size_entry_num, 1); + if (counter > MAX_NUM_TABLE_SIZE_ENTRIES) + { + ereport(WARNING, (errmsg("[diskquota] the number of tables exceeds the limit, please increase " + "the GUC value for diskquota.max_table_segments. Current " + "diskquota.max_table_segments value: %d", + diskquota_max_table_segments))); + } + tsentry->key.reloid = relOid; + tsentry->key.id = key.id; + Assert(TableSizeEntrySegidStart(tsentry) == cur_segid); + memset(tsentry->totalsize, 0, sizeof(tsentry->totalsize)); + tsentry->owneroid = InvalidOid; + tsentry->namespaceoid = InvalidOid; + tsentry->tablespaceoid = InvalidOid; + tsentry->flag = 0; + + int seg_st = TableSizeEntrySegidStart(tsentry); + int seg_ed = TableSizeEntrySegidEnd(tsentry); + for (int j = seg_st; j < seg_ed; j++) TableSizeEntrySetFlushFlag(tsentry, j); + } } /* mark tsentry is_exist */ @@ -1134,6 +1158,7 @@ flush_to_table_size(void) if (!get_table_size_entry_flag(tsentry, TABLE_EXIST)) { hash_search(table_size_map, &tsentry->key, HASH_REMOVE, NULL); + pg_atomic_fetch_sub_u32(diskquota_table_size_entry_num, 1); } } truncateStringInfo(&deleted_table_expr, deleted_table_expr.len - strlen(", ")); From a689958a9614125b20bf00a1bcad3306ca50df94 Mon Sep 17 00:00:00 2001 From: Chen Mulong Date: Tue, 6 Dec 2022 15:43:26 +0800 Subject: [PATCH 242/330] Enable tests (#274) --- tests/regress/diskquota_schedule | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/regress/diskquota_schedule b/tests/regress/diskquota_schedule index c3292e1e5ee..7cbb0315f57 100644 --- a/tests/regress/diskquota_schedule +++ b/tests/regress/diskquota_schedule @@ -1,7 +1,7 @@ test: config test: test_create_extension test: test_readiness_logged -#test: test_init_table_size_table +test: test_init_table_size_table test: test_relation_size test: test_relation_cache test: test_uncommitted_table_size @@ -9,7 +9,7 @@ test: test_pause_and_resume test: test_pause_and_resume_multiple_db test: test_drop_after_pause test: test_show_status -#test: test_update_db_cache +test: test_update_db_cache test: test_quota_view_no_table # disable this test due to GPDB behavior change # test: test_table_size From 73114d8366c934bc808269287081002631229949 Mon Sep 17 00:00:00 2001 From: Xiaoran Wang Date: Thu, 8 Dec 2022 18:24:16 +0800 Subject: [PATCH 243/330] Optimize dispatching reject map to segments (#275) Avoid dispatching reject map to segments when it is not changed --- quotamodel.c | 52 +++++++++++++++++++++++++++++++++++++--------------- 1 file changed, 37 insertions(+), 15 deletions(-) diff --git a/quotamodel.c b/quotamodel.c index 85c1e97d30a..7ad3c9d4f90 100644 --- a/quotamodel.c +++ b/quotamodel.c @@ -224,7 +224,7 @@ static void transfer_table_for_quota(int64 totalsize, QuotaType type, Oid *old_k static void refresh_disk_quota_usage(bool is_init); static void calculate_table_disk_usage(bool is_init, HTAB *local_active_table_stat_map); static void flush_to_table_size(void); -static void flush_local_reject_map(void); +static bool flush_local_reject_map(void); static void dispatch_rejectmap(HTAB *local_active_table_stat_map); static bool load_quotas(void); static void do_load_quotas(void); @@ -794,8 +794,13 @@ refresh_disk_quota_usage(bool is_init) /* * initialization stage all the tables are 
active. In later loops, only the * tables whose disk size changed will be treated as active + * + * local_active_table_stat_map only contains the active tables which belong + * to the current database. */ local_active_table_stat_map = gp_fetch_active_tables(is_init); + bool hasActiveTable = (hash_get_num_entries(local_active_table_stat_map) != 0); + /* TODO: check whether we can skip the following steps when there is no active table */ /* recalculate the disk usage of table, schema and role */ calculate_table_disk_usage(is_init, local_active_table_stat_map); for (QuotaType type = 0; type < NUM_QUOTA_TYPES; ++type) @@ -805,9 +810,10 @@ /* flush local table_size_map to user table table_size */ flush_to_table_size(); /* copy local reject map back to shared reject map */ - flush_local_reject_map(); + bool reject_map_changed = flush_local_reject_map(); /* Dispatch rejectmap entries to segments to perform hard-limit. */ - if (diskquota_hardlimit) dispatch_rejectmap(local_active_table_stat_map); + if (diskquota_hardlimit && (reject_map_changed || hasActiveTable)) + dispatch_rejectmap(local_active_table_stat_map); hash_destroy(local_active_table_stat_map); } PG_CATCH(); @@ -1198,9 +1204,10 @@ flush_to_table_size(void) * exceed the quota limit. * local_rejectmap is used to reduce the lock contention. */ -static void +static bool flush_local_reject_map(void) { + bool changed = false; HASH_SEQ_STATUS iter; LocalRejectMapEntry *localrejectentry; GlobalRejectMapEntry *rejectentry; @@ -1211,6 +1218,16 @@ flush_local_reject_map(void) hash_seq_init(&iter, local_disk_quota_reject_map); while ((localrejectentry = hash_seq_search(&iter)) != NULL) { + /* + * If localrejectentry->isexceeded is true, and it already exists in disk_quota_reject_map, + * that means the reject entry exists in both the last loop and the current loop, but its segexceeded + * field may have changed. + * + * If localrejectentry->isexceeded is true, and it doesn't exist in disk_quota_reject_map, + * then it is a newly added reject entry in this loop. + * + * Otherwise, the reject entry is gone and we need to delete it. + */ if (localrejectentry->isexceeded) { rejectentry = (GlobalRejectMapEntry *)hash_search(disk_quota_reject_map, (void *)&localrejectentry->keyitem, @@ -1220,31 +1237,36 @@ flush_local_reject_map(void) ereport(WARNING, (errmsg("[diskquota] Shared disk quota reject map size limit reached."
"Some out-of-limit schemas or roles will be lost " "in rejectmap."))); + continue; } - else + /* new db objects which exceed quota limit */ + if (!found) { - /* new db objects which exceed quota limit */ - if (!found) - { - rejectentry->keyitem.targetoid = localrejectentry->keyitem.targetoid; - rejectentry->keyitem.databaseoid = MyDatabaseId; - rejectentry->keyitem.targettype = localrejectentry->keyitem.targettype; - rejectentry->keyitem.tablespaceoid = localrejectentry->keyitem.tablespaceoid; - rejectentry->segexceeded = localrejectentry->segexceeded; - } + rejectentry->keyitem.targetoid = localrejectentry->keyitem.targetoid; + rejectentry->keyitem.databaseoid = MyDatabaseId; + rejectentry->keyitem.targettype = localrejectentry->keyitem.targettype; + rejectentry->keyitem.tablespaceoid = localrejectentry->keyitem.tablespaceoid; + rejectentry->segexceeded = localrejectentry->segexceeded; + changed = true; + } + if (rejectentry->segexceeded != localrejectentry->segexceeded) + { + rejectentry->segexceeded = localrejectentry->segexceeded; + changed = true; } - rejectentry->segexceeded = localrejectentry->segexceeded; localrejectentry->isexceeded = false; localrejectentry->segexceeded = false; } else { + changed = true; /* db objects are removed or under quota limit in the new loop */ (void)hash_search(disk_quota_reject_map, (void *)&localrejectentry->keyitem, HASH_REMOVE, NULL); (void)hash_search(local_disk_quota_reject_map, (void *)&localrejectentry->keyitem, HASH_REMOVE, NULL); } } LWLockRelease(diskquota_locks.reject_map_lock); + return changed; } /* From 8cbdeb66c12af9b16afda0a1ca6558be9c18b924 Mon Sep 17 00:00:00 2001 From: Zhang Hao Date: Mon, 12 Dec 2022 17:10:29 +0800 Subject: [PATCH 244/330] Fix bug: rejectmap entries should not be removed by other databases. (#279) This commit fixes two bugs: - Previously, refresh_rejectmap() cleared all entries in the rejectmap, including other databases' entries, which caused hard-limit enforcement to stop working correctly. - Soft-limit rejectmap entries should not be added into disk_quota_reject_map on segments; otherwise, these entries may remain on segments and trigger the soft-limit incorrectly. Co-authored-by: Chen Mulong --- quotamodel.c | 38 +++++--- tests/regress/diskquota_schedule | 1 + .../expected/test_rejectmap_mul_db.out | 89 +++++++++++++++++++ tests/regress/sql/test_rejectmap_mul_db.sql | 53 +++++++++++ 4 files changed, 168 insertions(+), 13 deletions(-) create mode 100644 tests/regress/expected/test_rejectmap_mul_db.out create mode 100644 tests/regress/sql/test_rejectmap_mul_db.sql diff --git a/quotamodel.c b/quotamodel.c index 7ad3c9d4f90..13f288a239a 100644 --- a/quotamodel.c +++ b/quotamodel.c @@ -811,8 +811,13 @@ refresh_disk_quota_usage(bool is_init) flush_to_table_size(); /* copy local reject map back to shared reject map */ bool reject_map_changed = flush_local_reject_map(); - /* Dispatch rejectmap entries to segments to perform hard-limit. */ - if (diskquota_hardlimit && (reject_map_changed || hasActiveTable)) + /* + * Dispatch rejectmap entries to segments to perform hard-limit. + * If the bgworker is in init mode, the rejectmap should be refreshed anyway. + * Otherwise, the rejectmap should be dispatched to segments only when it + * has changed or the active_table_list is not empty.
+ */ + if (is_init || (diskquota_hardlimit && (reject_map_changed || hasActiveTable))) dispatch_rejectmap(local_active_table_stat_map); hash_destroy(local_active_table_stat_map); } @@ -1795,7 +1800,8 @@ refresh_rejectmap(PG_FUNCTION_ARGS) int16 elem_width; bool elem_type_by_val; char elem_alignment_code; - int count; + int reject_array_count; + int active_array_count; HeapTupleHeader lt; bool segexceeded; GlobalRejectMapEntry *rejectmapentry; @@ -1832,8 +1838,8 @@ refresh_rejectmap(PG_FUNCTION_ARGS) local_rejectmap = hash_create("local_rejectmap", 1024, &hashctl, HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); get_typlenbyvalalign(rejectmap_elem_type, &elem_width, &elem_type_by_val, &elem_alignment_code); deconstruct_array(rejectmap_array_type, rejectmap_elem_type, elem_width, elem_type_by_val, elem_alignment_code, - &datums, &nulls, &count); - for (int i = 0; i < count; ++i) + &datums, &nulls, &reject_array_count); + for (int i = 0; i < reject_array_count; ++i) { RejectMapEntry keyitem; bool isnull; @@ -1864,8 +1870,8 @@ refresh_rejectmap(PG_FUNCTION_ARGS) */ get_typlenbyvalalign(active_oid_elem_type, &elem_width, &elem_type_by_val, &elem_alignment_code); deconstruct_array(active_oid_array_type, active_oid_elem_type, elem_width, elem_type_by_val, elem_alignment_code, - &datums, &nulls, &count); - for (int i = 0; i < count; ++i) + &datums, &nulls, &active_array_count); + for (int i = 0; i < active_array_count; ++i) { Oid active_oid = InvalidOid; HeapTuple tuple; @@ -2040,7 +2046,12 @@ refresh_rejectmap(PG_FUNCTION_ARGS) /* Clear rejectmap entries. */ hash_seq_init(&hash_seq, disk_quota_reject_map); while ((rejectmapentry = hash_seq_search(&hash_seq)) != NULL) + { + if (rejectmapentry->keyitem.relfilenode.dbNode != MyDatabaseId && + rejectmapentry->keyitem.databaseoid != MyDatabaseId) + continue; hash_search(disk_quota_reject_map, &rejectmapentry->keyitem, HASH_REMOVE, NULL); + } /* Flush the content of local_rejectmap to the global rejectmap. */ hash_seq_init(&hash_seq, local_rejectmap); @@ -2048,14 +2059,15 @@ refresh_rejectmap(PG_FUNCTION_ARGS) { bool found; GlobalRejectMapEntry *new_entry; - new_entry = hash_search(disk_quota_reject_map, &rejectmapentry->keyitem, HASH_ENTER_NULL, &found); + /* - * We don't perform soft-limit on segment servers, so we don't flush the - * rejectmap entry with a valid targetoid to the global rejectmap on segment - * servers. + * Skip soft limit reject entry. We don't perform soft-limit on segment servers, so we don't flush the + * rejectmap entry with a valid targetoid to the global rejectmap on segment servers. 
*/ - if (!found && new_entry && !OidIsValid(rejectmapentry->keyitem.targetoid)) - memcpy(new_entry, rejectmapentry, sizeof(GlobalRejectMapEntry)); + if (OidIsValid(rejectmapentry->keyitem.targetoid)) continue; + + new_entry = hash_search(disk_quota_reject_map, &rejectmapentry->keyitem, HASH_ENTER_NULL, &found); + if (!found && new_entry) memcpy(new_entry, rejectmapentry, sizeof(GlobalRejectMapEntry)); } LWLockRelease(diskquota_locks.reject_map_lock); diff --git a/tests/regress/diskquota_schedule b/tests/regress/diskquota_schedule index 7cbb0315f57..3d34e02b272 100644 --- a/tests/regress/diskquota_schedule +++ b/tests/regress/diskquota_schedule @@ -31,6 +31,7 @@ test: test_fetch_table_stat test: test_appendonly test: test_rejectmap test: test_clean_rejectmap_after_drop +test: test_rejectmap_mul_db test: test_ctas_pause test: test_ctas_role test: test_ctas_schema diff --git a/tests/regress/expected/test_rejectmap_mul_db.out b/tests/regress/expected/test_rejectmap_mul_db.out new file mode 100644 index 00000000000..40c43ae3d78 --- /dev/null +++ b/tests/regress/expected/test_rejectmap_mul_db.out @@ -0,0 +1,89 @@ +-- One db's rejectmap update should not impact on other db's rejectmap +CREATE DATABASE tjmu1; +CREATE DATABASE tjmu2; +-- start_ignore +\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null +\! gpstop -u > /dev/null +-- end_ignore +\c tjmu1 +CREATE EXTENSION diskquota; +SELECT diskquota.set_schema_quota('public', '1MB'); + set_schema_quota +------------------ + +(1 row) + +CREATE TABLE b (t TEXT) DISTRIBUTED BY (t); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- Trigger hard limit to dispatch rejectmap for tjmu1 +INSERT INTO b SELECT generate_series(1, 100000000); -- fail +ERROR: schema's disk space quota exceeded with name: 2200 (seg1 127.0.0.1:6003 pid=3985762) +-- NOTE: Pause to avoid tjmu1's worker clear the active table. Since the naptime is 0 on CI, this might be flaky. +SELECT diskquota.pause(); + pause +------- + +(1 row) + +-- The rejectmap should contain entries with dbnode = 0 and dbnode = tjmu1_oid. count = 1 +SELECT COUNT(DISTINCT r.dbnode) FROM (SELECT (diskquota.show_rejectmap()).* FROM gp_dist_random('gp_id')) as r where r.dbnode != 0; + count +------- + 1 +(1 row) + +\c tjmu2 +CREATE EXTENSION diskquota; +SELECT diskquota.set_schema_quota('public', '1MB'); + set_schema_quota +------------------ + +(1 row) + +CREATE TABLE b (t TEXT) DISTRIBUTED BY (t); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- Trigger hard limit to dispatch rejectmap for tjmu2 +INSERT INTO b SELECT generate_series(1, 100000000); -- fail +ERROR: schema's disk space quota exceeded with name: 2200 (seg1 127.0.0.1:6003 pid=4001721) +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT diskquota.pause(); + pause +------- + +(1 row) + +--\c tjmu1 +-- The rejectmap should contain entris with dbnode = 0 and dbnode = tjmu1_oid and tjmu2_oid. count = 2 +-- The entries for tjmu1 should not be cleared +SELECT COUNT(DISTINCT r.dbnode) FROM (SELECT (diskquota.show_rejectmap()).* FROM gp_dist_random('gp_id')) as r where r.dbnode != 0; + count +------- + 2 +(1 row) + +-- start_ignore +\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\! 
gpstop -u > /dev/null +-- end_ignore +\c tjmu1 +DROP EXTENSION diskquota; +\c tjmu2 +DROP EXTENSION diskquota; +\c contrib_regression +DROP DATABASE tjmu1; +DROP DATABASE tjmu2; diff --git a/tests/regress/sql/test_rejectmap_mul_db.sql b/tests/regress/sql/test_rejectmap_mul_db.sql new file mode 100644 index 00000000000..3b2fd734f13 --- /dev/null +++ b/tests/regress/sql/test_rejectmap_mul_db.sql @@ -0,0 +1,53 @@ +-- One db's rejectmap update should not impact on other db's rejectmap +CREATE DATABASE tjmu1; +CREATE DATABASE tjmu2; + +-- start_ignore +\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null +-- increase the naptime to avoid active table gets cleared by tjmu1's worker +\! gpconfig -c "diskquota.naptime" -v 1 > /dev/null +\! gpstop -u > /dev/null +-- end_ignore + +\c tjmu1 +CREATE EXTENSION diskquota; +SELECT diskquota.set_schema_quota('public', '1MB'); +CREATE TABLE b (t TEXT) DISTRIBUTED BY (t); +SELECT diskquota.wait_for_worker_new_epoch(); +-- Trigger hard limit to dispatch rejectmap for tjmu1 +INSERT INTO b SELECT generate_series(1, 100000000); -- fail +-- NOTE: Pause to avoid tjmu1's worker clear the active table. Since the naptime is 0 on CI, this might be flaky. +SELECT diskquota.pause(); +-- The rejectmap should contain entries with dbnode = 0 and dbnode = tjmu1_oid. count = 1 +SELECT COUNT(DISTINCT r.dbnode) FROM (SELECT (diskquota.show_rejectmap()).* FROM gp_dist_random('gp_id')) as r where r.dbnode != 0; + +\c tjmu2 +CREATE EXTENSION diskquota; +SELECT diskquota.set_schema_quota('public', '1MB'); +CREATE TABLE b (t TEXT) DISTRIBUTED BY (t); +SELECT diskquota.wait_for_worker_new_epoch(); +-- Trigger hard limit to dispatch rejectmap for tjmu2 +INSERT INTO b SELECT generate_series(1, 100000000); -- fail +SELECT diskquota.wait_for_worker_new_epoch(); +SELECT diskquota.pause(); + +--\c tjmu1 +-- The rejectmap should contain entris with dbnode = 0 and dbnode = tjmu1_oid and tjmu2_oid. count = 2 +-- The entries for tjmu1 should not be cleared +SELECT COUNT(DISTINCT r.dbnode) FROM (SELECT (diskquota.show_rejectmap()).* FROM gp_dist_random('gp_id')) as r where r.dbnode != 0; + +-- start_ignore +\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\! gpconfig -c "diskquota.naptime" -v 0 > /dev/null +\! gpstop -u > /dev/null +-- end_ignore + +\c tjmu1 +DROP EXTENSION diskquota; +\c tjmu2 +DROP EXTENSION diskquota; + +\c contrib_regression +DROP DATABASE tjmu1; +DROP DATABASE tjmu2; + From 138f95a7d4c2104e49a4dab74e9eac61a5614701 Mon Sep 17 00:00:00 2001 From: Xiaoran Wang Date: Mon, 12 Dec 2022 21:14:12 +0800 Subject: [PATCH 245/330] Fix diskquota worker schedule bug (#280) When a database's diskquota bgworker is killed and the db is dropped, the diskquota scheduler cannot work properly. The cause: if the scheduler fails to start a bgworker for a database, it will retry again and again forever. A different status code is returned when starting a bgworker fails. If the failure is due to a dropped database (or any other reason that prevents the db name from being retrieved from the db id), just skip this bgworker for now. For other failure reasons, limit the number of attempts to start a bgworker for a database to 3. If the limit is reached, skip it and pick the next one.
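A minimal standalone sketch of this retry-capped scheduling (an illustration only, not the extension source: try_start_worker() is a hypothetical stand-in for the real start_worker(), which needs the postmaster environment):

    #include <stdbool.h>
    #include <stdio.h>

    /* Mirrors the StartWorkerState enum introduced by this patch. */
    typedef enum { SUCCESS, INVALID_DB, NO_FREE_WORKER, UNKNOWN } StartWorkerState;

    /* Hypothetical stand-in: pretend every start attempt fails for an unknown reason. */
    static StartWorkerState try_start_worker(int dbid) { (void) dbid; return UNKNOWN; }

    int main(void)
    {
        int  try_times = 0;
        bool advance_one_db = false;

        while (!advance_one_db)
        {
            StartWorkerState ret = try_start_worker(42);
            /* Success, or a db whose name can no longer be resolved: move on. */
            advance_one_db = (ret == SUCCESS || ret == INVALID_DB);
            if (!advance_one_db)
            {
                /* NO_FREE_WORKER does not consume the retry budget. */
                if (ret != NO_FREE_WORKER) try_times++;
                /* Give up on this database after 3 failed attempts. */
                if (try_times >= 3) advance_one_db = true;
            }
        }
        printf("gave up after %d attempts\n", try_times);
        return 0;
    }

Note the design choice: NO_FREE_WORKER is excluded from the retry budget because a full worker pool says nothing about the health of the database itself, so only database-specific failures count toward the 3-attempt limit.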
--- diskquota.c | 79 ++++++++--- tests/regress/diskquota_schedule | 1 + .../regress/expected/test_worker_schedule.out | 2 + .../test_worker_schedule_exception.out | 123 ++++++++++++++++++ tests/regress/sql/test_worker_schedule.sql | 1 + .../sql/test_worker_schedule_exception.sql | 38 ++++++ 6 files changed, 225 insertions(+), 19 deletions(-) create mode 100644 tests/regress/expected/test_worker_schedule_exception.out create mode 100644 tests/regress/sql/test_worker_schedule_exception.sql diff --git a/diskquota.c b/diskquota.c index 2740f2768f6..519da4754d8 100644 --- a/diskquota.c +++ b/diskquota.c @@ -100,20 +100,27 @@ static DiskquotaLauncherShmemStruct *DiskquotaLauncherShmem; */ BackgroundWorkerHandle **bgworker_handles; +typedef enum +{ + SUCCESS, + INVALID_DB, + NO_FREE_WORKER, + UNKNOWN, +} StartWorkerState; /* functions of disk quota*/ void _PG_init(void); void _PG_fini(void); void disk_quota_worker_main(Datum); void disk_quota_launcher_main(Datum); -static void disk_quota_sigterm(SIGNAL_ARGS); -static void disk_quota_sighup(SIGNAL_ARGS); -static void define_guc_variables(void); -static bool start_worker(DiskquotaDBEntry *dbEntry); -static void create_monitor_db_table(void); -static void add_dbid_to_database_list(Oid dbid); -static void del_dbid_from_database_list(Oid dbid); -static void process_extension_ddl_message(void); +static void disk_quota_sigterm(SIGNAL_ARGS); +static void disk_quota_sighup(SIGNAL_ARGS); +static void define_guc_variables(void); +static StartWorkerState start_worker(DiskquotaDBEntry *dbEntry); +static void create_monitor_db_table(void); +static void add_dbid_to_database_list(Oid dbid); +static void del_dbid_from_database_list(Oid dbid); +static void process_extension_ddl_message(void); static void do_process_extension_ddl_message(MessageResult *code, ExtensionDDLMessage local_extension_ddl_message); static void terminate_all_workers(void); static void on_add_db(Oid dbid, MessageResult *code); @@ -594,6 +601,7 @@ disk_quota_launcher_main(Datum main_arg) Oid curDBId = 0; bool advance_one_db = true; bool timeout = false; + int try_times = 0; while (!got_sigterm) { int rc; @@ -601,8 +609,9 @@ disk_quota_launcher_main(Datum main_arg) /* pick a db to run */ if (advance_one_db) { - curDB = next_db(curDB); - timeout = false; + curDB = next_db(curDB); + timeout = false; + try_times = 0; if (curDB != NULL) { curDBId = curDB->dbid; @@ -728,10 +737,18 @@ disk_quota_launcher_main(Datum main_arg) */ if (TimestampDifferenceExceeds(curDB->next_run_time, GetCurrentTimestamp(), MIN_SLEEPTIME)) { - bool ret = start_worker(curDB); - advance_one_db = ret; - /* has exceeded the next_run_time of current db */ - timeout = true; + StartWorkerState ret = start_worker(curDB); + /* when start_worker succeeds or the db is invalid, pick the next db to run */ + advance_one_db = (ret == SUCCESS || ret == INVALID_DB) ? true : false; + if (!advance_one_db) + { + /* has exceeded the next_run_time of current db */ + timeout = true; + /* when start_worker's result is not NO_FREE_WORKER, increase try_times */ + if (ret != NO_FREE_WORKER) try_times++; + /* only try to start a bgworker for a database at most 3 times */ + if (try_times >= 3) advance_one_db = true; + } } else { @@ -1237,9 +1254,17 @@ terminate_all_workers(void) * Dynamically launch a disk quota worker process. * This function is called when the launcher process * schedules a database's diskquota worker to run. + * + * return: + * SUCCESS means the bgworker was started successfully.
+ * INVALID_DB means the database is invalid + * NO_FREE_WORKER means there are no available free workers + * UNKNOWN means registering or starting the bgworker + * failed: maybe there is no free bgworker, + * forking a process failed, and so on. */ -static bool +static StartWorkerState start_worker(DiskquotaDBEntry *dbEntry) { BackgroundWorker worker; @@ -1247,12 +1272,14 @@ start_worker(DiskquotaDBEntry *dbEntry) DiskQuotaWorkerEntry *dq_worker; MemoryContext old_ctx; char *dbname = NULL; + int result = SUCCESS; dq_worker = next_worker(); if (dq_worker == NULL) { elog(DEBUG1, "[diskquota] no free workers"); - return false; + result = NO_FREE_WORKER; + return result; } /* free the BackgroundWorkerHandle used by last database */ free_bgworker_handle(dq_worker->id); @@ -1279,7 +1306,11 @@ start_worker(DiskquotaDBEntry *dbEntry) sprintf(worker.bgw_library_name, DISKQUOTA_BINARY_NAME); sprintf(worker.bgw_function_name, "disk_quota_worker_main"); dbname = get_db_name(dbEntry->dbid); - if (dbname == NULL) goto Failed; + if (dbname == NULL) + { + result = INVALID_DB; + goto Failed; + } snprintf(worker.bgw_name, sizeof(worker.bgw_name), "%s", dbname); pfree(dbname); @@ -1293,6 +1324,7 @@ start_worker(DiskquotaDBEntry *dbEntry) if (!ret) { elog(WARNING, "Create bgworker failed"); + result = UNKNOWN; goto Failed; } BgwHandleStatus status; @@ -1302,6 +1334,7 @@ { ereport(WARNING, (errcode(ERRCODE_INSUFFICIENT_RESOURCES), errmsg("could not start background process"), errhint("More details may be available in the server log."))); + result = UNKNOWN; goto Failed; } if (status == BGWH_POSTMASTER_DIED) @@ -1309,16 +1342,17 @@ ereport(WARNING, (errcode(ERRCODE_INSUFFICIENT_RESOURCES), errmsg("cannot start background processes without postmaster"), errhint("Kill all remaining database processes and restart the database."))); + result = UNKNOWN; goto Failed; } Assert(status == BGWH_STARTED); - return true; + return result; Failed: elog(DEBUG1, "[diskquota] diskquota, starts diskquota failed"); FreeWorker(dq_worker); - return false; + return result; } /* @@ -1655,6 +1689,10 @@ next_db(DiskquotaDBEntry *curDB) nextSlot = curDB->id + 1; } + /* + * SearchSysCache should be run in a transaction + */ + StartTransactionCommand(); LWLockAcquire(diskquota_locks.dblist_lock, LW_SHARED); for (int i = 0; i < MAX_NUM_MONITORED_DB; i++) { @@ -1662,10 +1700,13 @@ DiskquotaDBEntry *dbEntry = &DiskquotaLauncherShmem->dbArray[nextSlot]; nextSlot++; if (!dbEntry->in_use || dbEntry->workerId != INVALID_WORKER_ID || dbEntry->dbid == InvalidOid) continue; + /* TODO: should release the invalid db related things */ + if (!is_valid_dbid(dbEntry->dbid)) continue; result = dbEntry; break; } LWLockRelease(diskquota_locks.dblist_lock); + CommitTransactionCommand(); return result; } diff --git a/tests/regress/diskquota_schedule b/tests/regress/diskquota_schedule index 3d34e02b272..7722765d91f 100644 --- a/tests/regress/diskquota_schedule +++ b/tests/regress/diskquota_schedule @@ -40,5 +40,6 @@ test: test_ctas_tablespace_schema test: test_default_tablespace test: test_tablespace_diff_schema test: test_worker_schedule +test: test_worker_schedule_exception test: test_drop_extension test: reset_config diff --git a/tests/regress/expected/test_worker_schedule.out b/tests/regress/expected/test_worker_schedule.out index 8003a4e230c..7c6fc7b89ce 100644 --- a/tests/regress/expected/test_worker_schedule.out +++ 
b/tests/regress/expected/test_worker_schedule.out @@ -2,6 +2,8 @@ \c DROP DATABASE IF EXISTS t1; NOTICE: database "t1" does not exist, skipping +DROP DATABASE IF EXISTS t2; +NOTICE: database "t2" does not exist, skipping DROP DATABASE IF EXISTS t3; NOTICE: database "t3" does not exist, skipping DROP DATABASE IF EXISTS t4; diff --git a/tests/regress/expected/test_worker_schedule_exception.out b/tests/regress/expected/test_worker_schedule_exception.out new file mode 100644 index 00000000000..432e27f9943 --- /dev/null +++ b/tests/regress/expected/test_worker_schedule_exception.out @@ -0,0 +1,123 @@ +-- start_ignore +\! gpconfig -c diskquota.max_workers -v 10; +20221209:16:01:17:089154 gpconfig:wxiaoranVKGWQ:wxiaoran-[INFO]:-completed successfully with parameters '-c diskquota.max_workers -v 10' +\! gpconfig -c diskquota.naptime -v 4; +20221209:16:01:19:089255 gpconfig:wxiaoranVKGWQ:wxiaoran-[INFO]:-completed successfully with parameters '-c diskquota.naptime -v 4' +\! gpstop -arf; +20221209:18:21:23:045673 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Starting gpstop with args: -arf +20221209:18:21:23:045673 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Gathering information and validating the environment... +20221209:18:21:23:045673 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Obtaining Greenplum Master catalog information +20221209:18:21:23:045673 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Obtaining Segment details from master... +20221209:18:21:23:045673 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Greenplum Version: 'postgres (Greenplum Database) 6.22.1+dev.36.gedf0e003f8 build dev' +20221209:18:21:23:045673 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Commencing Master instance shutdown with mode='fast' +20221209:18:21:23:045673 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Master segment instance directory=/Users/wxiaoran/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 +20221209:18:21:23:045673 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Attempting forceful termination of any leftover master process +20221209:18:21:23:045673 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Terminating processes for segment /Users/wxiaoran/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 +20221209:18:21:23:045673 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Stopping master standby host wxiaoranVKGWQ.vmware.com mode=fast +20221209:18:21:24:045673 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Successfully shutdown standby process on wxiaoranVKGWQ.vmware.com +20221209:18:21:24:045673 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Targeting dbid [2, 5, 3, 6, 4, 7] for shutdown +20221209:18:21:24:045673 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Commencing parallel primary segment instance shutdown, please wait... +20221209:18:21:24:045673 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-0.00% of jobs completed +20221209:18:21:25:045673 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-100.00% of jobs completed +20221209:18:21:25:045673 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Commencing parallel mirror segment instance shutdown, please wait... 
+20221209:18:21:25:045673 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-0.00% of jobs completed +20221209:18:21:26:045673 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-100.00% of jobs completed +20221209:18:21:26:045673 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:----------------------------------------------------- +20221209:18:21:26:045673 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:- Segments stopped successfully = 6 +20221209:18:21:26:045673 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:- Segments with errors during stop = 0 +20221209:18:21:26:045673 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:----------------------------------------------------- +20221209:18:21:26:045673 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Successfully shutdown 6 of 6 segment instances +20221209:18:21:26:045673 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Database successfully shutdown with no errors reported +20221209:18:21:26:045673 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Cleaning up leftover gpmmon process +20221209:18:21:26:045673 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-No leftover gpmmon process found +20221209:18:21:26:045673 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Cleaning up leftover gpsmon processes +20221209:18:21:26:045673 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-No leftover gpsmon processes on some hosts. not attempting forceful termination on these hosts +20221209:18:21:26:045673 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Cleaning up leftover shared memory +20221209:18:21:27:045673 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Restarting System... +\c +DROP DATABASE IF EXISTS t1; +NOTICE: database "t1" does not exist, skipping +DROP DATABASE IF EXISTS t2; +NOTICE: database "t2" does not exist, skipping +--end_ignore +CREATE DATABASE t1; +CREATE DATABASE t2; +\c t1 +CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +\! pgrep -f "[p]ostgres.*bgworker.*t1" | xargs kill; +\! sleep 0.5 ; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +2 +-- start_ignore +\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep + 503 89701 89678 0 4:01PM ?? 0:00.17 postgres: 6000, bgworker: [diskquota] - launcher + 503 89743 89678 0 4:01PM ?? 0:00.03 postgres: 6000, bgworker: [diskquota] contrib_regression cmd1 +--end_ignore +\c contrib_regression +DROP DATABASE t1; +\c t2 +CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +\c t2 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +\c contrib_regression +DROP DATABASE t2; +--start_ignore +\! gpconfig -r diskquota.naptime; +20221209:16:02:10:089976 gpconfig:wxiaoranVKGWQ:wxiaoran-[INFO]:-completed successfully with parameters '-r diskquota.naptime' +\! gpconfig -r diskquota.max_workers; +20221209:16:02:12:090078 gpconfig:wxiaoranVKGWQ:wxiaoran-[INFO]:-completed successfully with parameters '-r diskquota.max_workers' +\! gpstop -arf; +20221209:16:02:12:090179 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Starting gpstop with args: -arf +20221209:16:02:12:090179 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Gathering information and validating the environment... +20221209:16:02:12:090179 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Obtaining Greenplum Master catalog information +20221209:16:02:12:090179 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Obtaining Segment details from master... 
+20221209:16:02:12:090179 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Greenplum Version: 'postgres (Greenplum Database) 6.22.1+dev.36.gedf0e003f8 build dev' +20221209:16:02:12:090179 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Commencing Master instance shutdown with mode='fast' +20221209:16:02:12:090179 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Master segment instance directory=/Users/wxiaoran/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 +20221209:16:02:12:090179 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Attempting forceful termination of any leftover master process +20221209:16:02:12:090179 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Terminating processes for segment /Users/wxiaoran/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 +20221209:16:02:13:090179 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Stopping master standby host wxiaoranVKGWQ.vmware.com mode=fast +20221209:16:02:14:090179 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Successfully shutdown standby process on wxiaoranVKGWQ.vmware.com +20221209:16:02:14:090179 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Targeting dbid [2, 5, 3, 6, 4, 7] for shutdown +20221209:16:02:14:090179 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Commencing parallel primary segment instance shutdown, please wait... +20221209:16:02:14:090179 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-0.00% of jobs completed +20221209:16:02:14:090179 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-100.00% of jobs completed +20221209:16:02:14:090179 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Commencing parallel mirror segment instance shutdown, please wait... +20221209:16:02:14:090179 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-0.00% of jobs completed +20221209:16:02:15:090179 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-100.00% of jobs completed +20221209:16:02:15:090179 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:----------------------------------------------------- +20221209:16:02:15:090179 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:- Segments stopped successfully = 6 +20221209:16:02:15:090179 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:- Segments with errors during stop = 0 +20221209:16:02:15:090179 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:----------------------------------------------------- +20221209:16:02:15:090179 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Successfully shutdown 6 of 6 segment instances +20221209:16:02:15:090179 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Database successfully shutdown with no errors reported +20221209:16:02:15:090179 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Cleaning up leftover gpmmon process +20221209:16:02:15:090179 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-No leftover gpmmon process found +20221209:16:02:15:090179 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Cleaning up leftover gpsmon processes +20221209:16:02:15:090179 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-No leftover gpsmon processes on some hosts. not attempting forceful termination on these hosts +20221209:16:02:15:090179 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Cleaning up leftover shared memory +20221209:16:02:17:090179 gpstop:wxiaoranVKGWQ:wxiaoran-[INFO]:-Restarting System... 
+--end_ignore diff --git a/tests/regress/sql/test_worker_schedule.sql b/tests/regress/sql/test_worker_schedule.sql index f63e02f4ec2..94d27e9339b 100644 --- a/tests/regress/sql/test_worker_schedule.sql +++ b/tests/regress/sql/test_worker_schedule.sql @@ -2,6 +2,7 @@ \c DROP DATABASE IF EXISTS t1; +DROP DATABASE IF EXISTS t2; DROP DATABASE IF EXISTS t3; DROP DATABASE IF EXISTS t4; DROP DATABASE IF EXISTS t5; diff --git a/tests/regress/sql/test_worker_schedule_exception.sql b/tests/regress/sql/test_worker_schedule_exception.sql new file mode 100644 index 00000000000..83fe7faf0a0 --- /dev/null +++ b/tests/regress/sql/test_worker_schedule_exception.sql @@ -0,0 +1,38 @@ +-- start_ignore +\! gpconfig -c diskquota.max_workers -v 10; +\! gpconfig -c diskquota.naptime -v 4; +\! gpstop -arf; +\c +DROP DATABASE IF EXISTS t1; +DROP DATABASE IF EXISTS t2; +--end_ignore + +CREATE DATABASE t1; +CREATE DATABASE t2; +\c t1 +CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); + +\! pgrep -f "[p]ostgres.*bgworker.*t1" | xargs kill; +\! sleep 0.5 ; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +-- start_ignore +\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep +--end_ignore +\c contrib_regression +DROP DATABASE t1; +\c t2 +CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); + +\c t2 +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); +DROP EXTENSION diskquota; + +\c contrib_regression +DROP DATABASE t2; +--start_ignore +\! gpconfig -r diskquota.naptime; +\! gpconfig -r diskquota.max_workers; +\! gpstop -arf; +--end_ignore From 82fe751870bb382f790d3878e3fecb2515f64da5 Mon Sep 17 00:00:00 2001 From: Chen Mulong Date: Tue, 13 Dec 2022 12:10:54 +0800 Subject: [PATCH 246/330] Bump version to 2.1.1 (#283) --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index 7ec1d6db408..3e3c2f1e5ed 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.1.0 +2.1.1 From b385672200a3c03b498fea44ea38a5ccf62e987f Mon Sep 17 00:00:00 2001 From: Chen Mulong Date: Tue, 13 Dec 2022 22:16:34 +0800 Subject: [PATCH 247/330] Fix flaky test_fast_quota_view (#282) - Drop the tablespaces before removing their directories. - Use '-f' to always force rm. - '-- start-ignore' doesn't seem to be working with retcode, since retcode will add a '-- start/stop-ignore' pair automatically to ignore the output, and the nested start/stop ignore doesn't seem to be handled well by the ancient perl script. Refer to 'src/test/isolation2/sql_isolation_testcase.py'. 
Seen flaky tests as below: root@96831b9f-9150-4424-63a7-abe8f18c144e:/tmp# cat /home/gpadmin/diskquota_artifacts/tests/isolation2/regression.diffs --- \/tmp\/build\/4eceba44\/bin_diskquota\/tests\/isolation2\/expected\/test_fast_quota_view\.out 2022-12-12 13:20:56.729354016 +0000 +++ \/tmp\/build\/4eceba44\/bin_diskquota\/tests\/isolation2\/results\/test_fast_quota_view\.out 2022-12-12 13:20:56.733354401 +0000 @@ -175,9 +175,11 @@ (exited with code 0) !\retcode rm -r /tmp/spc2; GP_IGNORE:-- start_ignore +GP_IGNORE:rm: cannot remove '/tmp/spc2/6/GPDB_6_301908232/16384/16413': No such file or directory +GP_IGNORE:rm: cannot remove '/tmp/spc2/5/GPDB_6_301908232/16384/16413': No such file or directory GP_IGNORE: GP_IGNORE:-- end_ignore -(exited with code 0) +(exited with code 1) -- end_ignore DROP TABLESPACE IF EXISTS spc1; DROP --- .../expected/test_fast_quota_view.out | 18 ++++++++---------- tests/isolation2/sql/test_fast_quota_view.sql | 10 ++++------ 2 files changed, 12 insertions(+), 16 deletions(-) diff --git a/tests/isolation2/expected/test_fast_quota_view.out b/tests/isolation2/expected/test_fast_quota_view.out index 3bdc09074ff..22bde74857d 100644 --- a/tests/isolation2/expected/test_fast_quota_view.out +++ b/tests/isolation2/expected/test_fast_quota_view.out @@ -6,7 +6,6 @@ CREATE CREATE ROLE r LOGIN SUPERUSER; CREATE --- start_ignore !\retcode mkdir -p /tmp/spc1; -- start_ignore @@ -17,7 +16,7 @@ CREATE -- end_ignore (exited with code 0) --- end_ignore + DROP TABLESPACE IF EXISTS spc1; DROP CREATE TABLESPACE spc1 LOCATION '/tmp/spc1'; @@ -166,19 +165,18 @@ DROP DROP ROLE IF EXISTS r; DROP --- start_ignore -!\retcode rm -r /tmp/spc1; +DROP TABLESPACE IF EXISTS spc1; +DROP +DROP TABLESPACE IF EXISTS spc2; +DROP + +!\retcode rm -rf /tmp/spc1; -- start_ignore -- end_ignore (exited with code 0) -!\retcode rm -r /tmp/spc2; +!\retcode rm -rf /tmp/spc2; -- start_ignore -- end_ignore (exited with code 0) --- end_ignore -DROP TABLESPACE IF EXISTS spc1; -DROP -DROP TABLESPACE IF EXISTS spc2; -DROP diff --git a/tests/isolation2/sql/test_fast_quota_view.sql b/tests/isolation2/sql/test_fast_quota_view.sql index c031576b34a..24ff1f5fd74 100644 --- a/tests/isolation2/sql/test_fast_quota_view.sql +++ b/tests/isolation2/sql/test_fast_quota_view.sql @@ -3,10 +3,9 @@ CREATE SCHEMA s2; CREATE ROLE r LOGIN SUPERUSER; --- start_ignore !\retcode mkdir -p /tmp/spc1; !\retcode mkdir -p /tmp/spc2; --- end_ignore + DROP TABLESPACE IF EXISTS spc1; CREATE TABLESPACE spc1 LOCATION '/tmp/spc1'; DROP TABLESPACE IF EXISTS spc2; @@ -67,9 +66,8 @@ DROP SCHEMA IF EXISTS s1; DROP SCHEMA IF EXISTS s2; DROP ROLE IF EXISTS r; --- start_ignore -!\retcode rm -r /tmp/spc1; -!\retcode rm -r /tmp/spc2; --- end_ignore DROP TABLESPACE IF EXISTS spc1; DROP TABLESPACE IF EXISTS spc2; + +!\retcode rm -rf /tmp/spc1; +!\retcode rm -rf /tmp/spc2; From 8040acfff42586d3e80d6bb55b3d292090f2986e Mon Sep 17 00:00:00 2001 From: Xing Guo Date: Fri, 16 Dec 2022 16:54:52 +0800 Subject: [PATCH 248/330] Fix flaky isolation2 test. (#281) Currently, isolation2/test_rejectmap.sql is flaky if we run the isolation2 test multiple times. That's because we set the GUC 'diskquota.hard_limit' to 'on' in test_postmaster_restart.sql and forgot to set it back to 'off'. In subsequent runs, the hard limit stays enabled and the QD continuously dispatches the reject map to segment servers. However, test_rejectmap.sql requires the hard limit to be disabled, because we dispatch the rejectmap manually via a UDF; otherwise the dispatched rejectmap would be cleared by the QD. 
This patch adds a new injection point to prevent QD from dispatching rejectmap to make test_rejectmap.sql stateless. This patch also set 'diskquota.hard_limit' to 'off' when test_postmaster_restart.sql finishes. --- .../expected/test_postmaster_restart.out | 5 +++++ tests/isolation2/expected/test_rejectmap.out | 17 ++++++++--------- .../isolation2/sql/test_postmaster_restart.sql | 1 + tests/isolation2/sql/test_rejectmap.sql | 11 +++++------ 4 files changed, 19 insertions(+), 15 deletions(-) diff --git a/tests/isolation2/expected/test_postmaster_restart.out b/tests/isolation2/expected/test_postmaster_restart.out index a35cfb7fce9..5f01eee9379 100644 --- a/tests/isolation2/expected/test_postmaster_restart.out +++ b/tests/isolation2/expected/test_postmaster_restart.out @@ -132,3 +132,8 @@ CREATE 1000000 1: DROP SCHEMA postmaster_restart_s CASCADE; DROP 1q: ... +!\retcode gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null; +-- start_ignore + +-- end_ignore +(exited with code 0) diff --git a/tests/isolation2/expected/test_rejectmap.out b/tests/isolation2/expected/test_rejectmap.out index 9c1de69a9ce..5e15acceb67 100644 --- a/tests/isolation2/expected/test_rejectmap.out +++ b/tests/isolation2/expected/test_rejectmap.out @@ -3,14 +3,6 @@ -- queries in smgrextend hook by relation's relfilenode. -- --- this function return valid tablespaceoid. --- For role/namespace quota, return as it is. --- For namespace_tablespace/role_tablespace quota, return non-zero tablespaceoid. -CREATE OR REPLACE FUNCTION get_real_tablespace_oid(block_type text, tablespaceoid oid) /*in func*/ RETURNS oid AS /*in func*/ $$ /*in func*/ BEGIN /*in func*/ /*in func*/ CASE /*in func*/ WHEN (block_type = 'NAMESPACE') OR (block_type = 'ROLE') THEN RETURN tablespaceoid; /*in func*/ ELSE RETURN ( /*in func*/ CASE tablespaceoid /*in func*/ WHEN 0 THEN (SELECT dattablespace FROM pg_database WHERE datname = CURRENT_DATABASE()) /*in func*/ ELSE /*in func*/ tablespaceoid /*in func*/ END /*in func*/ ); /*in func*/ END CASE; /*in func*/ END; /*in func*/ $$ LANGUAGE plpgsql; /*in func*/ -CREATE OR REPLACE FUNCTION block_relation_on_seg0(rel regclass, block_type text, segexceeded boolean) RETURNS void AS $$ /*in func*/ DECLARE /*in func*/ bt int; /*in func*/ targetoid oid; /*in func*/ BEGIN /*in func*/ CASE block_type /*in func*/ WHEN 'NAMESPACE' THEN /*in func*/ bt = 0; /*in func*/ SELECT relnamespace INTO targetoid /*in func*/ FROM pg_class WHERE relname=rel::text; /*in func*/ WHEN 'ROLE' THEN /*in func*/ bt = 1; /*in func*/ SELECT relowner INTO targetoid /*in func*/ FROM pg_class WHERE relname=rel::text; /*in func*/ WHEN 'NAMESPACE_TABLESPACE' THEN /*in func*/ bt = 2; /*in func*/ SELECT relnamespace INTO targetoid /*in func*/ FROM pg_class WHERE relname=rel::text; /*in func*/ WHEN 'ROLE_TABLESPACE' THEN /*in func*/ bt = 3; /*in func*/ SELECT relowner INTO targetoid /*in func*/ FROM pg_class WHERE relname=rel::text; /*in func*/ END CASE; /*in func*/ PERFORM diskquota.refresh_rejectmap( /*in func*/ ARRAY[ /*in func*/ ROW (targetoid, /*in func*/ (SELECT oid FROM pg_database WHERE datname = CURRENT_DATABASE()), /*in func*/ (SELECT get_real_tablespace_oid( /*in func*/ block_type, /*in func*/ (SELECT pg_class.reltablespace FROM pg_class WHERE relname = rel::TEXT) /*in func*/ )), /*in func*/ bt, /*in func*/ segexceeded) /*in func*/ ]::diskquota.rejectmap_entry[], /*in func*/ ARRAY[rel]::oid[]) /*in func*/ FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; /*in func*/ END; $$ /*in func*/ LANGUAGE 'plpgsql'; -CREATE - - -- Enable check 
quota by relfilenode on seg0. SELECT gp_inject_fault_infinite('enable_check_quota_by_relfilenode', 'skip', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; gp_inject_fault_infinite @@ -18,6 +10,13 @@ SELECT gp_inject_fault_infinite('enable_check_quota_by_relfilenode', 'skip', dbi Success: (1 row) +-- this function return valid tablespaceoid. +-- For role/namespace quota, return as it is. +-- For namespace_tablespace/role_tablespace quota, return non-zero tablespaceoid. +CREATE OR REPLACE FUNCTION get_real_tablespace_oid(block_type text, tablespaceoid oid) /*in func*/ RETURNS oid AS /*in func*/ $$ /*in func*/ BEGIN /*in func*/ /*in func*/ CASE /*in func*/ WHEN (block_type = 'NAMESPACE') OR (block_type = 'ROLE') THEN RETURN tablespaceoid; /*in func*/ ELSE RETURN ( /*in func*/ CASE tablespaceoid /*in func*/ WHEN 0 THEN (SELECT dattablespace FROM pg_database WHERE datname = CURRENT_DATABASE()) /*in func*/ ELSE /*in func*/ tablespaceoid /*in func*/ END /*in func*/ ); /*in func*/ END CASE; /*in func*/ END; /*in func*/ $$ LANGUAGE plpgsql; /*in func*/ +CREATE OR REPLACE FUNCTION block_relation_on_seg0(rel regclass, block_type text, segexceeded boolean) RETURNS void AS $$ /*in func*/ DECLARE /*in func*/ bt int; /*in func*/ targetoid oid; /*in func*/ BEGIN /*in func*/ CASE block_type /*in func*/ WHEN 'NAMESPACE' THEN /*in func*/ bt = 0; /*in func*/ SELECT relnamespace INTO targetoid /*in func*/ FROM pg_class WHERE relname=rel::text; /*in func*/ WHEN 'ROLE' THEN /*in func*/ bt = 1; /*in func*/ SELECT relowner INTO targetoid /*in func*/ FROM pg_class WHERE relname=rel::text; /*in func*/ WHEN 'NAMESPACE_TABLESPACE' THEN /*in func*/ bt = 2; /*in func*/ SELECT relnamespace INTO targetoid /*in func*/ FROM pg_class WHERE relname=rel::text; /*in func*/ WHEN 'ROLE_TABLESPACE' THEN /*in func*/ bt = 3; /*in func*/ SELECT relowner INTO targetoid /*in func*/ FROM pg_class WHERE relname=rel::text; /*in func*/ END CASE; /*in func*/ PERFORM diskquota.refresh_rejectmap( /*in func*/ ARRAY[ /*in func*/ ROW (targetoid, /*in func*/ (SELECT oid FROM pg_database WHERE datname = CURRENT_DATABASE()), /*in func*/ (SELECT get_real_tablespace_oid( /*in func*/ block_type, /*in func*/ (SELECT pg_class.reltablespace FROM pg_class WHERE relname = rel::TEXT) /*in func*/ )), /*in func*/ bt, /*in func*/ segexceeded) /*in func*/ ]::diskquota.rejectmap_entry[], /*in func*/ ARRAY[rel]::oid[]) /*in func*/ FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; /*in func*/ END; $$ /*in func*/ LANGUAGE 'plpgsql'; +CREATE + -- 1. Test canceling the extending of an ordinary table. CREATE TABLE blocked_t1(i int) DISTRIBUTED BY (i); CREATE @@ -731,7 +730,7 @@ SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[ (1 row) --- Disable check quota by relfilenode on seg0. +-- Reset fault injection points set by us at the top of this test. 
SELECT gp_inject_fault_infinite('enable_check_quota_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; gp_inject_fault_infinite -------------------------- diff --git a/tests/isolation2/sql/test_postmaster_restart.sql b/tests/isolation2/sql/test_postmaster_restart.sql index bc78c241c00..245fd91cb55 100644 --- a/tests/isolation2/sql/test_postmaster_restart.sql +++ b/tests/isolation2/sql/test_postmaster_restart.sql @@ -49,3 +49,4 @@ 1: DROP SCHEMA postmaster_restart_s CASCADE; 1q: +!\retcode gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null; diff --git a/tests/isolation2/sql/test_rejectmap.sql b/tests/isolation2/sql/test_rejectmap.sql index ca2226010d9..41267c56b57 100644 --- a/tests/isolation2/sql/test_rejectmap.sql +++ b/tests/isolation2/sql/test_rejectmap.sql @@ -3,6 +3,10 @@ -- queries in smgrextend hook by relation's relfilenode. -- +-- Enable check quota by relfilenode on seg0. +SELECT gp_inject_fault_infinite('enable_check_quota_by_relfilenode', 'skip', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; + -- this function return valid tablespaceoid. -- For role/namespace quota, return as it is. -- For namespace_tablespace/role_tablespace quota, return non-zero tablespaceoid. @@ -64,11 +68,6 @@ CREATE OR REPLACE FUNCTION block_relation_on_seg0(rel regclass, block_type text, END; $$ /*in func*/ LANGUAGE 'plpgsql'; - --- Enable check quota by relfilenode on seg0. -SELECT gp_inject_fault_infinite('enable_check_quota_by_relfilenode', 'skip', dbid) - FROM gp_segment_configuration WHERE role='p' AND content=0; - -- 1. Test canceling the extending of an ordinary table. CREATE TABLE blocked_t1(i int) DISTRIBUTED BY (i); INSERT INTO blocked_t1 SELECT generate_series(1, 100); @@ -562,6 +561,6 @@ SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; --- Disable check quota by relfilenode on seg0. +-- Reset fault injection points set by us at the top of this test. SELECT gp_inject_fault_infinite('enable_check_quota_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; From e095c69056891abcd933d7f139ee1a83299edfab Mon Sep 17 00:00:00 2001 From: Zhang Hao Date: Mon, 30 Jan 2023 16:02:32 +0800 Subject: [PATCH 249/330] Fix compilation to support gpdb7 (#285) * Fix diskquota on gpdb7 - Fix some compile issues; in particular, relstorage has been removed in gpdb7, so use relam to get the relation's storage type. - Modify diskquota hash function flag. - Fix diskquota_relation_open(). NoLock is disabled on gpdb7. - Add tests schedule and expected results for gpdb7. - Update some test expectations on gpdb7 due to an AO/CO change: the AO/CO table implementation changed, so their sizes changed. - Disable some tests on gpdb7. - Disable upgrade tests. - Upgrade to diskquota 2.2. Add attribute relam to type relation_cache_detail and add a param to function relation_size_local. - Add setup.sql and setup.out for isolation2 test. - Fix bug: gpstart timeout for gpdb7. We used to set `Gp_role = GP_ROLE_DISPATCH` in disk_quota_launcher_main(), even though postmaster boots in utility mode. This was harmless in gpdb6, but it caused an infinite loop when booting gpdb7. In fact, there is nothing to do in utility mode for the diskquota launcher. In this commit, if `Gp_role != GP_ROLE_DISPATCH`, disk_quota_launcher_main() will simply exit. - Add gpdb7 pipeline support. 
Build gpdb7 by rocky8, and test the same build with rocky8 and rhel8. 'res_test_images' has been changed to list to support this. - Add gpdb version into the task name. 'passwd' is unnecessary and ' doesn't exist in the rocky8 build image. - Use `cmake -DENABLE_UPGRADE_TEST=OFF` to disable the upgrade test. - TODO: Add upgrade test to CI pipeline. Fix activate standby error on the CI pipeline. Fix tests for gpdb7. Co-authored-by: Xiaoran Wang Co-authored-by: Xing Guo --- CMakeLists.txt | 12 +- VERSION | 2 +- cmake/Regress.cmake | 5 +- concourse/pipeline/base.lib.yml | 11 +- concourse/pipeline/commit.yml | 7 +- concourse/pipeline/dev.yml | 7 +- concourse/pipeline/job_def.lib.yml | 71 +- concourse/pipeline/pr.yml | 7 +- concourse/pipeline/release.yml | 7 +- concourse/pipeline/res_def.yml | 68 +- concourse/scripts/build_diskquota.sh | 4 +- concourse/scripts/entry.sh | 1 - concourse/scripts/test_diskquota.sh | 19 +- concourse/tasks/build_diskquota.yml | 2 +- diskquota--2.1--2.2.sql | 63 ++ diskquota--2.2--2.1.sql | 56 ++ diskquota--2.2.sql | 325 ++++++++ diskquota.c | 109 ++- diskquota.control | 2 +- diskquota.h | 31 +- diskquota_test--1.0.sql | 2 +- diskquota_utility.c | 101 ++- gp_activetable.c | 88 ++- gp_activetable.h | 2 + monitored_db.c | 6 +- quotamodel.c | 117 +-- relation_cache.c | 87 ++- relation_cache.h | 1 + tests/CMakeLists.txt | 13 +- tests/init_file | 2 + tests/isolation2/expected7/config.out | 30 + tests/isolation2/expected7/reset_config.out | 10 + tests/isolation2/expected7/setup.out | 0 .../expected7/test_create_extension.out | 15 + .../expected7/test_drop_extension.out | 12 + .../expected7/test_fast_quota_view.out | 182 +++++ .../expected7/test_per_segment_config.out | 269 +++++++ .../expected7/test_postmaster_restart.out | 139 ++++ tests/isolation2/expected7/test_rejectmap.out | 738 ++++++++++++++++++ .../expected7/test_relation_cache.out | 70 ++ .../expected7/test_relation_size.out | 87 +++ tests/isolation2/expected7/test_truncate.out | 79 ++ tests/isolation2/expected7/test_vacuum.out | 92 +++ .../expected7/test_worker_timeout.out | 38 + tests/isolation2/isolation2_schedule7 | 13 + tests/isolation2/sql/setup.sql | 0 tests/regress/diskquota_schedule7 | 46 ++ tests/regress/expected/test_appendonly.out | 4 - .../regress/expected/test_relation_cache.out | 8 - .../expected/test_uncommitted_table_size.out | 12 - tests/regress/expected7/config.out | 70 ++ tests/regress/expected7/reset_config.out | 17 + .../expected7/test_activetable_limit.out | 56 ++ tests/regress/expected7/test_appendonly.out | 72 ++ .../test_clean_rejectmap_after_drop.out | 42 + tests/regress/expected7/test_column.out | 42 + tests/regress/expected7/test_copy.out | 26 + .../expected7/test_create_extension.out | 14 + .../expected7/test_ctas_before_set_quota.out | 61 ++ .../expected7/test_ctas_no_preload_lib.out | 85 ++ tests/regress/expected7/test_ctas_pause.out | 37 + tests/regress/expected7/test_ctas_role.out | 81 ++ tests/regress/expected7/test_ctas_schema.out | 64 ++ .../expected7/test_ctas_tablespace_role.out | 78 ++ .../expected7/test_ctas_tablespace_schema.out | 74 ++ .../expected7/test_default_tablespace.out | 186 +++++ tests/regress/expected7/test_delete_quota.out | 37 + .../expected7/test_drop_after_pause.out | 64 ++ .../regress/expected7/test_drop_extension.out | 13 + tests/regress/expected7/test_drop_table.out | 34 + tests/regress/expected7/test_extension.out | 523 +++++++++++++ .../expected7/test_fast_disk_check.out | 23 + .../expected7/test_fetch_table_stat.out | 35 + 
tests/regress/expected7/test_index.out | 133 ++++ .../expected7/test_init_table_size_table.out | 71 ++ .../expected7/test_many_active_tables.out | 31 + tests/regress/expected7/test_mistake.out | 34 + tests/regress/expected7/test_partition.out | 63 ++ .../expected7/test_pause_and_resume.out | 70 ++ .../test_pause_and_resume_multiple_db.out | 201 +++++ .../expected7/test_primary_failure.out | 126 +++ .../expected7/test_quota_view_no_table.out | 64 ++ .../expected7/test_readiness_logged.out | 38 + tests/regress/expected7/test_recreate.out | 27 + tests/regress/expected7/test_rejectmap.out | 292 +++++++ .../expected7/test_rejectmap_mul_db.out | 92 +++ .../regress/expected7/test_relation_cache.out | 127 +++ .../regress/expected7/test_relation_size.out | 99 +++ tests/regress/expected7/test_rename.out | 71 ++ tests/regress/expected7/test_reschema.out | 39 + tests/regress/expected7/test_role.out | 138 ++++ tests/regress/expected7/test_schema.out | 109 +++ tests/regress/expected7/test_show_status.out | 67 ++ .../expected7/test_tablespace_diff_schema.out | 87 +++ .../expected7/test_tablespace_role.out | 194 +++++ .../expected7/test_tablespace_role_perseg.out | 235 ++++++ .../expected7/test_tablespace_schema.out | 147 ++++ .../test_tablespace_schema_perseg.out | 282 +++++++ tests/regress/expected7/test_temp_role.out | 40 + tests/regress/expected7/test_toast.out | 31 + tests/regress/expected7/test_truncate.out | 36 + .../expected7/test_uncommitted_table_size.out | 236 ++++++ tests/regress/expected7/test_update.out | 23 + .../expected7/test_update_db_cache.out | 64 ++ tests/regress/expected7/test_vacuum.out | 57 ++ .../expected7/test_worker_not_ready.out | 26 + .../expected7/test_worker_schedule.out | 633 +++++++++++++++ .../test_worker_schedule_exception.out | 113 +++ tests/regress/sql/test_relation_cache.sql | 1 - upgrade_test/CMakeLists.txt | 19 +- .../2.1_test_in_2.2_quota_create_in_2.1.out | 16 + upgrade_test/expected/2.2_catalog.out | 310 ++++++++ upgrade_test/expected/2.2_cleanup_quota.out | 1 + upgrade_test/expected/2.2_install.out | 13 + .../expected/2.2_migrate_to_version_2.2.out | 10 + upgrade_test/expected/2.2_set_quota.out | 63 ++ .../2.2_test_in_2.1_quota_create_in_2.2.out | 16 + upgrade_test/schedule_2.1--2.2 | 8 + upgrade_test/schedule_2.2--2.1 | 8 + .../2.1_test_in_2.2_quota_create_in_2.1.sql | 17 + upgrade_test/sql/2.2_catalog.sql | 81 ++ upgrade_test/sql/2.2_cleanup_quota.sql | 1 + upgrade_test/sql/2.2_install.sql | 17 + .../sql/2.2_migrate_to_version_2.2.sql | 8 + upgrade_test/sql/2.2_set_quota.sql | 44 ++ .../2.2_test_in_2.1_quota_create_in_2.2.sql | 16 + 126 files changed, 9286 insertions(+), 265 deletions(-) create mode 100644 diskquota--2.1--2.2.sql create mode 100644 diskquota--2.2--2.1.sql create mode 100644 diskquota--2.2.sql create mode 100644 tests/isolation2/expected7/config.out create mode 100644 tests/isolation2/expected7/reset_config.out create mode 100644 tests/isolation2/expected7/setup.out create mode 100644 tests/isolation2/expected7/test_create_extension.out create mode 100644 tests/isolation2/expected7/test_drop_extension.out create mode 100644 tests/isolation2/expected7/test_fast_quota_view.out create mode 100644 tests/isolation2/expected7/test_per_segment_config.out create mode 100644 tests/isolation2/expected7/test_postmaster_restart.out create mode 100644 tests/isolation2/expected7/test_rejectmap.out create mode 100644 tests/isolation2/expected7/test_relation_cache.out create mode 100644 tests/isolation2/expected7/test_relation_size.out create mode 100644 
tests/isolation2/expected7/test_truncate.out create mode 100644 tests/isolation2/expected7/test_vacuum.out create mode 100644 tests/isolation2/expected7/test_worker_timeout.out create mode 100644 tests/isolation2/isolation2_schedule7 create mode 100644 tests/isolation2/sql/setup.sql create mode 100644 tests/regress/diskquota_schedule7 create mode 100644 tests/regress/expected7/config.out create mode 100644 tests/regress/expected7/reset_config.out create mode 100644 tests/regress/expected7/test_activetable_limit.out create mode 100644 tests/regress/expected7/test_appendonly.out create mode 100644 tests/regress/expected7/test_clean_rejectmap_after_drop.out create mode 100644 tests/regress/expected7/test_column.out create mode 100644 tests/regress/expected7/test_copy.out create mode 100644 tests/regress/expected7/test_create_extension.out create mode 100644 tests/regress/expected7/test_ctas_before_set_quota.out create mode 100644 tests/regress/expected7/test_ctas_no_preload_lib.out create mode 100644 tests/regress/expected7/test_ctas_pause.out create mode 100644 tests/regress/expected7/test_ctas_role.out create mode 100644 tests/regress/expected7/test_ctas_schema.out create mode 100644 tests/regress/expected7/test_ctas_tablespace_role.out create mode 100644 tests/regress/expected7/test_ctas_tablespace_schema.out create mode 100644 tests/regress/expected7/test_default_tablespace.out create mode 100644 tests/regress/expected7/test_delete_quota.out create mode 100644 tests/regress/expected7/test_drop_after_pause.out create mode 100644 tests/regress/expected7/test_drop_extension.out create mode 100644 tests/regress/expected7/test_drop_table.out create mode 100644 tests/regress/expected7/test_extension.out create mode 100644 tests/regress/expected7/test_fast_disk_check.out create mode 100644 tests/regress/expected7/test_fetch_table_stat.out create mode 100644 tests/regress/expected7/test_index.out create mode 100644 tests/regress/expected7/test_init_table_size_table.out create mode 100644 tests/regress/expected7/test_many_active_tables.out create mode 100644 tests/regress/expected7/test_mistake.out create mode 100644 tests/regress/expected7/test_partition.out create mode 100644 tests/regress/expected7/test_pause_and_resume.out create mode 100644 tests/regress/expected7/test_pause_and_resume_multiple_db.out create mode 100644 tests/regress/expected7/test_primary_failure.out create mode 100644 tests/regress/expected7/test_quota_view_no_table.out create mode 100644 tests/regress/expected7/test_readiness_logged.out create mode 100644 tests/regress/expected7/test_recreate.out create mode 100644 tests/regress/expected7/test_rejectmap.out create mode 100644 tests/regress/expected7/test_rejectmap_mul_db.out create mode 100644 tests/regress/expected7/test_relation_cache.out create mode 100644 tests/regress/expected7/test_relation_size.out create mode 100644 tests/regress/expected7/test_rename.out create mode 100644 tests/regress/expected7/test_reschema.out create mode 100644 tests/regress/expected7/test_role.out create mode 100644 tests/regress/expected7/test_schema.out create mode 100644 tests/regress/expected7/test_show_status.out create mode 100644 tests/regress/expected7/test_tablespace_diff_schema.out create mode 100644 tests/regress/expected7/test_tablespace_role.out create mode 100644 tests/regress/expected7/test_tablespace_role_perseg.out create mode 100644 tests/regress/expected7/test_tablespace_schema.out create mode 100644 tests/regress/expected7/test_tablespace_schema_perseg.out create mode 
100644 tests/regress/expected7/test_temp_role.out create mode 100644 tests/regress/expected7/test_toast.out create mode 100644 tests/regress/expected7/test_truncate.out create mode 100644 tests/regress/expected7/test_uncommitted_table_size.out create mode 100644 tests/regress/expected7/test_update.out create mode 100644 tests/regress/expected7/test_update_db_cache.out create mode 100644 tests/regress/expected7/test_vacuum.out create mode 100644 tests/regress/expected7/test_worker_not_ready.out create mode 100644 tests/regress/expected7/test_worker_schedule.out create mode 100644 tests/regress/expected7/test_worker_schedule_exception.out create mode 100644 upgrade_test/expected/2.1_test_in_2.2_quota_create_in_2.1.out create mode 100644 upgrade_test/expected/2.2_catalog.out create mode 100644 upgrade_test/expected/2.2_cleanup_quota.out create mode 100644 upgrade_test/expected/2.2_install.out create mode 100644 upgrade_test/expected/2.2_migrate_to_version_2.2.out create mode 100644 upgrade_test/expected/2.2_set_quota.out create mode 100644 upgrade_test/expected/2.2_test_in_2.1_quota_create_in_2.2.out create mode 100644 upgrade_test/schedule_2.1--2.2 create mode 100644 upgrade_test/schedule_2.2--2.1 create mode 100644 upgrade_test/sql/2.1_test_in_2.2_quota_create_in_2.1.sql create mode 100644 upgrade_test/sql/2.2_catalog.sql create mode 100644 upgrade_test/sql/2.2_cleanup_quota.sql create mode 100644 upgrade_test/sql/2.2_install.sql create mode 100644 upgrade_test/sql/2.2_migrate_to_version_2.2.sql create mode 100644 upgrade_test/sql/2.2_set_quota.sql create mode 100644 upgrade_test/sql/2.2_test_in_2.1_quota_create_in_2.2.sql diff --git a/CMakeLists.txt b/CMakeLists.txt index a13d08a447b..514a7e7cb15 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -85,7 +85,10 @@ list( diskquota--2.0--1.0.sql diskquota--2.1.sql diskquota--2.0--2.1.sql - diskquota--2.1--2.0.sql) + diskquota--2.1--2.0.sql + diskquota--2.2.sql + diskquota--2.1--2.2.sql + diskquota--2.2--2.1.sql) add_library(diskquota MODULE ${diskquota_SRC}) @@ -167,7 +170,12 @@ BuildInfo_Create(${build_info_PATH} # Add installcheck targets add_subdirectory(tests) -add_subdirectory(upgrade_test) +if(NOT DEFINED ENABLE_UPGRADE_TEST) + set(ENABLE_UPGRADE_TEST ON) +endif() +if(ENABLE_UPGRADE_TEST) + add_subdirectory(upgrade_test) +endif() # NOTE: keep install part at the end of file, to overwrite previous binary install(PROGRAMS "cmake/install_gpdb_component" DESTINATION ".") diff --git a/VERSION b/VERSION index 3e3c2f1e5ed..ccbccc3dc62 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.1.1 +2.2.0 diff --git a/cmake/Regress.cmake b/cmake/Regress.cmake index 1726613d162..6d91c760823 100644 --- a/cmake/Regress.cmake +++ b/cmake/Regress.cmake @@ -43,9 +43,6 @@ function(_PGIsolation2Target_Add working_DIR) add_custom_target( pg_isolation2_regress - COMMAND - make -C ${PG_SRC_DIR}/src/test/isolation2 install - COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PG_SRC_DIR}/src/test/isolation2/sql_isolation_testcase.py ${working_DIR} ) @@ -120,7 +117,7 @@ function(RegressTarget_Add name) endif() set(regress_command - ${regress_BIN} --psqldir='${PG_BIN_DIR}' ${regress_opts_arg} ${regress_arg}) + ${regress_BIN} ${regress_opts_arg} ${regress_arg}) if (arg_RUN_TIMES) set(test_command ${CMAKE_CURRENT_FUNCTION_LIST_DIR}/regress_loop.sh diff --git a/concourse/pipeline/base.lib.yml b/concourse/pipeline/base.lib.yml index 3b71d4ca05b..1a62132a956 100644 --- a/concourse/pipeline/base.lib.yml +++ b/concourse/pipeline/base.lib.yml @@ -3,8 +3,15 @@ #@ def 
add_res_by_conf(res_map, job_conf): #@ for key in job_conf: #@ if key.startswith("res_"): -#@ res_name = job_conf[key] -#@ res_map[res_name] = True +#@ val = job_conf[key] +#@ if type(val) == "list" or type(val) == "yamlfragment": +#@ for res_name in val: +#@ res_map[res_name] = True +#@ end +#@ else: +#@ res_name = val +#@ res_map[res_name] = True +#@ end #@ end #@ end #@ end diff --git a/concourse/pipeline/commit.yml b/concourse/pipeline/commit.yml index eb9bd7cc590..823dfcc2868 100644 --- a/concourse/pipeline/commit.yml +++ b/concourse/pipeline/commit.yml @@ -4,7 +4,8 @@ #@ "centos6_gpdb6_conf", #@ "centos7_gpdb6_conf", #@ "rhel8_gpdb6_conf", -#@ "ubuntu18_gpdb6_conf") +#@ "ubuntu18_gpdb6_conf", +#@ "rhel8_gpdb7_conf",) #@ load("trigger_def.lib.yml", #@ "commit_trigger", #@ ) @@ -17,7 +18,8 @@ #@ centos6_gpdb6_conf(), #@ centos7_gpdb6_conf(), #@ rhel8_gpdb6_conf(), -#@ ubuntu18_gpdb6_conf() +#@ ubuntu18_gpdb6_conf(), +#@ rhel8_gpdb7_conf(), #@ ] jobs: #@ param = { @@ -29,7 +31,6 @@ jobs: #@ param = { #@ "res_map": res_map, #@ "trigger": trigger, -#@ "gpdb_src": "gpdb6_src", #@ "conf": conf #@ } - #@ build_test_job(param) diff --git a/concourse/pipeline/dev.yml b/concourse/pipeline/dev.yml index 93a1a252f9d..009eb2167b7 100644 --- a/concourse/pipeline/dev.yml +++ b/concourse/pipeline/dev.yml @@ -4,7 +4,8 @@ #@ "centos6_gpdb6_conf", #@ "centos7_gpdb6_conf", #@ "rhel8_gpdb6_conf", -#@ "ubuntu18_gpdb6_conf") +#@ "ubuntu18_gpdb6_conf", +#@ "rhel8_gpdb7_conf") #@ load("trigger_def.lib.yml", #@ "commit_dev_trigger", #@ ) @@ -17,7 +18,8 @@ #@ centos6_gpdb6_conf(release_build=False), #@ centos7_gpdb6_conf(release_build=False), #@ rhel8_gpdb6_conf(release_build=False), -#@ ubuntu18_gpdb6_conf(release_build=False) +#@ ubuntu18_gpdb6_conf(release_build=False), +#@ rhel8_gpdb7_conf(release_build=False), #@ ] jobs: #@ param = { @@ -29,7 +31,6 @@ jobs: #@ param = { #@ "res_map": res_map, #@ "trigger": trigger, -#@ "gpdb_src": "gpdb6_src", #@ "conf": conf #@ } - #@ build_test_job(param) diff --git a/concourse/pipeline/job_def.lib.yml b/concourse/pipeline/job_def.lib.yml index 3a682739135..b20c4e5e6bf 100644 --- a/concourse/pipeline/job_def.lib.yml +++ b/concourse/pipeline/job_def.lib.yml @@ -13,48 +13,65 @@ #! Use bin_gpdb_postfix="" to use a release version of gpdb binary #@ def centos6_gpdb6_conf(release_build=False): res_build_image: centos6-gpdb6-image-build -res_test_image: centos6-gpdb6-image-test +res_test_images: [centos6-gpdb6-image-test] res_gpdb_bin: #@ "bin_gpdb6_centos6" + ("" if release_build else "_debug") -res_diskquota_bin: bin_diskquota_gpdb6_rhel6 +#! res_diskquota_bin: bin_diskquota_gpdb6_rhel6 res_intermediates_bin: #@ inter_bin_name("bin_diskquota_gpdb6_rhel6_intermediates", release_build) release_bin: bin_diskquota_gpdb6_rhel6_release os: rhel6 +gpdb_ver: 6 build_type: #@ "Release" if release_build else "Debug" #@ end #! Job config for centos7 #@ def centos7_gpdb6_conf(release_build=False): res_build_image: centos7-gpdb6-image-build -res_test_image: centos7-gpdb6-image-test +res_test_images: [centos7-gpdb6-image-test] res_gpdb_bin: #@ "bin_gpdb6_centos7" + ("" if release_build else "_debug") -res_diskquota_bin: bin_diskquota_gpdb6_rhel7 +#! res_diskquota_bin: bin_diskquota_gpdb6_rhel7 res_intermediates_bin: #@ inter_bin_name("bin_diskquota_gpdb6_rhel7_intermediates", release_build) release_bin: bin_diskquota_gpdb6_rhel7_release os: rhel7 +gpdb_ver: 6 build_type: #@ "Release" if release_build else "Debug" #@ end #! 
Job config for rhel8 #@ def rhel8_gpdb6_conf(release_build=False): res_build_image: rhel8-gpdb6-image-build -res_test_image: rhel8-gpdb6-image-test +res_test_images: [rhel8-gpdb6-image-test] res_gpdb_bin: #@ "bin_gpdb6_rhel8" + ("" if release_build else "_debug") -res_diskquota_bin: bin_diskquota_gpdb6_rhel8 +#! res_diskquota_bin: bin_diskquota_gpdb6_rhel8 res_intermediates_bin: #@ inter_bin_name("bin_diskquota_gpdb6_rhel8_intermediates", release_build) release_bin: bin_diskquota_gpdb6_rhel8_release os: rhel8 +gpdb_ver: 6 build_type: #@ "Release" if release_build else "Debug" #@ end #! Job config for ubuntu18 #@ def ubuntu18_gpdb6_conf(release_build=False): res_build_image: ubuntu18-gpdb6-image-build -res_test_image: ubuntu18-gpdb6-image-test +res_test_images: [ubuntu18-gpdb6-image-test] res_gpdb_bin: #@ "bin_gpdb6_ubuntu18" + ("" if release_build else "_debug") -res_diskquota_bin: bin_diskquota_gpdb6_ubuntu18 +#! res_diskquota_bin: bin_diskquota_gpdb6_ubuntu18 res_intermediates_bin: #@ inter_bin_name("bin_diskquota_gpdb6_ubuntu18_intermediates", release_build) release_bin: bin_diskquota_gpdb6_ubuntu18_release os: ubuntu18.04 +gpdb_ver: 6 +build_type: #@ "Release" if release_build else "Debug" +#@ end + +#! Job config for GPDB7, rhel8 +#@ def rhel8_gpdb7_conf(release_build=False): +res_build_image: rocky8-gpdb7-image-build +res_test_images: [rocky8-gpdb7-image-test, rhel8-gpdb7-image-test] +res_gpdb_bin: #@ "bin_gpdb7_rhel8" + ("" if release_build else "_debug") +#! res_diskquota_bin: bin_diskquota_gpdb7_rhel8 +res_intermediates_bin: #@ inter_bin_name("bin_diskquota_gpdb7_rhel8_intermediates", release_build) +release_bin: bin_diskquota_gpdb7_rhel8_release +os: rhel8 +gpdb_ver: 7 build_type: #@ "Release" if release_build else "Debug" #@ end @@ -164,25 +181,31 @@ params: #@ end #@ def _test_task(conf): -task: #@ "test_" + conf["os"] -timeout: 2h -file: diskquota_src/concourse/tasks/test_diskquota.yml -image: #@ conf["res_test_image"] -input_mapping: - bin_gpdb: #@ conf["res_gpdb_bin"] - bin_diskquota: diskquota_artifacts -params: - DISKQUOTA_OS: #@ conf["os"] +#@ images = conf['res_test_images'] +in_parallel: +#@ for image in images: +#@ test_os = image.split("-")[0] + - task: #@ "test_" + test_os + timeout: 2h + file: diskquota_src/concourse/tasks/test_diskquota.yml + image: #@ image + input_mapping: + bin_gpdb: #@ conf["res_gpdb_bin"] + bin_diskquota: diskquota_artifacts + params: + DISKQUOTA_OS: #@ conf["os"] +#@ end #@ end #@ def build_test_job_name(conf): -#@ return "build_test_" + conf["os"] +#@ return "build_test_gpdb{}_{}".format(conf["gpdb_ver"], conf["os"]) #@ end #@ def build_test_job(param): #@ res_map = param["res_map"] #@ trigger = param["trigger"] #@ conf = param["conf"] -#@ add_res_by_name(res_map, param["gpdb_src"]) +#@ res_gpdb_src = "gpdb{}_src".format(conf['gpdb_ver']) +#@ add_res_by_name(res_map, res_gpdb_src) #@ add_res_by_name(res_map, "bin_cmake") #@ add_res_by_name(res_map, "bin_diskquota_intermediates") #@ add_res_by_conf(res_map, conf) @@ -198,13 +221,15 @@ plan: #@ end - in_parallel: - get: gpdb_src - resource: #@ param["gpdb_src"] + resource: #@ res_gpdb_src - get: bin_cmake - get: #@ conf["res_build_image"] - - get: #@ conf["res_test_image"] +#@ for test_image in conf["res_test_images"]: + - get: #@ test_image +#@ end - get: #@ conf["res_gpdb_bin"] - - get: last_released_diskquota_bin - resource: #@ conf["res_diskquota_bin"] + #! - get: last_released_diskquota_bin + #! 
resource: #@ conf["res_diskquota_bin"] - #@ _build_task(conf) - #@ _test_task(conf) - put: #@ conf["res_intermediates_bin"] diff --git a/concourse/pipeline/pr.yml b/concourse/pipeline/pr.yml index bb2f273ac9c..4a715120c24 100644 --- a/concourse/pipeline/pr.yml +++ b/concourse/pipeline/pr.yml @@ -5,7 +5,8 @@ #@ "centos6_gpdb6_conf", #@ "centos7_gpdb6_conf", #@ "rhel8_gpdb6_conf", -#@ "ubuntu18_gpdb6_conf" +#@ "ubuntu18_gpdb6_conf", +#@ "rhel8_gpdb7_conf" #@ ) #@ load("trigger_def.lib.yml", #@ "pr_trigger", @@ -20,7 +21,8 @@ #@ centos6_gpdb6_conf(), #@ centos7_gpdb6_conf(), #@ rhel8_gpdb6_conf(), -#@ ubuntu18_gpdb6_conf() +#@ ubuntu18_gpdb6_conf(), +#@ rhel8_gpdb7_conf(), #@ ] jobs: #@ param = { @@ -32,7 +34,6 @@ jobs: #@ param = { #@ "res_map": res_map, #@ "trigger": trigger, -#@ "gpdb_src": "gpdb6_src", #@ "conf": conf #@ } - #@ build_test_job(param) diff --git a/concourse/pipeline/release.yml b/concourse/pipeline/release.yml index 4ba0394203b..023e86bd88f 100644 --- a/concourse/pipeline/release.yml +++ b/concourse/pipeline/release.yml @@ -5,7 +5,8 @@ #@ "centos6_gpdb6_conf", #@ "centos7_gpdb6_conf", #@ "rhel8_gpdb6_conf", -#@ "ubuntu18_gpdb6_conf") +#@ "ubuntu18_gpdb6_conf", +#@ "rhel8_gpdb7_conf") #@ load("trigger_def.lib.yml", #@ "release_trigger", #@ ) @@ -18,7 +19,8 @@ #@ centos6_gpdb6_conf(release_build=True), #@ centos7_gpdb6_conf(release_build=True), #@ rhel8_gpdb6_conf(release_build=True), -#@ ubuntu18_gpdb6_conf(release_build=True) +#@ ubuntu18_gpdb6_conf(release_build=True), +#@ rhel8_gpdb7_conf(release_build=True) #@ ] jobs: #@ param = { @@ -30,7 +32,6 @@ jobs: #@ param = { #@ "res_map": res_map, #@ "trigger": trigger, -#@ "gpdb_src": "gpdb6_src", #@ "conf": conf #@ } - #@ build_test_job(param) diff --git a/concourse/pipeline/res_def.yml b/concourse/pipeline/res_def.yml index ab42312aaab..002130c8cf1 100644 --- a/concourse/pipeline/res_def.yml +++ b/concourse/pipeline/res_def.yml @@ -1,6 +1,6 @@ resource_types: - name: gcs - type: registry-image + type: docker-image check_every: 1h source: repository: frodenas/gcs-resource @@ -55,6 +55,11 @@ resources: source: branch: 6X_STABLE uri: https://github.com/greenplum-db/gpdb.git +- name: gpdb7_src + type: git + source: + branch: main + uri: https://github.com/greenplum-db/gpdb.git # Image Resources # centos6 @@ -105,6 +110,26 @@ resources: source: repository: gcr.io/data-gpdb-public-images/gpdb6-ubuntu18.04-test tag: latest +# GPDB7 +# build +- name: rocky8-gpdb7-image-build + type: registry-image + source: + repository: gcr.io/data-gpdb-public-images/gpdb7-rocky8-build + tag: latest +# test +- name: rocky8-gpdb7-image-test + type: registry-image + source: + repository: gcr.io/data-gpdb-public-images/gpdb7-rocky8-test + tag: latest +- name: rhel8-gpdb7-image-test + type: registry-image + source: + repository: gcr.io/data-gpdb-private-images/gpdb7-rhel8-test + tag: latest + username: _json_key + password: ((data-gpdb-private-images-container-registry-readonly-service-account-key)) # gpdb binary on gcs is located as different folder for different version # Latest build with assertion enabled: @@ -133,6 +158,13 @@ resources: bucket: pivotal-gpdb-concourse-resources-prod json_key: ((concourse-gcs-resources-service-account-key)) regexp: server/published/gpdb6/server-rc-(.*)-ubuntu18.04_x86_64.debug.tar.gz +- name: bin_gpdb7_rhel8_debug + type: gcs + source: + bucket: pivotal-gpdb-concourse-resources-prod + json_key: ((concourse-gcs-resources-service-account-key)) + regexp: server/published/main/server-rc-(.*)-rhel8_x86_64.debug.tar.gz + # 
Latest release candidates, no fault-injector, no assertion: # --disable-debug-extensions --disable-tap-tests --enable-ic-proxy - name: bin_gpdb6_centos6 @@ -159,6 +191,12 @@ resources: bucket: pivotal-gpdb-concourse-resources-prod json_key: ((concourse-gcs-resources-service-account-key)) regexp: server/published/gpdb6/server-rc-(.*)-ubuntu18.04_x86_64.tar.gz +- name: bin_gpdb7_rhel8 + type: gcs + source: + bucket: pivotal-gpdb-concourse-resources-prod + json_key: ((concourse-gcs-resources-service-account-key)) + regexp: server/published/main/server-rc-(.*)-rhel8_x86_64.tar.gz # Diskquota releases - name: bin_diskquota_gpdb6_rhel6 @@ -189,6 +227,13 @@ resources: json_key: ((concourse-gcs-resources-service-account-key)) regexp: diskquota/released/gpdb6/diskquota-(.*)-ubuntu18.04_x86_64.tar.gz +- name: bin_diskquota_gpdb7_rhel8 + type: gcs + source: + bucket: pivotal-gpdb-concourse-resources-prod + json_key: ((concourse-gcs-resources-service-account-key)) + regexp: diskquota/released/gpdb7/diskquota-(.*)-rhel8_x86_64.tar.gz + # For uploading every build to gcs # Dev - name: bin_diskquota_gpdb6_rhel6_intermediates @@ -219,6 +264,13 @@ resources: json_key: ((extension/extensions-gcs-service-account-key-dev2)) versioned_file: intermediates/diskquota/diskquota_ubuntu18_gpdb6.tar.gz +- name: bin_diskquota_gpdb7_rhel8_intermediates + type: gcs + source: + bucket: gp-extensions-ci + json_key: ((extension/extensions-gcs-service-account-key-dev2)) + versioned_file: intermediates/diskquota/diskquota_rhel8_gpdb7.tar.gz + # Rel - name: bin_diskquota_gpdb6_rhel6_intermediates_rel type: gcs @@ -248,6 +300,13 @@ resources: json_key: ((extension/extensions-gcs-service-account-key-dev2)) versioned_file: intermediates_release/diskquota/diskquota_ubuntu18_gpdb6.tar.gz +- name: bin_diskquota_gpdb7_rhel8_intermediates_rel + type: gcs + source: + bucket: gp-extensions-ci + json_key: ((extension/extensions-gcs-service-account-key-dev2)) + versioned_file: intermediates_release/diskquota/diskquota_rhel8_gpdb7.tar.gz + # For uploading to the release bucket - name: bin_diskquota_gpdb6_rhel6_release type: gcs @@ -277,6 +336,13 @@ resources: json_key: ((concourse-gcs-resources-service-account-key)) regexp: diskquota/released/gpdb6/diskquota-(.*).tar.gz +- name: bin_diskquota_gpdb7_rhel8_release + type: gcs + source: + bucket: pivotal-gpdb-concourse-resources-prod + json_key: ((concourse-gcs-resources-service-account-key)) + regexp: diskquota/released/gpdb7/diskquota-(.*).tar.gz + # Other dependencies - name: bin_cmake type: gcs diff --git a/concourse/scripts/build_diskquota.sh b/concourse/scripts/build_diskquota.sh index ed87ced6dde..3ca3efbc970 100755 --- a/concourse/scripts/build_diskquota.sh +++ b/concourse/scripts/build_diskquota.sh @@ -12,10 +12,10 @@ function pkg() { pushd /home/gpadmin/diskquota_artifacts local last_release_path - last_release_path=$(readlink -e /home/gpadmin/last_released_diskquota_bin/diskquota-*.tar.gz) + # last_release_path=$(readlink -e /home/gpadmin/last_released_diskquota_bin/diskquota-*.tar.gz) cmake /home/gpadmin/diskquota_src \ - -DDISKQUOTA_LAST_RELEASE_PATH="${last_release_path}" \ -DCMAKE_BUILD_TYPE="${BUILD_TYPE}" + # -DDISKQUOTA_LAST_RELEASE_PATH="${last_release_path}" \ cmake --build . 
--target create_artifact popd } diff --git a/concourse/scripts/entry.sh b/concourse/scripts/entry.sh index 4ff2a78b272..b3c70ddd653 100755 --- a/concourse/scripts/entry.sh +++ b/concourse/scripts/entry.sh @@ -105,7 +105,6 @@ setup_gpadmin() { ;; *) echo "Unknown OS: $test_os"; exit 1 ;; esac - echo -e "password\npassword" | passwd gpadmin fi mkdir -p /home/gpadmin chown gpadmin:gpadmin /home/gpadmin diff --git a/concourse/scripts/test_diskquota.sh b/concourse/scripts/test_diskquota.sh index 07033b39996..12566032570 100755 --- a/concourse/scripts/test_diskquota.sh +++ b/concourse/scripts/test_diskquota.sh @@ -4,24 +4,33 @@ set -exo pipefail function activate_standby() { gpstop -may -M immediate - export PGPORT=6001 + if [[ $PGPORT -eq 6000 ]] + then + export PGPORT=6001 + else + export PGPORT=7001 + fi export MASTER_DATA_DIRECTORY=/home/gpadmin/gpdb_src/gpAux/gpdemo/datadirs/standby - gpactivatestandby -ad $MASTER_DATA_DIRECTORY + gpactivatestandby -a -f -d $MASTER_DATA_DIRECTORY } function _main() { tar -xzf /home/gpadmin/bin_diskquota/diskquota-*-*.tar.gz -C /usr/local/greenplum-db-devel source /home/gpadmin/gpdb_src/gpAux/gpdemo/gpdemo-env.sh + pushd /home/gpadmin/gpdb_src + make -C src/test/isolation2 install + popd + pushd /home/gpadmin/diskquota_artifacts # Show regress diff if test fails export SHOW_REGRESS_DIFF=1 time cmake --build . --target installcheck # Run test again with standby master - activate_standby - time cmake --build . --target installcheck + # activate_standby + # time cmake --build . --target installcheck # Run upgrade test (with standby master) - time cmake --build . --target upgradecheck + # time cmake --build . --target upgradecheck popd } diff --git a/concourse/tasks/build_diskquota.yml b/concourse/tasks/build_diskquota.yml index cacf0fb2c9b..ba71054fd1b 100644 --- a/concourse/tasks/build_diskquota.yml +++ b/concourse/tasks/build_diskquota.yml @@ -6,7 +6,7 @@ inputs: - name: diskquota_src - name: gpdb_src - name: bin_cmake - - name: last_released_diskquota_bin + # - name: last_released_diskquota_bin outputs: - name: diskquota_artifacts diff --git a/diskquota--2.1--2.2.sql b/diskquota--2.1--2.2.sql new file mode 100644 index 00000000000..247847e1f48 --- /dev/null +++ b/diskquota--2.1--2.2.sql @@ -0,0 +1,63 @@ +-- TODO check if worker should not refresh, current lib should be diskquota-2.2.so + +-- TYPE +ALTER TYPE diskquota.relation_cache_detail ADD ATTRIBUTE RELAM OID; +-- TYPE END + +-- UDF +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_schema_quota(text, text) RETURNS void STRICT AS '$libdir/diskquota-2.2.so' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_role_quota(text, text) RETURNS void STRICT AS '$libdir/diskquota-2.2.so' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.init_table_size_table() RETURNS void STRICT AS '$libdir/diskquota-2.2.so' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.diskquota_fetch_table_stat(int4, oid[]) RETURNS setof diskquota.diskquota_active_table_type AS '$libdir/diskquota-2.2.so', 'diskquota_fetch_table_stat' LANGUAGE C VOLATILE; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_schema_tablespace_quota(text, text, text) RETURNS void STRICT AS '$libdir/diskquota-2.2.so' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_role_tablespace_quota(text, text, text) RETURNS void STRICT AS '$libdir/diskquota-2.2.so' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_per_segment_quota(text, float4) RETURNS void STRICT AS '$libdir/diskquota-2.2.so' 
LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.refresh_rejectmap(diskquota.rejectmap_entry[], oid[]) RETURNS void STRICT AS '$libdir/diskquota-2.2.so' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.show_rejectmap() RETURNS setof diskquota.rejectmap_entry_detail AS '$libdir/diskquota-2.2.so', 'show_rejectmap' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.pause() RETURNS void STRICT AS '$libdir/diskquota-2.2.so', 'diskquota_pause' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.resume() RETURNS void STRICT AS '$libdir/diskquota-2.2.so', 'diskquota_resume' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.show_worker_epoch() RETURNS bigint STRICT AS '$libdir/diskquota-2.2.so', 'show_worker_epoch' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.wait_for_worker_new_epoch() RETURNS boolean STRICT AS '$libdir/diskquota-2.2.so', 'wait_for_worker_new_epoch' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.status() RETURNS TABLE ("name" text, "status" text) STRICT AS '$libdir/diskquota-2.2.so', 'diskquota_status' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.show_relation_cache() RETURNS setof diskquota.relation_cache_detail AS '$libdir/diskquota-2.2.so', 'show_relation_cache' LANGUAGE C; + +DROP FUNCTION IF EXISTS diskquota.relation_size(relation regclass); +DROP FUNCTION IF EXISTS diskquota.relation_size_local(reltablespace oid, relfilenode oid, relpersistence "char", relstorage "char"); +CREATE FUNCTION diskquota.relation_size_local(reltablespace oid, relfilenode oid, relpersistence "char", relstorage "char", relam oid) RETURNS bigint STRICT AS '$libdir/diskquota-2.2.so', 'relation_size_local' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.pull_all_table_size(OUT tableid oid, OUT size bigint, OUT segid smallint) RETURNS SETOF RECORD AS '$libdir/diskquota-2.2.so', 'pull_all_table_size' LANGUAGE C; + +CREATE FUNCTION diskquota.relation_size(relation regclass) RETURNS bigint STRICT AS $$ + SELECT SUM(size)::bigint FROM ( + SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, + CASE WHEN EXISTS + (SELECT FROM pg_catalog.pg_attribute WHERE attrelid = 'pg_class'::regclass AND attname = 'relstorage') THEN relstorage::"char" ELSE ''::"char" END, + relam) AS size + FROM gp_dist_random('pg_class') as relstorage WHERE oid = relation + UNION ALL + SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, + CASE WHEN EXISTS + (SELECT FROM pg_catalog.pg_attribute WHERE attrelid = 'pg_class'::regclass AND attname = 'relstorage') THEN relstorage::"char" ELSE ''::"char" END, + relam) AS size + FROM pg_class as relstorage WHERE oid = relation + ) AS t $$ LANGUAGE SQL; + +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.show_relation_cache_all_seg() RETURNS setof diskquota.relation_cache_detail AS $$ + WITH relation_cache AS ( + SELECT diskquota.show_relation_cache() AS a + FROM gp_dist_random('gp_id') + ) + SELECT (a).* FROM relation_cache; $$ LANGUAGE SQL; +-- UDF end + +-- VIEW +CREATE OR REPLACE VIEW diskquota.show_all_relation_view AS +WITH + relation_cache AS ( + SELECT (f).* FROM diskquota.show_relation_cache() as f + ) +SELECT DISTINCT(oid), relowner, relnamespace, reltablespace from ( + SELECT relid as oid, owneroid as relowner, namespaceoid as relnamespace, spcnode as reltablespace FROM relation_cache + UNION + SELECT oid, relowner, relnamespace, reltablespace from pg_class +) as union_relation; +-- VIEW diff --git 
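The paired upgrade and downgrade scripts are applied through PostgreSQL's standard extension-update path; a minimal usage sketch (illustrative, not part of the patch):

    -- assumes diskquota 2.1 is currently installed
    ALTER EXTENSION diskquota UPDATE TO '2.2';  -- runs diskquota--2.1--2.2.sql
    -- and to roll back:
    ALTER EXTENSION diskquota UPDATE TO '2.1';  -- runs diskquota--2.2--2.1.sql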
a/diskquota--2.2--2.1.sql b/diskquota--2.2--2.1.sql new file mode 100644 index 00000000000..43d384aa457 --- /dev/null +++ b/diskquota--2.2--2.1.sql @@ -0,0 +1,56 @@ +-- TODO check if worker should not refresh, current lib should be diskquota-2.1.so + +-- TYPE +ALTER TYPE diskquota.relation_cache_detail DROP ATTRIBUTE RELAM; +-- TYPE END + +-- UDF +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_schema_quota(text, text) RETURNS void STRICT AS '$libdir/diskquota-2.1.so' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_role_quota(text, text) RETURNS void STRICT AS '$libdir/diskquota-2.1.so' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.init_table_size_table() RETURNS void STRICT AS '$libdir/diskquota-2.1.so' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.diskquota_fetch_table_stat(int4, oid[]) RETURNS setof diskquota.diskquota_active_table_type AS '$libdir/diskquota-2.1.so', 'diskquota_fetch_table_stat' LANGUAGE C VOLATILE; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_schema_tablespace_quota(text, text, text) RETURNS void STRICT AS '$libdir/diskquota-2.1.so' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_role_tablespace_quota(text, text, text) RETURNS void STRICT AS '$libdir/diskquota-2.1.so' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_per_segment_quota(text, float4) RETURNS void STRICT AS '$libdir/diskquota-2.1.so' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.refresh_rejectmap(diskquota.rejectmap_entry[], oid[]) RETURNS void STRICT AS '$libdir/diskquota-2.1.so' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.show_rejectmap() RETURNS setof diskquota.rejectmap_entry_detail AS '$libdir/diskquota-2.1.so', 'show_rejectmap' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.pause() RETURNS void STRICT AS '$libdir/diskquota-2.1.so', 'diskquota_pause' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.resume() RETURNS void STRICT AS '$libdir/diskquota-2.1.so', 'diskquota_resume' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.show_worker_epoch() RETURNS bigint STRICT AS '$libdir/diskquota-2.1.so', 'show_worker_epoch' LANGUAGE C; /* ALTER */ CREATE OR REPLACE FUNCTION diskquota.wait_for_worker_new_epoch() RETURNS boolean STRICT AS '$libdir/diskquota-2.1.so', 'wait_for_worker_new_epoch' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.status() RETURNS TABLE ("name" text, "status" text) STRICT AS '$libdir/diskquota-2.1.so', 'diskquota_status' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.show_relation_cache() RETURNS setof diskquota.relation_cache_detail AS '$libdir/diskquota-2.1.so', 'show_relation_cache' LANGUAGE C; +DROP FUNCTION IF EXISTS diskquota.relation_size(relation regclass); +DROP FUNCTION IF EXISTS diskquota.relation_size_local(reltablespace oid, relfilenode oid, relpersistence "char", relstorage "char", relam oid); +CREATE FUNCTION diskquota.relation_size_local(reltablespace oid, relfilenode oid, relpersistence "char", relstorage "char") RETURNS bigint STRICT AS '$libdir/diskquota-2.1.so', 'relation_size_local' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.pull_all_table_size(OUT tableid oid, OUT size bigint, OUT segid smallint) RETURNS SETOF RECORD AS '$libdir/diskquota-2.1.so', 'pull_all_table_size' LANGUAGE C; + + +CREATE FUNCTION diskquota.relation_size(relation regclass) RETURNS bigint STRICT AS $$ + SELECT SUM(size)::bigint FROM ( + SELECT 
diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, relstorage) AS size + FROM gp_dist_random('pg_class') WHERE oid = relation + UNION ALL + SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, relstorage) AS size + FROM pg_class WHERE oid = relation + ) AS t $$ LANGUAGE SQL; + +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.show_relation_cache_all_seg() RETURNS setof diskquota.relation_cache_detail AS $$ + WITH relation_cache AS ( + SELECT diskquota.show_relation_cache() AS a + FROM gp_dist_random('gp_id') + ) + SELECT (a).* FROM relation_cache; $$ LANGUAGE SQL; +-- UDF end + +-- VIEW +CREATE OR REPLACE VIEW diskquota.show_all_relation_view AS +WITH + relation_cache AS ( + SELECT (f).* FROM diskquota.show_relation_cache() as f + ) +SELECT DISTINCT(oid), relowner, relnamespace, reltablespace from ( + SELECT relid as oid, owneroid as relowner, namespaceoid as relnamespace, spcnode as reltablespace FROM relation_cache + UNION + SELECT oid, relowner, relnamespace, reltablespace from pg_class +) as union_relation; +-- VIEW diff --git a/diskquota--2.2.sql b/diskquota--2.2.sql new file mode 100644 index 00000000000..49a4b1dbe32 --- /dev/null +++ b/diskquota--2.2.sql @@ -0,0 +1,325 @@ +-- complain if script is sourced in psql, rather than via CREATE EXTENSION +\echo Use "CREATE EXTENSION diskquota" to load this file. \quit + +CREATE SCHEMA diskquota; + +-- when (quotatype == NAMESPACE_QUOTA/ROLE_QUOTA) then targetOid = schema_oid/role_oid; +-- when (quotatype == NAMESPACE_TABLESPACE_QUOTA/ROLE_TABLESPACE_QUOTA) then targetOid = diskquota.target.rowId; +CREATE TABLE diskquota.quota_config( + targetOid oid, + quotatype int, + quotalimitMB int8, + segratio float4 DEFAULT 0, + PRIMARY KEY(targetOid, quotatype) +) DISTRIBUTED BY (targetOid, quotatype); + +CREATE TABLE diskquota.target ( + rowId serial, + quotatype int, --REFERENCES diskquota.quota_config.quotatype, + primaryOid oid, + tablespaceOid oid, --REFERENCES pg_tablespace.oid, + PRIMARY KEY (primaryOid, tablespaceOid, quotatype) +); + +CREATE TABLE diskquota.table_size( + tableid oid, + size bigint, + segid smallint, + PRIMARY KEY(tableid, segid) +) DISTRIBUTED BY (tableid, segid); + +CREATE TABLE diskquota.state( + state int, + PRIMARY KEY(state) +) DISTRIBUTED BY (state); + +-- diskquota.quota_config AND diskquota.target are dump-able; the other tables can be generated on the fly +SELECT pg_catalog.pg_extension_config_dump('diskquota.quota_config', ''); +SELECT gp_segment_id, pg_catalog.pg_extension_config_dump('diskquota.quota_config', '') FROM gp_dist_random('gp_id'); +SELECT pg_catalog.pg_extension_config_dump('diskquota.target', ''); +SELECT gp_segment_id, pg_catalog.pg_extension_config_dump('diskquota.target', '') FROM gp_dist_random('gp_id'); + +CREATE TYPE diskquota.diskquota_active_table_type AS ( + "TABLE_OID" oid, + "TABLE_SIZE" int8, + "GP_SEGMENT_ID" smallint +); + +CREATE TYPE diskquota.rejectmap_entry AS ( + target_oid oid, + database_oid oid, + tablespace_oid oid, + target_type integer, + seg_exceeded boolean +); + +CREATE TYPE diskquota.rejectmap_entry_detail AS ( + target_type text, + target_oid oid, + database_oid oid, + tablespace_oid oid, + seg_exceeded boolean, + dbnode oid, + spcnode oid, + relnode oid, + segid int +); + +CREATE TYPE diskquota.relation_cache_detail AS ( + RELID oid, + PRIMARY_TABLE_OID oid, + AUXREL_NUM int, + OWNEROID oid, + NAMESPACEOID oid, + BACKENDID int, + SPCNODE oid, + DBNODE oid, + RELNODE oid, + RELSTORAGE "char", + AUXREL_OID oid[], + RELAM oid +); + 
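A plain schema or role quota lives entirely in diskquota.quota_config, while tablespace-scoped quotas indirect through diskquota.target; a read-only sketch of that resolution, mirroring the full_quota_config CTE used by the quota views later in this script:

    -- illustrative query only; quotaType = 2 is NAMESPACE_TABLESPACE_QUOTA
    SELECT t.primaryOid AS schema_oid, t.tablespaceOid AS tablespace_oid, c.quotalimitMB
    FROM diskquota.quota_config AS c
    JOIN diskquota.target AS t ON c.targetOid = t.rowId AND c.quotaType = t.quotaType
    WHERE c.quotaType = 2;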
+CREATE FUNCTION diskquota.set_schema_quota(text, text) RETURNS void STRICT AS '$libdir/diskquota-2.2.so' LANGUAGE C; +CREATE FUNCTION diskquota.set_role_quota(text, text) RETURNS void STRICT AS '$libdir/diskquota-2.2.so' LANGUAGE C; +CREATE FUNCTION diskquota.init_table_size_table() RETURNS void STRICT AS '$libdir/diskquota-2.2.so' LANGUAGE C; +CREATE FUNCTION diskquota.diskquota_fetch_table_stat(int4, oid[]) RETURNS setof diskquota.diskquota_active_table_type AS '$libdir/diskquota-2.2.so', 'diskquota_fetch_table_stat' LANGUAGE C VOLATILE; +CREATE FUNCTION diskquota.set_schema_tablespace_quota(text, text, text) RETURNS void STRICT AS '$libdir/diskquota-2.2.so' LANGUAGE C; +CREATE FUNCTION diskquota.set_role_tablespace_quota(text, text, text) RETURNS void STRICT AS '$libdir/diskquota-2.2.so' LANGUAGE C; +CREATE FUNCTION diskquota.set_per_segment_quota(text, float4) RETURNS void STRICT AS '$libdir/diskquota-2.2.so' LANGUAGE C; +CREATE FUNCTION diskquota.refresh_rejectmap(diskquota.rejectmap_entry[], oid[]) RETURNS void STRICT AS '$libdir/diskquota-2.2.so' LANGUAGE C; +CREATE FUNCTION diskquota.show_rejectmap() RETURNS setof diskquota.rejectmap_entry_detail AS '$libdir/diskquota-2.2.so', 'show_rejectmap' LANGUAGE C; +CREATE FUNCTION diskquota.pause() RETURNS void STRICT AS '$libdir/diskquota-2.2.so', 'diskquota_pause' LANGUAGE C; +CREATE FUNCTION diskquota.resume() RETURNS void STRICT AS '$libdir/diskquota-2.2.so', 'diskquota_resume' LANGUAGE C; +CREATE FUNCTION diskquota.show_worker_epoch() RETURNS bigint STRICT AS '$libdir/diskquota-2.2.so', 'show_worker_epoch' LANGUAGE C; +CREATE FUNCTION diskquota.wait_for_worker_new_epoch() RETURNS boolean STRICT AS '$libdir/diskquota-2.2.so', 'wait_for_worker_new_epoch' LANGUAGE C; +CREATE FUNCTION diskquota.status() RETURNS TABLE ("name" text, "status" text) STRICT AS '$libdir/diskquota-2.2.so', 'diskquota_status' LANGUAGE C; +CREATE FUNCTION diskquota.show_relation_cache() RETURNS setof diskquota.relation_cache_detail AS '$libdir/diskquota-2.2.so', 'show_relation_cache' LANGUAGE C; +CREATE FUNCTION diskquota.relation_size_local(reltablespace oid, relfilenode oid, relpersistence "char", relstorage "char", relam oid) RETURNS bigint STRICT AS '$libdir/diskquota-2.2.so', 'relation_size_local' LANGUAGE C; +CREATE FUNCTION diskquota.pull_all_table_size(OUT tableid oid, OUT size bigint, OUT segid smallint) RETURNS SETOF RECORD AS '$libdir/diskquota-2.2.so', 'pull_all_table_size' LANGUAGE C; + +CREATE FUNCTION diskquota.relation_size(relation regclass) RETURNS bigint STRICT AS $$ + SELECT SUM(size)::bigint FROM ( + SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, + CASE WHEN EXISTS + (SELECT FROM pg_catalog.pg_attribute WHERE attrelid = 'pg_class'::regclass AND attname = 'relstorage') THEN relstorage::"char" ELSE ''::"char" END, + relam) AS size + FROM gp_dist_random('pg_class') as relstorage WHERE oid = relation + UNION ALL + SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, + CASE WHEN EXISTS + (SELECT FROM pg_catalog.pg_attribute WHERE attrelid = 'pg_class'::regclass AND attname = 'relstorage') THEN relstorage::"char" ELSE ''::"char" END, + relam) AS size + FROM pg_class as relstorage WHERE oid = relation + ) AS t $$ LANGUAGE SQL; + +CREATE FUNCTION diskquota.show_relation_cache_all_seg() RETURNS setof diskquota.relation_cache_detail AS $$ + WITH relation_cache AS ( + SELECT diskquota.show_relation_cache() AS a + FROM gp_dist_random('gp_id') + ) + SELECT (a).* FROM relation_cache; $$ LANGUAGE 
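With the UDFs above in place, the 2.2 API can be exercised directly; a hypothetical session (the schema, table, and limit are assumptions for illustration):

    SELECT diskquota.set_schema_quota('s1', '1 MB');        -- cap schema s1 at 1 MB
    SELECT diskquota.relation_size('s1.t1'::regclass);      -- cluster-wide size of one relation
    SELECT * FROM diskquota.show_relation_cache_all_seg();  -- relation cache from every segment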
SQL; + +-- view part +CREATE VIEW diskquota.show_all_relation_view AS +WITH + relation_cache AS ( + SELECT (f).* FROM diskquota.show_relation_cache() as f + ) +SELECT DISTINCT(oid), relowner, relnamespace, reltablespace from ( + SELECT relid as oid, owneroid as relowner, namespaceoid as relnamespace, spcnode as reltablespace FROM relation_cache + UNION + SELECT oid, relowner, relnamespace, reltablespace from pg_class +) as union_relation; + +CREATE VIEW diskquota.show_fast_schema_quota_view AS +WITH + quota_usage AS ( + SELECT + relnamespace, + SUM(size) AS total_size + FROM + diskquota.table_size, + diskquota.show_all_relation_view + WHERE + tableid = diskquota.show_all_relation_view.oid AND + segid = -1 + GROUP BY + relnamespace + ) +SELECT + nspname AS schema_name, + targetoid AS schema_oid, + quotalimitMB AS quota_in_mb, + COALESCE(total_size, 0) AS nspsize_in_bytes +FROM + diskquota.quota_config JOIN + pg_namespace ON targetoid = pg_namespace.oid LEFT OUTER JOIN + quota_usage ON pg_namespace.oid = relnamespace +WHERE + quotaType = 0; -- NAMESPACE_QUOTA + +CREATE VIEW diskquota.show_fast_role_quota_view AS +WITH + quota_usage AS ( + SELECT + relowner, + SUM(size) AS total_size + FROM + diskquota.table_size, + diskquota.show_all_relation_view + WHERE + tableid = diskquota.show_all_relation_view.oid AND + segid = -1 + GROUP BY + relowner + ) +SELECT + rolname AS role_name, + targetoid AS role_oid, + quotalimitMB AS quota_in_mb, + COALESCE(total_size, 0) AS rolsize_in_bytes +FROM + diskquota.quota_config JOIN + pg_roles ON targetoid = pg_roles.oid LEFT OUTER JOIN + quota_usage ON pg_roles.oid = relowner +WHERE + quotaType = 1; -- ROLE_QUOTA + +CREATE VIEW diskquota.show_fast_database_size_view AS +SELECT ( + (SELECT SUM(pg_relation_size(oid)) FROM pg_class WHERE oid <= 16384) + + + (SELECT SUM(size) FROM diskquota.table_size WHERE segid = -1) +) AS dbsize; + +CREATE VIEW diskquota.rejectmap AS SELECT * FROM diskquota.show_rejectmap() AS BM; + +CREATE VIEW diskquota.show_fast_schema_tablespace_quota_view AS +WITH + default_tablespace AS ( + SELECT dattablespace FROM pg_database + WHERE datname = current_database() + ), + quota_usage AS ( + SELECT + relnamespace, + CASE + WHEN reltablespace = 0 THEN dattablespace + ELSE reltablespace + END AS reltablespace, + SUM(size) AS total_size + FROM + diskquota.table_size, + diskquota.show_all_relation_view, + default_tablespace + WHERE + tableid = diskquota.show_all_relation_view.oid AND + segid = -1 + GROUP BY + relnamespace, + reltablespace, + dattablespace + ), + full_quota_config AS ( + SELECT + primaryOid, + tablespaceoid, + quotalimitMB + FROM + diskquota.quota_config AS config, + diskquota.target AS target + WHERE + config.targetOid = target.rowId AND + config.quotaType = target.quotaType AND + config.quotaType = 2 -- NAMESPACE_TABLESPACE_QUOTA + ) +SELECT + nspname AS schema_name, + primaryoid AS schema_oid, + spcname AS tablespace_name, + tablespaceoid AS tablespace_oid, + quotalimitMB AS quota_in_mb, + COALESCE(total_size, 0) AS nspsize_tablespace_in_bytes +FROM + full_quota_config JOIN + pg_namespace ON primaryOid = pg_namespace.oid JOIN + pg_tablespace ON tablespaceoid = pg_tablespace.oid LEFT OUTER JOIN + quota_usage ON pg_namespace.oid = relnamespace AND pg_tablespace.oid = reltablespace; + +CREATE VIEW diskquota.show_fast_role_tablespace_quota_view AS +WITH + default_tablespace AS ( + SELECT dattablespace FROM pg_database + WHERE datname = current_database() + ), + quota_usage AS ( + SELECT + relowner, + CASE + WHEN reltablespace = 0 
THEN dattablespace + ELSE reltablespace + END AS reltablespace, + SUM(size) AS total_size + FROM + diskquota.table_size, + diskquota.show_all_relation_view, + default_tablespace + WHERE + tableid = diskquota.show_all_relation_view.oid AND + segid = -1 + GROUP BY + relowner, + reltablespace, + dattablespace + ), + full_quota_config AS ( + SELECT + primaryOid, + tablespaceoid, + quotalimitMB + FROM + diskquota.quota_config AS config, + diskquota.target AS target + WHERE + config.targetOid = target.rowId AND + config.quotaType = target.quotaType AND + config.quotaType = 3 -- ROLE_TABLESPACE_QUOTA + ) +SELECT + rolname AS role_name, + primaryoid AS role_oid, + spcname AS tablespace_name, + tablespaceoid AS tablespace_oid, + quotalimitMB AS quota_in_mb, + COALESCE(total_size, 0) AS rolsize_tablespace_in_bytes +FROM + full_quota_config JOIN + pg_roles ON primaryoid = pg_roles.oid JOIN + pg_tablespace ON tablespaceoid = pg_tablespace.oid LEFT OUTER JOIN + quota_usage ON pg_roles.oid = relowner AND pg_tablespace.oid = reltablespace; + +CREATE VIEW diskquota.show_segment_ratio_quota_view AS +SELECT + spcname as tablespace_name, + pg_tablespace.oid as tablespace_oid, + segratio as per_seg_quota_ratio +FROM + diskquota.quota_config JOIN + pg_tablespace ON targetOid = pg_tablespace.oid + AND quotatype = 4; + +-- view end + +-- prepare to boot +INSERT INTO diskquota.state SELECT (count(relname) = 0)::int FROM pg_class AS c, pg_namespace AS n WHERE c.oid > 16384 AND relnamespace = n.oid AND nspname != 'diskquota'; + +-- re-dispatch pause status to false. in case user pause-drop-recreate. +-- refer to see test case 'test_drop_after_pause' +SELECT FROM diskquota.resume(); + + +--- Starting the worker has to be the last step. +CREATE FUNCTION diskquota.diskquota_start_worker() RETURNS void STRICT AS '$libdir/diskquota-2.2.so' LANGUAGE C; +SELECT diskquota.diskquota_start_worker(); +DROP FUNCTION diskquota.diskquota_start_worker(); diff --git a/diskquota.c b/diskquota.c index 519da4754d8..4581a29e725 100644 --- a/diskquota.c +++ b/diskquota.c @@ -21,6 +21,7 @@ #include "postgres.h" #include "funcapi.h" +#include "pgstat.h" #include "access/xact.h" #include "cdb/cdbgang.h" #include "cdb/cdbvars.h" @@ -140,7 +141,10 @@ static void vacuum_db_entry(DiskquotaDBEntry *db); static void init_bgworker_handles(void); static BackgroundWorkerHandle *get_bgworker_handle(uint32 worker_id); static void free_bgworker_handle(uint32 worker_id); -static BgwHandleStatus WaitForBackgroundWorkerShutdown(BackgroundWorkerHandle *handle); +#if GP_VERSION_NUM < 70000 +/* WaitForBackgroundWorkerShutdown is copied from gpdb7 */ +static BgwHandleStatus WaitForBackgroundWorkerShutdown(BackgroundWorkerHandle *handle); +#endif /* GP_VERSION_NUM */ /* * diskquota_launcher_shmem_size @@ -346,11 +350,16 @@ disk_quota_worker_main(Datum main_arg) /* We're now ready to receive signals */ BackgroundWorkerUnblockSignals(); +#if GP_VERSION_NUM < 70000 /* Connect to our database */ BackgroundWorkerInitializeConnection(dbname, NULL); - set_config_option("application_name", DISKQUOTA_APPLICATION_NAME, PGC_USERSET, PGC_S_SESSION, GUC_ACTION_SAVE, true, 0); +#else + BackgroundWorkerInitializeConnection(dbname, NULL, 0); + set_config_option("application_name", DISKQUOTA_APPLICATION_NAME, PGC_USERSET, PGC_S_SESSION, GUC_ACTION_SAVE, true, + 0, true); +#endif /* GP_VERSION_NUM */ /* diskquota worker should has Gp_role as dispatcher */ Gp_role = GP_ROLE_DISPATCH; @@ -372,8 +381,10 @@ disk_quota_worker_main(Datum main_arg) int has_error = 
worker_spi_get_extension_version(&major, &minor) != 0; if (major == DISKQUOTA_MAJOR_VERSION && minor == DISKQUOTA_MINOR_VERSION) break; +#if GP_VERSION_NUM < 70000 + /* MemoryAccount has been removed on gpdb7 */ MemoryAccounting_Reset(); - +#endif /* GP_VERSION_NUM */ if (has_error) { static char _errfmt[] = "find issues in pg_class.pg_extension check server log. waited %d seconds", @@ -395,8 +406,8 @@ disk_quota_worker_main(Datum main_arg) errhint("run alter extension diskquota update to \"%d.%d\"", DISKQUOTA_MAJOR_VERSION, DISKQUOTA_MINOR_VERSION))); - int rc = WaitLatch(&MyProc->procLatch, WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, - diskquota_naptime * 1000L); + int rc = DiskquotaWaitLatch(&MyProc->procLatch, WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, + diskquota_naptime * 1000L); ResetLatch(&MyProc->procLatch); if (rc & WL_POSTMASTER_DEATH) { @@ -438,13 +449,16 @@ disk_quota_worker_main(Datum main_arg) break; } +#if GP_VERSION_NUM < 70000 MemoryAccounting_Reset(); +#endif /* GP_VERSION_NUM */ if (is_ready) { update_monitordb_status(MyWorkerInfo->dbEntry->dbid, DB_UNREADY); is_ready = false; } - rc = WaitLatch(&MyProc->procLatch, WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, diskquota_naptime * 1000L); + rc = DiskquotaWaitLatch(&MyProc->procLatch, WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, + diskquota_naptime * 1000L); ResetLatch(&MyProc->procLatch); // be nice to scheduler when naptime == 0 and diskquota_is_paused() == true @@ -489,7 +503,9 @@ disk_quota_worker_main(Datum main_arg) // GPDB6 opend a MemoryAccount for us without asking us. // and GPDB6 did not release the MemoryAccount after SPI finish. // Reset the MemoryAccount although we never create it. +#if GP_VERSION_NUM < 70000 MemoryAccounting_Reset(); +#endif /* GP_VERSION_NUM */ if (DiskquotaLauncherShmem->isDynamicWorker) { break; @@ -502,7 +518,8 @@ disk_quota_worker_main(Datum main_arg) * necessary, but is awakened if postmaster dies. That way the * background process goes away immediately in an emergency. */ - rc = WaitLatch(&MyProc->procLatch, WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, diskquota_naptime * 1000L); + rc = DiskquotaWaitLatch(&MyProc->procLatch, WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, + diskquota_naptime * 1000L); ResetLatch(&MyProc->procLatch); // be nice to scheduler when naptime == 0 and diskquota_is_paused() == true @@ -555,6 +572,13 @@ void disk_quota_launcher_main(Datum main_arg) { time_t loop_begin, loop_end; + + /* the launcher should exit when the master boots in utility mode */ + if (Gp_role != GP_ROLE_DISPATCH) + { + proc_exit(0); + } + MemoryContextSwitchTo(TopMemoryContext); init_bgworker_handles(); @@ -573,13 +597,17 @@ disk_quota_launcher_main(Datum main_arg) * connect to our database 'diskquota'. launcher process will exit if * 'diskquota' database is not existed. 
*/ - BackgroundWorkerInitializeConnection(DISKQUOTA_DB, NULL); +#if GP_VERSION_NUM < 70000 + /* Connect to our database */ + BackgroundWorkerInitializeConnection(DISKQUOTA_DB, NULL); set_config_option("application_name", DISKQUOTA_APPLICATION_NAME, PGC_USERSET, PGC_S_SESSION, GUC_ACTION_SAVE, true, 0); - - /* diskquota launcher should has Gp_role as dispatcher */ - Gp_role = GP_ROLE_DISPATCH; +#else + BackgroundWorkerInitializeConnection(DISKQUOTA_DB, NULL, 0); + set_config_option("application_name", DISKQUOTA_APPLICATION_NAME, PGC_USERSET, PGC_S_SESSION, GUC_ACTION_SAVE, true, + 0, true); +#endif /* GP_VERSION_NUM */ /* * use table diskquota_namespace.database_list to store diskquota enabled @@ -670,8 +698,8 @@ disk_quota_launcher_main(Datum main_arg) if (nap.tv_sec != 0 || nap.tv_usec != 0) { elog(DEBUG1, "[diskquota] naptime sec:%ld, usec:%ld", nap.tv_sec, nap.tv_usec); - rc = WaitLatch(&MyProc->procLatch, WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, - (nap.tv_sec * 1000L) + (nap.tv_usec / 1000L)); + rc = DiskquotaWaitLatch(&MyProc->procLatch, WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, + (nap.tv_sec * 1000L) + (nap.tv_usec / 1000L)); ResetLatch(&MyProc->procLatch); /* Emergency bailout if postmaster has died */ @@ -830,8 +858,9 @@ create_monitor_db_table(void) ret_code = SPI_execute(sql, false, 0); if (ret_code != SPI_OK_UTILITY) { + int saved_errno = errno; ereport(ERROR, (errmsg("[diskquota launcher] SPI_execute error, sql: \"%s\", reason: %s, ret_code: %d.", - sql, strerror(errno), ret_code))); + sql, strerror(saved_errno), ret_code))); } } PG_CATCH(); @@ -878,21 +907,36 @@ init_database_list(void) ret = SPI_connect(); if (ret != SPI_OK_CONNECT) - ereport(ERROR, - (errmsg("[diskquota launcher] SPI connect error, reason: %s, return code: %d.", strerror(errno), ret))); + { + int saved_errno = errno; + ereport(ERROR, (errmsg("[diskquota launcher] SPI connect error, reason: %s, return code: %d.", strerror(saved_errno), ret))); + } ret = SPI_execute("select dbid from diskquota_namespace.database_list;", true, 0); if (ret != SPI_OK_SELECT) + { + int saved_errno = errno; ereport(ERROR, (errmsg("[diskquota launcher] 'select diskquota_namespace.database_list', reason: %s, return code: %d.", - strerror(errno), ret))); + strerror(saved_errno), + ret))); + } tupdesc = SPI_tuptable->tupdesc; +#if GP_VERSION_NUM < 70000 if (tupdesc->natts != 1 || tupdesc->attrs[0]->atttypid != OIDOID) { ereport(LOG, (errmsg("[diskquota launcher], natts/atttypid: %d.", tupdesc->natts != 1 ? tupdesc->natts : tupdesc->attrs[0]->atttypid))); ereport(ERROR, (errmsg("[diskquota launcher] table database_list corrupt, launcher will exit. natts: "))); } +#else + if (tupdesc->natts != 1 || tupdesc->attrs[0].atttypid != OIDOID) + { + ereport(LOG, (errmsg("[diskquota launcher], natts/atttypid: %d.", + tupdesc->natts != 1 ? tupdesc->natts : tupdesc->attrs[0].atttypid))); + ereport(ERROR, (errmsg("[diskquota launcher] table database_list corrupt, launcher will exit. 
natts: "))); + } +#endif /* GP_VERSION_NUM */ for (i = 0; i < SPI_processed; i++) { HeapTuple tup; @@ -965,7 +1009,9 @@ process_extension_ddl_message() (errmsg("[diskquota launcher]: received create/drop extension diskquota message, extension launcher"))); do_process_extension_ddl_message(&code, local_extension_ddl_message); +#if GP_VERSION_NUM < 70000 MemoryAccounting_Reset(); +#endif /* GP_VERSION_NUM */ /* Send createdrop extension diskquota result back to QD */ LWLockAcquire(diskquota_locks.extension_ddl_message_lock, LW_EXCLUSIVE); @@ -1184,9 +1230,13 @@ add_dbid_to_database_list(Oid dbid) true, 0); if (ret != SPI_OK_SELECT) + { + int saved_errno = errno; ereport(ERROR, (errmsg("[diskquota launcher] error occured while checking database_list, " " code: %d, reason: %s.", - ret, strerror(errno)))); + ret, + strerror(saved_errno)))); + } if (SPI_processed == 1) { @@ -1200,9 +1250,13 @@ add_dbid_to_database_list(Oid dbid) 0); if (ret != SPI_OK_INSERT || SPI_processed != 1) + { + int saved_errno = errno; ereport(ERROR, (errmsg("[diskquota launcher] error occured while updating database_list, " " code: %d, reason: %s.", - ret, strerror(errno)))); + ret, + strerror(saved_errno)))); + } return; } @@ -1225,10 +1279,12 @@ del_dbid_from_database_list(Oid dbid) ObjectIdGetDatum(dbid), }, NULL, false, 0); - - ereportif(ret != SPI_OK_DELETE, ERROR, - (errmsg("[diskquota launcher] del_dbid_from_database_list: reason: %s, ret_code: %d.", strerror(errno), - ret))); + if (ret != SPI_OK_DELETE) + { + int saved_errno = errno; + ereport(ERROR, + (errmsg("[diskquota launcher] del_dbid_from_database_list: reason: %s, ret_code: %d.", strerror(saved_errno), ret))); + } } /* @@ -1482,11 +1538,12 @@ diskquota_status(PG_FUNCTION_ARGS) if (SRF_IS_FIRSTCALL()) { + TupleDesc tupdesc; funcctx = SRF_FIRSTCALL_INIT(); MemoryContext oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx); { - TupleDesc tupdesc = CreateTemplateTupleDesc(2, false); + tupdesc = DiskquotaCreateTemplateTupleDesc(2); TupleDescInitEntry(tupdesc, 1, "name", TEXTOID, -1, 0); TupleDescInitEntry(tupdesc, 2, "status", TEXTOID, -1, 0); funcctx->tuple_desc = BlessTupleDesc(tupdesc); @@ -1801,6 +1858,7 @@ free_bgworker_handle(uint32 worker_id) } } +#if GP_VERSION_NUM < 70000 static BgwHandleStatus WaitForBackgroundWorkerShutdown(BackgroundWorkerHandle *handle) { @@ -1816,7 +1874,7 @@ WaitForBackgroundWorkerShutdown(BackgroundWorkerHandle *handle) status = GetBackgroundWorkerPid(handle, &pid); if (status == BGWH_STOPPED) break; - rc = WaitLatch(&MyProc->procLatch, WL_LATCH_SET | WL_POSTMASTER_DEATH, 0); + rc = DiskquotaWaitLatch(&MyProc->procLatch, WL_LATCH_SET | WL_POSTMASTER_DEATH, 0); if (rc & WL_POSTMASTER_DEATH) { @@ -1829,3 +1887,4 @@ WaitForBackgroundWorkerShutdown(BackgroundWorkerHandle *handle) return status; } +#endif /* GP_VERSION_NUM */ diff --git a/diskquota.control b/diskquota.control index 628714b5201..32b4809fde1 100644 --- a/diskquota.control +++ b/diskquota.control @@ -1,5 +1,5 @@ # diskquota extension comment = 'Disk Quota Main Program' -default_version = '2.1' +default_version = '2.2' module_pathname = 'do-not-use-this' relocatable = true diff --git a/diskquota.h b/diskquota.h index 28008fc1cc3..a52037cf72e 100644 --- a/diskquota.h +++ b/diskquota.h @@ -17,7 +17,9 @@ #include "postgres.h" #include "port/atomics.h" +#include "catalog/pg_class.h" #include "lib/ilist.h" +#include "lib/stringinfo.h" #include "fmgr.h" #include "storage/lock.h" #include "storage/lwlock.h" @@ -36,6 +38,12 @@ #define MAX_NUM_TABLE_SIZE_ENTRIES 
(diskquota_max_table_segments / SEGMENT_SIZE_ARRAY_LENGTH) /* length of segment size array in TableSizeEntry */ #define SEGMENT_SIZE_ARRAY_LENGTH 100 +typedef enum +{ + DISKQUOTA_TAG_HASH = 0, + DISKQUOTA_OID_HASH, + DISKQUOTA_STRING_HASH, +} DiskquotaHashFunction; /* max number of monitored database with diskquota enabled */ #define MAX_NUM_MONITORED_DB 50 @@ -43,6 +51,23 @@ #define EXTENSION_SCHEMA "diskquota" extern int diskquota_worker_timeout; +#if GP_VERSION_NUM < 70000 +#define TableIsHeap(relstorage, relam) ((bool)(relstorage == RELSTORAGE_HEAP)) +#define TableIsAoRows(relstorage, relam) ((bool)(relstorage == RELSTORAGE_AOROWS)) +#define TableIsAoCols(relstorage, relam) ((bool)(relstorage == RELSTORAGE_AOCOLS)) +#define DiskquotaCreateTemplateTupleDesc(natts) CreateTemplateTupleDesc(natts, false /*hasoid*/) +#define DiskquotaWaitLatch(latch, wakeEvents, timeout) WaitLatch(latch, wakeEvents, timeout) +#define DiskquotaGetRelstorage(classForm) (classForm->relstorage) +#else +#define TableIsHeap(relstorage, relam) \ + ((bool)(relam != 0 && relam != AO_ROW_TABLE_AM_OID && relam != AO_COLUMN_TABLE_AM_OID)) +#define TableIsAoRows(relstorage, relam) ((bool)(relam == AO_ROW_TABLE_AM_OID)) +#define TableIsAoCols(relstorage, relam) ((bool)(relam == AO_COLUMN_TABLE_AM_OID)) +#define DiskquotaCreateTemplateTupleDesc(natts) CreateTemplateTupleDesc(natts); +#define DiskquotaWaitLatch(latch, wakeEvents, timeout) WaitLatch(latch, wakeEvents, timeout, WAIT_EVENT_PG_SLEEP) +#define DiskquotaGetRelstorage(classForm) (0) +#endif /* GP_VERSION_NUM */ + typedef enum { NAMESPACE_QUOTA = 0, @@ -239,7 +264,7 @@ extern int SEGCOUNT; extern int worker_spi_get_extension_version(int *major, int *minor); extern void truncateStringInfo(StringInfo str, int nchars); extern List *get_rel_oid_list(void); -extern int64 calculate_relation_size_all_forks(RelFileNodeBackend *rnode, char relstorage); +extern int64 calculate_relation_size_all_forks(RelFileNodeBackend *rnode, char relstorage, Oid relam); extern Relation diskquota_relation_open(Oid relid, LOCKMODE mode); extern bool get_rel_name_namespace(Oid relid, Oid *nsOid, char *relname); extern List *diskquota_get_index_list(Oid relid); @@ -259,4 +284,8 @@ extern void update_monitor_db(Oid dbid, FetchTableStatType action); extern void update_monitor_db_mpp(Oid dbid, FetchTableStatType action, const char *schema); extern void diskquota_stop_worker(void); extern void update_monitordb_status(Oid dbid, uint32 status); +extern HTAB *diskquota_hash_create(const char *tabname, long nelem, HASHCTL *info, int flags, + DiskquotaHashFunction hashFunction); +extern HTAB *DiskquotaShmemInitHash(const char *name, long init_size, long max_size, HASHCTL *infoP, int hash_flags, + DiskquotaHashFunction hash_function); #endif diff --git a/diskquota_test--1.0.sql b/diskquota_test--1.0.sql index 9ef874d502a..f5e39444aa9 100644 --- a/diskquota_test--1.0.sql +++ b/diskquota_test--1.0.sql @@ -27,7 +27,7 @@ CREATE TYPE diskquota_test.db_status AS ( "epoch" int8, "paused" bool ); -CREATE FUNCTION diskquota_test.db_status() RETURNS setof diskquota_test.db_status AS '$libdir/diskquota-2.1.so', 'db_status' LANGUAGE C VOLATILE; +CREATE FUNCTION diskquota_test.db_status() RETURNS setof diskquota_test.db_status AS '$libdir/diskquota-2.2.so', 'db_status' LANGUAGE C VOLATILE; CREATE FUNCTION diskquota_test.cur_db_status() RETURNS diskquota_test.db_status AS $$ SELECT * from diskquota_test.db_status() where datname = current_database(); $$ LANGUAGE SQL; diff --git a/diskquota_utility.c 
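Since diskquota_test--1.0.sql now resolves db_status from the 2.2 library, the test helpers keep working after the upgrade; a sketch of how they are typically queried (assumes the diskquota_test extension is installed):

    SELECT * FROM diskquota_test.db_status();      -- status of all monitored databases
    SELECT * FROM diskquota_test.cur_db_status();  -- just the current database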
diff --git a/diskquota_utility.c b/diskquota_utility.c
index 93fcabb3d7a..6e55ee3cc5a 100644
--- a/diskquota_utility.c
+++ b/diskquota_utility.c
@@ -22,6 +22,11 @@
 #include "access/aomd.h"
 #include "access/xact.h"
+#include "access/heapam.h"
+#if GP_VERSION_NUM >= 70000
+#include "access/genam.h"
+#include "common/hashfn.h"
+#endif /* GP_VERSION_NUM */
 #include "catalog/namespace.h"
 #include "catalog/objectaccess.h"
 #include "catalog/pg_authid.h"
@@ -34,6 +39,7 @@
 #include "commands/tablespace.h"
 #include "executor/spi.h"
 #include "nodes/makefuncs.h"
+#include "pgstat.h"
 #include "storage/proc.h"
 #include "utils/snapmgr.h"
 #include "utils/builtins.h"
@@ -202,9 +208,13 @@ init_table_size_table(PG_FUNCTION_ARGS)
 static HTAB *
 calculate_all_table_size()
 {
-	Relation     classRel;
-	HeapTuple    tuple;
-	HeapScanDesc relScan;
+	Relation  classRel;
+	HeapTuple tuple;
+#if GP_VERSION_NUM < 70000
+	HeapScanDesc relScan;
+#else
+	TableScanDesc relScan;
+#endif /* GP_VERSION_NUM */
 	Oid  relid;
 	Oid  prelid;
 	Size tablesize;
@@ -214,18 +224,22 @@ calculate_all_table_size()
 	HASHCTL                    hashctl;
 	DiskQuotaActiveTableEntry *entry;
 	bool                       found;
+	char                       relstorage;

 	memset(&hashctl, 0, sizeof(hashctl));
 	hashctl.keysize   = sizeof(TableEntryKey);
 	hashctl.entrysize = sizeof(DiskQuotaActiveTableEntry);
 	hashctl.hcxt      = CurrentMemoryContext;
-	hashctl.hash      = tag_hash;

 	local_table_size_map =
-	        hash_create("local_table_size_map", 1024, &hashctl, HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION);
-
+	        diskquota_hash_create("local_table_size_map", 1024, &hashctl, HASH_ELEM | HASH_CONTEXT, DISKQUOTA_TAG_HASH);
 	classRel = heap_open(RelationRelationId, AccessShareLock);
-	relScan  = heap_beginscan_catalog(classRel, 0, NULL);
+#if GP_VERSION_NUM < 70000
+	relScan = heap_beginscan_catalog(classRel, 0, NULL);
+#else
+	relScan = table_beginscan_catalog(classRel, 0, NULL);
+#endif /* GP_VERSION_NUM */
+
 	while ((tuple = heap_getnext(relScan, ForwardScanDirection)) != NULL)
 	{
 		Form_pg_class classForm = (Form_pg_class)GETSTRUCT(tuple);
@@ -235,7 +249,11 @@ calculate_all_table_size()
 		    classForm->relkind != RELKIND_TOASTVALUE)
 			continue;

+#if GP_VERSION_NUM < 70000
 		relid = HeapTupleGetOid(tuple);
+#else
+		relid = classForm->oid;
+#endif /* GP_VERSION_NUM */
 		/* ignore system table */
 		if (relid < FirstNormalObjectId) continue;

 		rnode.node.relNode = classForm->relfilenode;
 		rnode.node.spcNode = OidIsValid(classForm->reltablespace) ? classForm->reltablespace : MyDatabaseTableSpace;
 		rnode.backend      = classForm->relpersistence == RELPERSISTENCE_TEMP ? TempRelBackendId : InvalidBackendId;
-		tablesize          = calculate_relation_size_all_forks(&rnode, classForm->relstorage);
+		relstorage         = DiskquotaGetRelstorage(classForm);
+
+		tablesize = calculate_relation_size_all_forks(&rnode, relstorage, classForm->relam);

 		keyitem.reloid = relid;
 		keyitem.segid  = GpIdentity.segindex;
@@ -288,8 +308,7 @@ pull_all_table_size(PG_FUNCTION_ARGS)

 		/* Switch to memory context appropriate for multiple function calls */
 		oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
-
-		tupdesc = CreateTemplateTupleDesc(3, false /*hasoid*/);
+		tupdesc = DiskquotaCreateTemplateTupleDesc(3);
 		TupleDescInitEntry(tupdesc, (AttrNumber)1, "TABLEID", OIDOID, -1 /*typmod*/, 0 /*attdim*/);
 		TupleDescInitEntry(tupdesc, (AttrNumber)2, "SIZE", INT8OID, -1 /*typmod*/, 0 /*attdim*/);
 		TupleDescInitEntry(tupdesc, (AttrNumber)3, "SEGID", INT2OID, -1 /*typmod*/, 0 /*attdim*/);
@@ -360,7 +379,7 @@ diskquota_start_worker(PG_FUNCTION_ARGS)
 	while (count-- > 0)
 	{
 		CHECK_FOR_INTERRUPTS();
-		rc = WaitLatch(&MyProc->procLatch, WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, 100L);
+		rc = DiskquotaWaitLatch(&MyProc->procLatch, WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, 100L);
 		if (rc & WL_POSTMASTER_DEATH) break;
 		ResetLatch(&MyProc->procLatch);
@@ -488,7 +507,10 @@ is_database_empty(void)
 	                " and relkind not in ('v', 'c', 'f')",
 	                true, 0);
 	if (ret != SPI_OK_SELECT)
-		elog(ERROR, "cannot select pg_class and pg_namespace table, reason: %s.", strerror(errno));
+	{
+		int saved_errno = errno;
+		elog(ERROR, "cannot select pg_class and pg_namespace table, reason: %s.", strerror(saved_errno));
+	}

 	tupdesc = SPI_tuptable->tupdesc;
 	/* check sql return value whether database is empty */
@@ -543,7 +565,7 @@ diskquota_stop_worker(void)
 	while (count-- > 0)
 	{
 		CHECK_FOR_INTERRUPTS();
-		rc = WaitLatch(&MyProc->procLatch, WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, 100L);
+		rc = DiskquotaWaitLatch(&MyProc->procLatch, WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, 100L);
 		if (rc & WL_POSTMASTER_DEATH) break;
 		ResetLatch(&MyProc->procLatch);
@@ -1340,7 +1362,10 @@ relation_file_stat(int segno, void *ctx)
 	if (stat(file_path, &fst) < 0)
 	{
 		if (errno != ENOENT)
-			ereport(WARNING, (errcode_for_file_access(), errmsg("[diskquota] could not stat file %s: %m", file_path)));
+		{
+			int saved_errno = errno;
+			ereport(WARNING, (errcode_for_file_access(),
+			                  errmsg("[diskquota] could not stat file %s: %s", file_path, strerror(saved_errno))));
+		}
 		return false;
 	}
 	stat_ctx->size += fst.st_size;
@@ -1352,13 +1377,13 @@ relation_file_stat(int segno, void *ctx)
 * This function is following calculate_relation_size()
 */
 int64
-calculate_relation_size_all_forks(RelFileNodeBackend *rnode, char relstorage)
+calculate_relation_size_all_forks(RelFileNodeBackend *rnode, char relstorage, Oid relam)
 {
 	int64        totalsize = 0;
 	ForkNumber   forkNum;
 	unsigned int segno = 0;

-	if (relstorage == RELSTORAGE_HEAP)
+	if (TableIsHeap(relstorage, relam))
 	{
 		for (forkNum = 0; forkNum <= MAX_FORKNUM; forkNum++)
 		{
@@ -1373,7 +1398,7 @@ calculate_relation_size_all_forks(RelFileNodeBackend *rnode, char relstorage)
 		}
 		return totalsize;
 	}
-	else if (relstorage == RELSTORAGE_AOROWS || relstorage == RELSTORAGE_AOCOLS)
+	else if (TableIsAoRows(relstorage, relam) || TableIsAoCols(relstorage, relam))
 	{
 		RelationFileStatCtx ctx = {0};
 		ctx.relation_path       = relpathbackend(rnode->node, rnode->backend, MAIN_FORKNUM);
@@ -1400,6 +1425,7 @@ relation_size_local(PG_FUNCTION_ARGS)
 	Oid  relfilenode    = PG_GETARG_OID(1);
 	char relpersistence = PG_GETARG_CHAR(2);
 	char relstorage     = PG_GETARG_CHAR(3);
+	Oid  relam          = PG_GETARG_OID(4);
 	RelFileNodeBackend rnode = {0};
 	int64              size  = 0;
@@ -1408,7 +1434,7 @@ relation_size_local(PG_FUNCTION_ARGS)
 	rnode.node.spcNode = OidIsValid(reltablespace) ? reltablespace : MyDatabaseTableSpace;
 	rnode.backend      = relpersistence == RELPERSISTENCE_TEMP ? TempRelBackendId : InvalidBackendId;

-	size = calculate_relation_size_all_forks(&rnode, relstorage);
+	size = calculate_relation_size_all_forks(&rnode, relstorage, relam);

 	PG_RETURN_INT64(size);
 }
@@ -1463,7 +1489,7 @@ diskquota_get_index_list(Oid relid)
 		 * HOT-safety decisions. It's unsafe to touch such an index at all
 		 * since its catalog entries could disappear at any instant.
 		 */
-		if (!IndexIsLive(index)) continue;
+		if (!index->indislive) continue;

 		/* Add index's OID to result list in the proper order */
 		result = lappend_oid(result, index->indexrelid);
@@ -1609,3 +1635,40 @@ check_role(Oid roleoid, char *rolname, int64 quota_limit_mb)
 		ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
 		                errmsg("Can not set disk quota for system owner: %s", rolname)));
 }
+
+HTAB *
+diskquota_hash_create(const char *tabname, long nelem, HASHCTL *info, int flags, DiskquotaHashFunction hashFunction)
+{
+#if GP_VERSION_NUM < 70000
+	if (hashFunction == DISKQUOTA_TAG_HASH)
+		info->hash = tag_hash;
+	else if (hashFunction == DISKQUOTA_OID_HASH)
+		info->hash = oid_hash;
+	else
+		info->hash = string_hash;
+	return hash_create(tabname, nelem, info, flags | HASH_FUNCTION);
+#else
+	return hash_create(tabname, nelem, info, flags | HASH_BLOBS);
+#endif /* GP_VERSION_NUM */
+}
+
+HTAB *
+DiskquotaShmemInitHash(const char *name,       /* table string name for shmem index */
+                       long        init_size,  /* initial table size */
+                       long        max_size,   /* max size of the table */
+                       HASHCTL    *infoP,      /* info about key and bucket size */
+                       int         hash_flags, /* info about infoP */
+                       DiskquotaHashFunction hashFunction)
+{
+#if GP_VERSION_NUM < 70000
+	if (hashFunction == DISKQUOTA_TAG_HASH)
+		infoP->hash = tag_hash;
+	else if (hashFunction == DISKQUOTA_OID_HASH)
+		infoP->hash = oid_hash;
+	else
+		infoP->hash = string_hash;
+	return ShmemInitHash(name, init_size, max_size, infoP, hash_flags | HASH_FUNCTION);
+#else
+	return ShmemInitHash(name, init_size, max_size, infoP, hash_flags | HASH_BLOBS);
+#endif /* GP_VERSION_NUM */
+}
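A recurring fix throughout diskquota_utility.c above replaces strerror(errno) inside ereport()/elog() with a copy taken beforehand: the auxiliary calls in an error report (errmsg(), formatting, allocation) may themselves touch errno before strerror() is evaluated, so the value is captured first. A condensed sketch of the pattern, assuming the PostgreSQL server headers; the query and message text are placeholders:

    int ret = SPI_execute("SELECT 1", true, 0);
    if (ret != SPI_OK_SELECT)
    {
        /* Copy errno before any intermediate call can clobber it. */
        int saved_errno = errno;
        ereport(ERROR, (errmsg("[diskquota] SPI_execute failed: code: %d, reason: %s.",
                               ret, strerror(saved_errno))));
    }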
ShmemInitHash("altered_reloid_cache", diskquota_max_active_tables, - diskquota_max_active_tables, &ctl, HASH_ELEM | HASH_FUNCTION); + altered_reloid_cache = DiskquotaShmemInitHash("altered_reloid_cache", diskquota_max_active_tables, + diskquota_max_active_tables, &ctl, HASH_ELEM, DISKQUOTA_OID_HASH); } /* @@ -263,8 +265,11 @@ report_relation_cache_helper(Oid relid) { return; } - +#if GP_VERSION_NUM < 70000 rel = diskquota_relation_open(relid, NoLock); +#else + rel = diskquota_relation_open(relid, AccessShareLock); +#endif /* GP_VERSION_NUM */ if (rel->rd_rel->relkind != RELKIND_FOREIGN_TABLE || rel->rd_rel->relkind != RELKIND_COMPOSITE_TYPE || rel->rd_rel->relkind != RELKIND_VIEW) update_relation_cache(relid); @@ -362,10 +367,9 @@ gp_fetch_active_tables(bool is_init) ctl.keysize = sizeof(TableEntryKey); ctl.entrysize = sizeof(DiskQuotaActiveTableEntry); ctl.hcxt = CurrentMemoryContext; - ctl.hash = tag_hash; - local_table_stats_map = hash_create("local active table map with relfilenode info", 1024, &ctl, - HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); + local_table_stats_map = diskquota_hash_create("local active table map with relfilenode info", 1024, &ctl, + HASH_ELEM | HASH_CONTEXT, DISKQUOTA_TAG_HASH); if (is_init) { @@ -484,7 +488,7 @@ diskquota_fetch_table_stat(PG_FUNCTION_ARGS) /* * prepare attribute metadata for next calls that generate the tuple */ - tupdesc = CreateTemplateTupleDesc(3, false); + tupdesc = DiskquotaCreateTemplateTupleDesc(3); TupleDescInitEntry(tupdesc, (AttrNumber)1, "TABLE_OID", OIDOID, -1, 0); TupleDescInitEntry(tupdesc, (AttrNumber)2, "TABLE_SIZE", INT8OID, -1, 0); TupleDescInitEntry(tupdesc, (AttrNumber)3, "GP_SEGMENT_ID", INT2OID, -1, 0); @@ -598,9 +602,7 @@ get_active_tables_stats(ArrayType *array) ctl.keysize = sizeof(TableEntryKey); ctl.entrysize = sizeof(DiskQuotaActiveTableEntry); ctl.hcxt = CurrentMemoryContext; - ctl.hash = tag_hash; - - local_table = hash_create("local table map", 1024, &ctl, HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); + local_table = diskquota_hash_create("local table map", 1024, &ctl, HASH_ELEM | HASH_CONTEXT, DISKQUOTA_TAG_HASH); for (i = 0; i < nitems; i++) { @@ -668,8 +670,12 @@ is_relation_being_altered(Oid relid) { LOCKTAG locktag; SetLocktagRelationOid(&locktag, relid); - VirtualTransactionId *vxid_list = GetLockConflicts(&locktag, AccessShareLock); - bool being_altered = VirtualTransactionIdIsValid(*vxid_list); /* if vxid_list is empty */ +#if GP_VERSION_NUM < 70000 + VirtualTransactionId *vxid_list = GetLockConflicts(&locktag, AccessShareLock); +#else + VirtualTransactionId *vxid_list = GetLockConflicts(&locktag, AccessShareLock, NULL); +#endif /* GP_VERSION_NUM */ + bool being_altered = VirtualTransactionIdIsValid(*vxid_list); /* if vxid_list is empty */ pfree(vxid_list); return being_altered; } @@ -735,17 +741,15 @@ get_active_tables_oid(void) ctl.keysize = sizeof(DiskQuotaActiveTableFileEntry); ctl.entrysize = sizeof(DiskQuotaActiveTableFileEntry); ctl.hcxt = CurrentMemoryContext; - ctl.hash = tag_hash; - local_active_table_file_map = hash_create("local active table map with relfilenode info", 1024, &ctl, - HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); + local_active_table_file_map = diskquota_hash_create("local active table map with relfilenode info", 1024, &ctl, + HASH_ELEM | HASH_CONTEXT, DISKQUOTA_TAG_HASH); memset(&ctl, 0, sizeof(ctl)); - ctl.keysize = sizeof(Oid); - ctl.entrysize = sizeof(Oid); - ctl.hcxt = CurrentMemoryContext; - ctl.hash = tag_hash; - local_altered_reloid_cache = - 
hash_create("local_altered_reloid_cache", 1024, &ctl, HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); + ctl.keysize = sizeof(Oid); + ctl.entrysize = sizeof(Oid); + ctl.hcxt = CurrentMemoryContext; + local_altered_reloid_cache = diskquota_hash_create("local_altered_reloid_cache", 1024, &ctl, + HASH_ELEM | HASH_CONTEXT, DISKQUOTA_OID_HASH); /* Move active table from shared memory to local active table map */ LWLockAcquire(diskquota_locks.active_table_lock, LW_EXCLUSIVE); @@ -773,13 +777,11 @@ get_active_tables_oid(void) memset(&ctl, 0, sizeof(ctl)); /* only use Oid as key here, segid is not needed */ - ctl.keysize = sizeof(Oid); - ctl.entrysize = sizeof(DiskQuotaActiveTableEntry); - ctl.hcxt = CurrentMemoryContext; - ctl.hash = oid_hash; - - local_active_table_stats_map = hash_create("local active table map with relfilenode info", 1024, &ctl, - HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); + ctl.keysize = sizeof(Oid); + ctl.entrysize = sizeof(DiskQuotaActiveTableEntry); + ctl.hcxt = CurrentMemoryContext; + local_active_table_stats_map = diskquota_hash_create("local active table map with relfilenode info", 1024, &ctl, + HASH_ELEM | HASH_CONTEXT, DISKQUOTA_OID_HASH); remove_committed_relation_from_cache(); @@ -926,8 +928,13 @@ load_table_size(HTAB *local_table_stats_map) ereport(ERROR, (errmsg("[diskquota] load_table_size SPI_execute failed: return code %d, error: %m", ret))); tupdesc = SPI_tuptable->tupdesc; +#if GP_VERSION_NUM < 70000 if (tupdesc->natts != 3 || ((tupdesc)->attrs[0])->atttypid != OIDOID || ((tupdesc)->attrs[1])->atttypid != INT8OID || ((tupdesc)->attrs[2])->atttypid != INT2OID) +#else + if (tupdesc->natts != 3 || ((tupdesc)->attrs[0]).atttypid != OIDOID || ((tupdesc)->attrs[1]).atttypid != INT8OID || + ((tupdesc)->attrs[2]).atttypid != INT2OID) +#endif /* GP_VERSION_NUM */ { if (tupdesc->natts != 3) { @@ -935,8 +942,13 @@ load_table_size(HTAB *local_table_stats_map) } else { +#if GP_VERSION_NUM < 70000 ereport(WARNING, (errmsg("[diskquota] attrs: %d, %d, %d", tupdesc->attrs[0]->atttypid, tupdesc->attrs[1]->atttypid, tupdesc->attrs[2]->atttypid))); +#else + ereport(WARNING, (errmsg("[diskquota] attrs: %d, %d, %d", tupdesc->attrs[0].atttypid, + tupdesc->attrs[1].atttypid, tupdesc->attrs[2].atttypid))); +#endif /* GP_VERSION_NUM */ } ereport(ERROR, (errmsg("[diskquota] table \"table_size\" is corrupted in database \"%s\"," " please recreate diskquota extension", @@ -1027,13 +1039,11 @@ pull_active_list_from_seg(void) DiskQuotaActiveTableEntry *entry; memset(&ctl, 0, sizeof(ctl)); - ctl.keysize = sizeof(Oid); - ctl.entrysize = sizeof(DiskQuotaActiveTableEntry); - ctl.hcxt = CurrentMemoryContext; - ctl.hash = oid_hash; - - local_active_table_oid_map = hash_create("local active table map with relfilenode info", 1024, &ctl, - HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); + ctl.keysize = sizeof(Oid); + ctl.entrysize = sizeof(DiskQuotaActiveTableEntry); + ctl.hcxt = CurrentMemoryContext; + local_active_table_oid_map = diskquota_hash_create("local active table map with relfilenode info", 1024, &ctl, + HASH_ELEM | HASH_CONTEXT, DISKQUOTA_OID_HASH); /* first get all oid of tables which are active table on any segment */ sql = "select * from diskquota.diskquota_fetch_table_stat(0, '{}'::oid[])"; diff --git a/gp_activetable.h b/gp_activetable.h index d0a07baf4a4..6b513fe97dc 100644 --- a/gp_activetable.h +++ b/gp_activetable.h @@ -46,6 +46,8 @@ extern HTAB *active_tables_map; extern HTAB *monitored_dbid_cache; extern HTAB *altered_reloid_cache; +#ifndef atooid #define atooid(x) ((Oid)strtoul((x), 
diff --git a/gp_activetable.h b/gp_activetable.h
index d0a07baf4a4..6b513fe97dc 100644
--- a/gp_activetable.h
+++ b/gp_activetable.h
@@ -46,6 +46,8 @@ extern HTAB *active_tables_map;
 extern HTAB *monitored_dbid_cache;
 extern HTAB *altered_reloid_cache;

+#ifndef atooid
 #define atooid(x) ((Oid)strtoul((x), NULL, 10))
+#endif

 #endif
diff --git a/monitored_db.c b/monitored_db.c
index 1e374fc123c..f23cb9a9464 100644
--- a/monitored_db.c
+++ b/monitored_db.c
@@ -2,6 +2,7 @@
 #include "postgres.h"

 #include "funcapi.h"
+#include "pgstat.h"
 #include "port/atomics.h"
 #include "commands/dbcommands.h"
 #include "storage/proc.h"
@@ -52,7 +53,7 @@ db_status(PG_FUNCTION_ARGS)
 		/* Switch to memory context appropriate for multiple function calls */
 		oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);

-		tupdesc = CreateTemplateTupleDesc(5, false /*hasoid*/);
+		tupdesc = DiskquotaCreateTemplateTupleDesc(5);
 		TupleDescInitEntry(tupdesc, (AttrNumber)1, "DBID", OIDOID, -1 /*typmod*/, 0 /*attdim*/);
 		TupleDescInitEntry(tupdesc, (AttrNumber)2, "DATNAME", TEXTOID, -1 /*typmod*/, 0 /*attdim*/);
 		TupleDescInitEntry(tupdesc, (AttrNumber)3, "STATUS", TEXTOID, -1 /*typmod*/, 0 /*attdim*/);
@@ -130,7 +131,8 @@ wait_for_worker_new_epoch(PG_FUNCTION_ARGS)
 			PG_RETURN_BOOL(true);
 		}
 		/* Sleep for naptime to reduce CPU usage */
-		(void)WaitLatch(&MyProc->procLatch, WL_LATCH_SET | WL_TIMEOUT, diskquota_naptime ? diskquota_naptime : 1);
+		(void)DiskquotaWaitLatch(&MyProc->procLatch, WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH,
+		                         diskquota_naptime ? diskquota_naptime : 1);
 		ResetLatch(&MyProc->procLatch);
 	}
 	PG_RETURN_BOOL(false);
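The DiskquotaWaitLatch() calls above paper over another GPDB 7 API change: WaitLatch() grew a fourth wait_event_info argument (surfaced in pg_stat_activity), which the GPDB 6 signature lacks; the macro supplies WAIT_EVENT_PG_SLEEP on the newer branch. A sketch of a bounded wait loop built on the wrapper; the loop itself is illustrative, not lifted from any one call site:

    /* Wait up to ~10 s in 100 ms slices, assuming the server headers. */
    int count = 100;
    while (count-- > 0)
    {
        CHECK_FOR_INTERRUPTS();
        int rc = DiskquotaWaitLatch(&MyProc->procLatch,
                                    WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, 100L);
        if (rc & WL_POSTMASTER_DEATH) break; /* bail out if the postmaster died */
        ResetLatch(&MyProc->procLatch);
    }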
diff --git a/quotamodel.c b/quotamodel.c
index 13f288a239a..8b7a97570b5 100644
--- a/quotamodel.c
+++ b/quotamodel.c
@@ -42,6 +42,8 @@
 #include "cdb/cdbdispatchresult.h"
 #include "cdb/cdbutil.h"

+#include
+
 /* cluster level max size of rejectmap */
 #define MAX_DISK_QUOTA_REJECT_ENTRIES (1024 * 1024)
 /* cluster level init size of rejectmap */
@@ -404,7 +406,11 @@ init_disk_quota_shmem(void)
 	 */
 	RequestAddinShmemSpace(DiskQuotaShmemSize());
 	/* locks for diskquota refer to init_lwlocks() for details */
+#if GP_VERSION_NUM < 70000
 	RequestAddinLWLocks(DiskQuotaLocksItemNumber);
+#else
+	RequestNamedLWLockTranche("DiskquotaLocks", DiskQuotaLocksItemNumber);
+#endif /* GP_VERSION_NUM */

 	/* Install startup hook to initialize our shared memory. */
 	prev_shmem_startup_hook = shmem_startup_hook;
@@ -439,10 +445,9 @@ disk_quota_shmem_startup(void)
 	memset(&hash_ctl, 0, sizeof(hash_ctl));
 	hash_ctl.keysize   = sizeof(RejectMapEntry);
 	hash_ctl.entrysize = sizeof(GlobalRejectMapEntry);
-	hash_ctl.hash      = tag_hash;
-
-	disk_quota_reject_map = ShmemInitHash("rejectmap whose quota limitation is reached", INIT_DISK_QUOTA_REJECT_ENTRIES,
-	                                      MAX_DISK_QUOTA_REJECT_ENTRIES, &hash_ctl, HASH_ELEM | HASH_FUNCTION);
+	disk_quota_reject_map =
+	        DiskquotaShmemInitHash("rejectmap whose quota limitation is reached", INIT_DISK_QUOTA_REJECT_ENTRIES,
+	                               MAX_DISK_QUOTA_REJECT_ENTRIES, &hash_ctl, HASH_ELEM, DISKQUOTA_TAG_HASH);

 	init_shm_worker_active_tables();

@@ -451,10 +456,9 @@ disk_quota_shmem_startup(void)
 	memset(&hash_ctl, 0, sizeof(hash_ctl));
 	hash_ctl.keysize   = sizeof(Oid);
 	hash_ctl.entrysize = sizeof(struct MonitorDBEntryStruct);
-	hash_ctl.hash      = oid_hash;

-	monitored_dbid_cache = ShmemInitHash("table oid cache which shoud tracking", MAX_NUM_MONITORED_DB,
-	                                     MAX_NUM_MONITORED_DB, &hash_ctl, HASH_ELEM | HASH_FUNCTION);
+	monitored_dbid_cache = DiskquotaShmemInitHash("table oid cache which should be tracked", MAX_NUM_MONITORED_DB,
+	                                              MAX_NUM_MONITORED_DB, &hash_ctl, HASH_ELEM, DISKQUOTA_OID_HASH);
 	init_launcher_shmem();
 	LWLockRelease(AddinShmemInitLock);
 }
@@ -472,6 +476,7 @@ disk_quota_shmem_startup(void)
 static void
 init_lwlocks(void)
 {
+#if GP_VERSION_NUM < 70000
 	diskquota_locks.active_table_lock          = LWLockAssign();
 	diskquota_locks.reject_map_lock            = LWLockAssign();
 	diskquota_locks.extension_ddl_message_lock = LWLockAssign();
@@ -481,6 +486,18 @@ init_lwlocks(void)
 	diskquota_locks.dblist_lock                = LWLockAssign();
 	diskquota_locks.workerlist_lock            = LWLockAssign();
 	diskquota_locks.altered_reloid_cache_lock  = LWLockAssign();
+#else
+	LWLockPadded *lock_base                    = GetNamedLWLockTranche("DiskquotaLocks");
+	diskquota_locks.active_table_lock          = &lock_base[0].lock;
+	diskquota_locks.reject_map_lock            = &lock_base[1].lock;
+	diskquota_locks.extension_ddl_message_lock = &lock_base[2].lock;
+	diskquota_locks.extension_ddl_lock         = &lock_base[3].lock;
+	diskquota_locks.monitored_dbid_cache_lock  = &lock_base[4].lock;
+	diskquota_locks.relation_cache_lock        = &lock_base[5].lock;
+	diskquota_locks.dblist_lock                = &lock_base[6].lock;
+	diskquota_locks.workerlist_lock            = &lock_base[7].lock;
+	diskquota_locks.altered_reloid_cache_lock  = &lock_base[8].lock;
+#endif /* GP_VERSION_NUM */
 }

 static Size
@@ -531,36 +548,32 @@ init_disk_quota_model(uint32 id)
 	StringInfoData str;
 	initStringInfo(&str);

+	format_name("TableSizeEntrymap", id, &str);
 	memset(&hash_ctl, 0, sizeof(hash_ctl));
 	hash_ctl.keysize   = sizeof(TableSizeEntryKey);
 	hash_ctl.entrysize = sizeof(TableSizeEntry);
-	hash_ctl.hash      = tag_hash;
-
-	format_name("TableSizeEntrymap", id, &str);
-	table_size_map = ShmemInitHash(str.data, INIT_NUM_TABLE_SIZE_ENTRIES, MAX_NUM_TABLE_SIZE_ENTRIES, &hash_ctl,
-	                               HASH_ELEM | HASH_FUNCTION);
+	table_size_map = DiskquotaShmemInitHash(str.data, INIT_NUM_TABLE_SIZE_ENTRIES, MAX_NUM_TABLE_SIZE_ENTRIES,
+	                                        &hash_ctl, HASH_ELEM, DISKQUOTA_TAG_HASH);

 	/* for localrejectmap */
+	/* WARNING: The max length of name of the map is 48 */
+	format_name("localrejectmap", id, &str);
 	memset(&hash_ctl, 0, sizeof(hash_ctl));
 	hash_ctl.keysize   = sizeof(RejectMapEntry);
 	hash_ctl.entrysize = sizeof(LocalRejectMapEntry);
-	hash_ctl.hash      = tag_hash;
-	/* WARNNING: The max length of name of the map is 48 */
-	format_name("localrejectmap", id, &str);
 	local_disk_quota_reject_map =
-	        ShmemInitHash(str.data, MAX_LOCAL_DISK_QUOTA_REJECT_ENTRIES, MAX_LOCAL_DISK_QUOTA_REJECT_ENTRIES, &hash_ctl,
-	                      HASH_ELEM | HASH_FUNCTION);
+	        DiskquotaShmemInitHash(str.data, MAX_LOCAL_DISK_QUOTA_REJECT_ENTRIES, MAX_LOCAL_DISK_QUOTA_REJECT_ENTRIES,
+	                               &hash_ctl, HASH_ELEM, DISKQUOTA_TAG_HASH);

 	/* for quota_info */
 	for (QuotaType type = 0; type < NUM_QUOTA_TYPES; ++type)
 	{
-		memset(&hash_ctl, 0, sizeof(hash_ctl));
-		hash_ctl.entrysize = sizeof(struct QuotaMapEntry);
-		hash_ctl.keysize   = sizeof(struct QuotaMapEntryKey);
-		hash_ctl.hash      = tag_hash;
 		format_name(quota_info[type].map_name, id, &str);
-		quota_info[type].map = ShmemInitHash(str.data, 1024L, 1024L, &hash_ctl, HASH_ELEM | HASH_FUNCTION);
+		memset(&hash_ctl, 0, sizeof(hash_ctl));
+		hash_ctl.entrysize = sizeof(struct QuotaMapEntry);
+		hash_ctl.keysize   = sizeof(struct QuotaMapEntryKey);
+		quota_info[type].map = DiskquotaShmemInitHash(str.data, 1024L, 1024L, &hash_ctl, HASH_ELEM, DISKQUOTA_TAG_HASH);
 	}
 	pfree(str.data);
 }
@@ -590,14 +603,12 @@ vacuum_disk_quota_model(uint32 id)
 	initStringInfo(&str);

 	/* table_size_map */
+	format_name("TableSizeEntrymap", id, &str);
 	memset(&hash_ctl, 0, sizeof(hash_ctl));
 	hash_ctl.keysize   = sizeof(TableSizeEntryKey);
 	hash_ctl.entrysize = sizeof(TableSizeEntry);
-	hash_ctl.hash      = tag_hash;
-
-	format_name("TableSizeEntrymap", id, &str);
-	table_size_map = ShmemInitHash(str.data, INIT_NUM_TABLE_SIZE_ENTRIES, MAX_NUM_TABLE_SIZE_ENTRIES, &hash_ctl,
-	                               HASH_ELEM | HASH_FUNCTION);
+	table_size_map = DiskquotaShmemInitHash(str.data, INIT_NUM_TABLE_SIZE_ENTRIES, MAX_NUM_TABLE_SIZE_ENTRIES,
+	                                        &hash_ctl, HASH_ELEM, DISKQUOTA_TAG_HASH);
 	hash_seq_init(&iter, table_size_map);
 	while ((tsentry = hash_seq_search(&iter)) != NULL)
 	{
@@ -606,15 +617,13 @@ vacuum_disk_quota_model(uint32 id)
 	}

 	/* localrejectmap */
+	format_name("localrejectmap", id, &str);
 	memset(&hash_ctl, 0, sizeof(hash_ctl));
 	hash_ctl.keysize   = sizeof(RejectMapEntry);
 	hash_ctl.entrysize = sizeof(LocalRejectMapEntry);
-	hash_ctl.hash      = tag_hash;
-	/* WARNNING: The max length of name of the map is 48 */
-	format_name("localrejectmap", id, &str);
 	local_disk_quota_reject_map =
-	        ShmemInitHash(str.data, MAX_LOCAL_DISK_QUOTA_REJECT_ENTRIES, MAX_LOCAL_DISK_QUOTA_REJECT_ENTRIES, &hash_ctl,
-	                      HASH_ELEM | HASH_FUNCTION);
+	        DiskquotaShmemInitHash(str.data, MAX_LOCAL_DISK_QUOTA_REJECT_ENTRIES, MAX_LOCAL_DISK_QUOTA_REJECT_ENTRIES,
+	                               &hash_ctl, HASH_ELEM, DISKQUOTA_TAG_HASH);
 	hash_seq_init(&iter, local_disk_quota_reject_map);
 	while ((localrejectentry = hash_seq_search(&iter)) != NULL)
 	{
@@ -625,12 +634,11 @@ vacuum_disk_quota_model(uint32 id)

 	for (QuotaType type = 0; type < NUM_QUOTA_TYPES; ++type)
 	{
-		memset(&hash_ctl, 0, sizeof(hash_ctl));
-		hash_ctl.entrysize = sizeof(struct QuotaMapEntry);
-		hash_ctl.keysize   = sizeof(struct QuotaMapEntryKey);
-		hash_ctl.hash      = tag_hash;
 		format_name(quota_info[type].map_name, id, &str);
-		quota_info[type].map = ShmemInitHash(str.data, 1024L, 1024L, &hash_ctl, HASH_ELEM | HASH_FUNCTION);
+		memset(&hash_ctl, 0, sizeof(hash_ctl));
+		hash_ctl.entrysize = sizeof(struct QuotaMapEntry);
+		hash_ctl.keysize   = sizeof(struct QuotaMapEntryKey);
+		quota_info[type].map = DiskquotaShmemInitHash(str.data, 1024L, 1024L, &hash_ctl, HASH_ELEM, DISKQUOTA_TAG_HASH);
 		hash_seq_init(&iter, quota_info[type].map);
 		while ((qentry = hash_seq_search(&iter)) != NULL)
 		{
@@ -711,7 +719,11 @@ do_check_diskquota_state_is_ready(void)
 		        errmsg("[diskquota] check diskquota state SPI_execute failed: error code %d", ret)));

 	tupdesc = SPI_tuptable->tupdesc;
+#if GP_VERSION_NUM < 70000
 	if (SPI_processed != 1 || tupdesc->natts != 1 || ((tupdesc)->attrs[0])->atttypid != INT4OID)
+#else
+	if (SPI_processed != 1 || tupdesc->natts != 1 || ((tupdesc)->attrs[0]).atttypid != INT4OID)
+#endif /* GP_VERSION_NUM */
 	{
 		ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR),
 		                errmsg("[diskquota] \"diskquota.state\" is corrupted in database \"%s\","
@@ -1434,8 +1446,13 @@ do_load_quotas(void)
 		        errmsg("[diskquota] load_quotas SPI_execute failed: error code %d", ret)));

 	tupdesc = SPI_tuptable->tupdesc;
+#if GP_VERSION_NUM < 70000
 	if (tupdesc->natts != NUM_QUOTA_CONFIG_ATTRS || ((tupdesc)->attrs[0])->atttypid != OIDOID ||
 	    ((tupdesc)->attrs[1])->atttypid != INT4OID || ((tupdesc)->attrs[2])->atttypid != INT8OID)
+#else
+	if (tupdesc->natts != NUM_QUOTA_CONFIG_ATTRS || ((tupdesc)->attrs[0]).atttypid != OIDOID ||
+	    ((tupdesc)->attrs[1]).atttypid != INT4OID || ((tupdesc)->attrs[2]).atttypid != INT8OID)
+#endif /* GP_VERSION_NUM */
 	{
 		ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR),
 		                errmsg("[diskquota] configuration table is corrupted in database \"%s\","
@@ -1728,7 +1745,11 @@ GetUserName(Oid relowner, bool skip_name)
 		pg_ltoa(relowner, namestr.data);
 		return pstrdup(namestr.data);
 	}
+#if GP_VERSION_NUM < 70000
 	return GetUserNameFromId(relowner);
+#else
+	return GetUserNameFromId(relowner, false);
+#endif /* GP_VERSION_NUM */
 }

 static void
@@ -1826,7 +1847,6 @@ refresh_rejectmap(PG_FUNCTION_ARGS)
 	hashctl.keysize   = sizeof(RejectMapEntry);
 	hashctl.entrysize = sizeof(GlobalRejectMapEntry);
 	hashctl.hcxt      = CurrentMemoryContext;
-	hashctl.hash      = tag_hash;

 	/*
 	 * Since uncommitted relations' information and the global rejectmap entries
 	 * are dispatched to segment servers, we merge those
 	 * entries into the local_rejectmap below and then flush the content of the
 	 * local_rejectmap to the global rejectmap at the end of this UDF.
 	 */
-	local_rejectmap = hash_create("local_rejectmap", 1024, &hashctl, HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION);
+	local_rejectmap =
+	        diskquota_hash_create("local_rejectmap", 1024, &hashctl, HASH_ELEM | HASH_CONTEXT, DISKQUOTA_TAG_HASH);

 	get_typlenbyvalalign(rejectmap_elem_type, &elem_width, &elem_type_by_val, &elem_alignment_code);
 	deconstruct_array(rejectmap_array_type, rejectmap_elem_type, elem_width, elem_type_by_val, elem_alignment_code,
 	                  &datums, &nulls, &reject_array_count);
@@ -2103,8 +2124,7 @@ show_rejectmap(PG_FUNCTION_ARGS)

 		/* Switch to memory context appropriate for multiple function calls */
 		oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
-
-		tupdesc = CreateTemplateTupleDesc(9, false /*hasoid*/);
+		tupdesc = DiskquotaCreateTemplateTupleDesc(9);
 		TupleDescInitEntry(tupdesc, (AttrNumber)1, "target_type", TEXTOID, -1 /*typmod*/, 0 /*attdim*/);
 		TupleDescInitEntry(tupdesc, (AttrNumber)2, "target_oid", OIDOID, -1 /*typmod*/, 0 /*attdim*/);
 		TupleDescInitEntry(tupdesc, (AttrNumber)3, "database_oid", OIDOID, -1 /*typmod*/, 0 /*attdim*/);
@@ -2117,16 +2137,15 @@ show_rejectmap(PG_FUNCTION_ARGS)

 		funcctx->tuple_desc = BlessTupleDesc(tupdesc);

+		rejectmap_ctx = (struct RejectMapCtx *)palloc(sizeof(struct RejectMapCtx));
+
 		/* Create a local hash table and fill it with entries from shared memory. */
 		memset(&hashctl, 0, sizeof(hashctl));
-		hashctl.keysize   = sizeof(RejectMapEntry);
-		hashctl.entrysize = sizeof(GlobalRejectMapEntry);
-		hashctl.hcxt      = CurrentMemoryContext;
-		hashctl.hash      = tag_hash;
-
-		rejectmap_ctx = (struct RejectMapCtx *)palloc(sizeof(struct RejectMapCtx));
-		rejectmap_ctx->rejectmap =
-		        hash_create("rejectmap_ctx rejectmap", 1024, &hashctl, HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION);
+		hashctl.keysize   = sizeof(RejectMapEntry);
+		hashctl.entrysize = sizeof(GlobalRejectMapEntry);
+		hashctl.hcxt      = CurrentMemoryContext;
+		rejectmap_ctx->rejectmap = diskquota_hash_create("rejectmap_ctx rejectmap", 1024, &hashctl,
+		                                                 HASH_ELEM | HASH_CONTEXT, DISKQUOTA_TAG_HASH);

 		LWLockAcquire(diskquota_locks.reject_map_lock, LW_SHARED);
 		hash_seq_init(&hash_seq, disk_quota_reject_map);
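init_lwlocks() above shows the third compatibility seam: GPDB 6 hands out add-in LWLocks one at a time (RequestAddinLWLocks() at load time, LWLockAssign() in the shmem startup hook), while GPDB 7 requires a named tranche requested up front and fetched back as an array. A reduced sketch of the GPDB 7 side, using two locks instead of diskquota's nine and an illustrative tranche name:

    /* In _PG_init(), before shared memory is created: */
    RequestNamedLWLockTranche("ExampleLocks", 2);

    /* Later, inside the shmem_startup_hook: */
    LWLockPadded *base   = GetNamedLWLockTranche("ExampleLocks");
    LWLock       *lock_a = &base[0].lock;
    LWLock       *lock_b = &base[1].lock;

Keeping the lock count in one constant (DiskQuotaLocksItemNumber in the patch) makes the request and the indexed lookups impossible to drift apart.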
diff --git a/relation_cache.c b/relation_cache.c
index cabf1cd7fd1..224a9c37ce4 100644
--- a/relation_cache.c
+++ b/relation_cache.c
@@ -11,19 +11,23 @@
 */
 #include "postgres.h"

+#if GP_VERSION_NUM >= 70000
+#include "access/relation.h"
+#endif /* GP_VERSION_NUM */
 #include "catalog/indexing.h"
 #include "catalog/pg_class.h"
 #include "catalog/pg_namespace.h"
 #include "catalog/pg_tablespace.h"
 #include "catalog/objectaccess.h"
+#include "utils/rel.h"
+#include "utils/relcache.h"
 #include "utils/relfilenodemap.h"
 #include "utils/syscache.h"
 #include "utils/array.h"
 #include "utils/inval.h"
 #include "funcapi.h"
-
-#include "relation_cache.h"
 #include "diskquota.h"
+#include "relation_cache.h"

 HTAB *relation_cache = NULL;
 HTAB *relid_cache    = NULL;
@@ -39,22 +43,16 @@ init_shm_worker_relation_cache(void)
 	HASHCTL ctl;

 	memset(&ctl, 0, sizeof(ctl));
-
-	ctl.keysize   = sizeof(Oid);
-	ctl.entrysize = sizeof(DiskQuotaRelationCacheEntry);
-	ctl.hash      = tag_hash;
-
-	relation_cache = ShmemInitHash("relation_cache", diskquota_max_active_tables, diskquota_max_active_tables, &ctl,
-	                               HASH_ELEM | HASH_FUNCTION);
+	ctl.keysize   = sizeof(Oid);
+	ctl.entrysize = sizeof(DiskQuotaRelationCacheEntry);
+	relation_cache = DiskquotaShmemInitHash("relation_cache", diskquota_max_active_tables, diskquota_max_active_tables,
+	                                        &ctl, HASH_ELEM, DISKQUOTA_OID_HASH);

 	memset(&ctl, 0, sizeof(ctl));
-
 	ctl.keysize   = sizeof(Oid);
 	ctl.entrysize = sizeof(DiskQuotaRelidCacheEntry);
-	ctl.hash      = tag_hash;
-
-	relid_cache = ShmemInitHash("relid_cache", diskquota_max_active_tables, diskquota_max_active_tables, &ctl,
-	                            HASH_ELEM | HASH_FUNCTION);
+	relid_cache = DiskquotaShmemInitHash("relid_cache", diskquota_max_active_tables, diskquota_max_active_tables, &ctl,
+	                                     HASH_ELEM, DISKQUOTA_OID_HASH);
 }

 Oid
@@ -138,8 +136,12 @@ static void
 update_relation_entry(Oid relid, DiskQuotaRelationCacheEntry *relation_entry, DiskQuotaRelidCacheEntry *relid_entry)
 {
 	Relation rel;
-
+#if GP_VERSION_NUM < 70000
 	rel = diskquota_relation_open(relid, NoLock);
+#else
+	rel = diskquota_relation_open(relid, AccessShareLock);
+#endif /* GP_VERSION_NUM */
+
 	if (rel == NULL)
 	{
 		return;
@@ -152,7 +154,8 @@ update_relation_entry(Oid relid, DiskQuotaRelationCacheEntry *relation_entry, Di
 		relation_entry->rnode.backend = rel->rd_backend;
 		relation_entry->owneroid      = rel->rd_rel->relowner;
 		relation_entry->namespaceoid  = rel->rd_rel->relnamespace;
-		relation_entry->relstorage    = rel->rd_rel->relstorage;
+		relation_entry->relstorage    = DiskquotaGetRelstorage(rel->rd_rel);
+		relation_entry->relam         = rel->rd_rel->relam;
 	}

 	if (relid_entry)
@@ -221,7 +224,11 @@ parse_primary_table_oid(Oid relid, bool on_bgworker)
 	}
 	else
 	{
+#if GP_VERSION_NUM < 70000
 		rel = diskquota_relation_open(relid, NoLock);
+#else
+		rel = diskquota_relation_open(relid, AccessShareLock);
+#endif /* GP_VERSION_NUM */
 		if (rel == NULL)
 		{
 			return InvalidOid;
@@ -276,9 +283,8 @@ remove_committed_relation_from_cache(void)
 	ctl.keysize   = sizeof(Oid);
 	ctl.entrysize = sizeof(DiskQuotaRelationCacheEntry);
 	ctl.hcxt      = CurrentMemoryContext;
-	ctl.hash      = oid_hash;
-
-	local_relation_cache = hash_create("local relation cache", 1024, &ctl, HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION);
+	local_relation_cache =
+	        diskquota_hash_create("local relation cache", 1024, &ctl, HASH_ELEM | HASH_CONTEXT, DISKQUOTA_OID_HASH);

 	LWLockAcquire(diskquota_locks.relation_cache_lock, LW_SHARED);
 	hash_seq_init(&iter, relation_cache);
@@ -332,7 +338,7 @@ show_relation_cache(PG_FUNCTION_ARGS)
 		/* Switch to memory context appropriate for multiple function calls */
 		oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);

-		tupdesc = CreateTemplateTupleDesc(11, false /*hasoid*/);
+		tupdesc = DiskquotaCreateTemplateTupleDesc(12);
 		TupleDescInitEntry(tupdesc, (AttrNumber)1, "RELID", OIDOID, -1 /*typmod*/, 0 /*attdim*/);
 		TupleDescInitEntry(tupdesc, (AttrNumber)2, "PRIMARY_TABLE_OID", OIDOID, -1 /*typmod*/, 0 /*attdim*/);
 		TupleDescInitEntry(tupdesc, (AttrNumber)3, "AUXREL_NUM", INT4OID, -1 /*typmod*/, 0 /*attdim*/);
@@ -344,19 +350,19 @@ show_relation_cache(PG_FUNCTION_ARGS)
 		TupleDescInitEntry(tupdesc, (AttrNumber)9, "RELNODE", OIDOID, -1 /*typmod*/, 0 /*attdim*/);
 		TupleDescInitEntry(tupdesc, (AttrNumber)10, "RELSTORAGE", CHAROID, -1 /*typmod*/, 0 /*attdim*/);
 		TupleDescInitEntry(tupdesc, (AttrNumber)11, "AUXREL_OID", OIDARRAYOID, -1 /*typmod*/, 0 /*attdim*/);
+		TupleDescInitEntry(tupdesc, (AttrNumber)12, "RELAM", OIDOID, -1 /*typmod*/, 0 /*attdim*/);

 		funcctx->tuple_desc = BlessTupleDesc(tupdesc);

+		relation_cache_ctx = (struct RelationCacheCtx *)palloc(sizeof(struct RelationCacheCtx));

 		/* Create a local hash table and fill it with entries from shared memory. */
 		memset(&hashctl, 0, sizeof(hashctl));
 		hashctl.keysize   = sizeof(Oid);
 		hashctl.entrysize = sizeof(DiskQuotaRelationCacheEntry);
 		hashctl.hcxt      = CurrentMemoryContext;
-		hashctl.hash      = tag_hash;

-		relation_cache_ctx = (struct RelationCacheCtx *)palloc(sizeof(struct RelationCacheCtx));
-		relation_cache_ctx->relation_cache = hash_create("relation_cache_ctx->relation_cache", 1024, &hashctl,
-		                                                 HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION);
+		relation_cache_ctx->relation_cache = diskquota_hash_create("relation_cache_ctx->relation_cache", 1024, &hashctl,
+		                                                           HASH_ELEM | HASH_CONTEXT, DISKQUOTA_OID_HASH);

 		LWLockAcquire(diskquota_locks.relation_cache_lock, LW_SHARED);
 		hash_seq_init(&hash_seq, relation_cache);
@@ -385,9 +391,9 @@ show_relation_cache(PG_FUNCTION_ARGS)
 	while ((entry = (DiskQuotaRelationCacheEntry *)hash_seq_search(&(relation_cache_ctx->iter))) != NULL)
 	{
 		Datum      result;
-		Datum      values[11];
-		Datum      auxrel_oid[10];
-		bool       nulls[11];
+		Datum      values[12];
+		Datum      auxrel_oid[11];
+		bool       nulls[12];
 		HeapTuple  tuple;
 		ArrayType *array;
 		int        i;
@@ -409,6 +415,7 @@ show_relation_cache(PG_FUNCTION_ARGS)
 		values[8]  = ObjectIdGetDatum(entry->rnode.node.relNode);
 		values[9]  = CharGetDatum(entry->relstorage);
 		values[10] = PointerGetDatum(array);
+		values[11] = ObjectIdGetDatum(entry->relam);

 		memset(nulls, false, sizeof(nulls));
 		tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls);
@@ -472,7 +479,8 @@ get_relation_entry_from_pg_class(Oid relid, DiskQuotaRelationCacheEntry *relatio
 	relation_entry->primary_table_relid = relid;
 	relation_entry->owneroid            = classForm->relowner;
 	relation_entry->namespaceoid        = classForm->relnamespace;
-	relation_entry->relstorage          = classForm->relstorage;
+	relation_entry->relstorage          = DiskquotaGetRelstorage(classForm);
+	relation_entry->relam               = classForm->relam;
 	relation_entry->rnode.node.spcNode =
 	        OidIsValid(classForm->reltablespace) ? classForm->reltablespace : MyDatabaseTableSpace;
 	relation_entry->rnode.node.dbNode = MyDatabaseId;
@@ -486,11 +494,13 @@ get_relation_entry_from_pg_class(Oid relid, DiskQuotaRelationCacheEntry *relatio
 		add_auxrelation_to_relation_entry(classForm->reltoastrelid, relation_entry);
 	}

-	if (classForm->relstorage == RELSTORAGE_AOROWS || classForm->relstorage == RELSTORAGE_AOCOLS)
+	heap_freetuple(classTup);
+
+	if (TableIsAoRows(relation_entry->relstorage, relation_entry->relam) ||
+	    TableIsAoCols(relation_entry->relstorage, relation_entry->relam))
 	{
 		is_ao = true;
 	}
-	heap_freetuple(classTup);

 	/* ao table */
 	if (is_ao)
@@ -553,7 +563,7 @@ get_relation_entry(Oid relid, DiskQuotaRelationCacheEntry *entry)
 }

 static void
-get_relfilenode_by_relid(Oid relid, RelFileNodeBackend *rnode, char *relstorage)
+get_relfilenode_by_relid(Oid relid, RelFileNodeBackend *rnode, char *relstorage, Oid *relam)
 {
 	DiskQuotaRelationCacheEntry *relation_cache_entry;
 	HeapTuple                    classTup;
@@ -574,7 +584,8 @@ get_relfilenode_by_relid(Oid relid, RelFileNodeBackend *rnode, char *relstorage)
 		rnode->node.dbNode  = MyDatabaseId;
 		rnode->node.relNode = classForm->relfilenode;
 		rnode->backend      = classForm->relpersistence == RELPERSISTENCE_TEMP ? TempRelBackendId : InvalidBackendId;
-		*relstorage         = classForm->relstorage;
+		*relstorage         = DiskquotaGetRelstorage(classForm);
+		*relam              = classForm->relam;
 		heap_freetuple(classTup);
 		remove_cache_entry(relid, InvalidOid);
 		return;
@@ -586,6 +597,7 @@ get_relfilenode_by_relid(Oid relid, RelFileNodeBackend *rnode, char *relstorage)
 	{
 		*rnode      = relation_cache_entry->rnode;
 		*relstorage = relation_cache_entry->relstorage;
+		*relam      = relation_cache_entry->relam;
 	}
 	LWLockRelease(diskquota_locks.relation_cache_lock);
@@ -597,18 +609,19 @@ do_calculate_table_size(DiskQuotaRelationCacheEntry *entry)
 {
 	Size               tablesize = 0;
 	RelFileNodeBackend rnode;
-	char               relstorage = 0;
 	Oid                subrelid;
+	char               relstorage = 0;
+	Oid                relam      = InvalidOid;
 	int                i;

-	get_relfilenode_by_relid(entry->relid, &rnode, &relstorage);
-	tablesize += calculate_relation_size_all_forks(&rnode, relstorage);
+	get_relfilenode_by_relid(entry->relid, &rnode, &relstorage, &relam);
+	tablesize += calculate_relation_size_all_forks(&rnode, relstorage, relam);

 	for (i = 0; i < entry->auxrel_num; i++)
 	{
 		subrelid = entry->auxrel_oid[i];
-		get_relfilenode_by_relid(subrelid, &rnode, &relstorage);
-		tablesize += calculate_relation_size_all_forks(&rnode, relstorage);
+		get_relfilenode_by_relid(subrelid, &rnode, &relstorage, &relam);
+		tablesize += calculate_relation_size_all_forks(&rnode, relstorage, relam);
 	}
 	return tablesize;
 }
diff --git a/relation_cache.h b/relation_cache.h
index c9f662617b9..f9e14b9d1a8 100644
--- a/relation_cache.h
+++ b/relation_cache.h
@@ -25,6 +25,7 @@ typedef struct DiskQuotaRelationCacheEntry
 	Oid                owneroid;
 	Oid                namespaceoid;
 	char               relstorage;
+	Oid                relam;
 	RelFileNodeBackend rnode;
 } DiskQuotaRelationCacheEntry;
diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index 42ae25e3491..9a77457250a 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -1,13 +1,18 @@
 include(${CMAKE_SOURCE_DIR}/cmake/Regress.cmake)

+if (${GP_MAJOR_VERSION} EQUAL 7)
+    set(EXPECTED_DIR_SUFFIX "7")
+endif()
+
+
 RegressTarget_Add(regress
     INIT_FILE ${CMAKE_CURRENT_SOURCE_DIR}/init_file
     SQL_DIR ${CMAKE_CURRENT_SOURCE_DIR}/regress/sql
-    EXPECTED_DIR ${CMAKE_CURRENT_SOURCE_DIR}/regress/expected
+    EXPECTED_DIR ${CMAKE_CURRENT_SOURCE_DIR}/regress/expected${EXPECTED_DIR_SUFFIX}
    RESULTS_DIR ${CMAKE_CURRENT_SOURCE_DIR}/regress/results
     DATA_DIR ${CMAKE_CURRENT_SOURCE_DIR}/data
-    SCHEDULE_FILE ${CMAKE_CURRENT_SOURCE_DIR}/regress/diskquota_schedule
+    SCHEDULE_FILE ${CMAKE_CURRENT_SOURCE_DIR}/regress/diskquota_schedule${EXPECTED_DIR_SUFFIX}
     REGRESS_OPTS
     --load-extension=gp_inject_fault
     --load-extension=diskquota_test
@@ -19,10 +24,10 @@
 RegressTarget_Add(isolation2
     INIT_FILE ${CMAKE_CURRENT_SOURCE_DIR}/init_file
     SQL_DIR ${CMAKE_CURRENT_SOURCE_DIR}/isolation2/sql
-    EXPECTED_DIR ${CMAKE_CURRENT_SOURCE_DIR}/isolation2/expected
+    EXPECTED_DIR ${CMAKE_CURRENT_SOURCE_DIR}/isolation2/expected${EXPECTED_DIR_SUFFIX}
     RESULTS_DIR ${CMAKE_CURRENT_SOURCE_DIR}/isolation2/results
     DATA_DIR ${CMAKE_CURRENT_SOURCE_DIR}/data
-    SCHEDULE_FILE ${CMAKE_CURRENT_SOURCE_DIR}/isolation2/isolation2_schedule
+    SCHEDULE_FILE ${CMAKE_CURRENT_SOURCE_DIR}/isolation2/isolation2_schedule${EXPECTED_DIR_SUFFIX}
     REGRESS_OPTS
     --load-extension=gp_inject_fault
     --dbname=isolation2test)
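Several hunks above (load_table_size(), do_check_diskquota_state_is_ready(), do_load_quotas()) carry #if blocks only because tupdesc->attrs changed from an array of pointers on GPDB 6 to an array of structs on GPDB 7. Upstream PostgreSQL 10+ exposes TupleDescAttr() for exactly this; a hedged alternative sketch, assuming the macro is back-defined on the 9.4-based GPDB 6 tree where it does not exist:

    /* Fallback for trees that predate TupleDescAttr() (PostgreSQL 10+). */
    #ifndef TupleDescAttr
    #define TupleDescAttr(tupdesc, i) ((tupdesc)->attrs[(i)])
    #endif

    if (tupdesc->natts != 3 ||
        TupleDescAttr(tupdesc, 0)->atttypid != OIDOID ||
        TupleDescAttr(tupdesc, 1)->atttypid != INT8OID ||
        TupleDescAttr(tupdesc, 2)->atttypid != INT2OID)
        ereport(ERROR, (errmsg("[diskquota] unexpected tuple descriptor")));

This would collapse the duplicated conditionals into one version-neutral expression; the patch as written keeps the explicit #if blocks instead.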
diff --git a/tests/init_file b/tests/init_file
index 2134886486b..477b135590c 100644
--- a/tests/init_file
+++ b/tests/init_file
@@ -7,6 +7,8 @@
 m/^(?:HINT|NOTICE):\s+.+\'DISTRIBUTED BY\' clause.*/
 m/WARNING: \[diskquota\] worker not found for database.*/
 m/WARNING: \[diskquota\] database .* not found for getting epoch .*/
+m/^NOTICE: CREATE TABLE will create partition */
+m/^WARNING: skipping .* cannot calculate this foreign table size.*/

 -- end_matchignore

 -- start_matchsubs
diff --git a/tests/isolation2/expected7/config.out b/tests/isolation2/expected7/config.out
new file mode 100644
index 00000000000..8ad8cbd0d08
--- /dev/null
+++ b/tests/isolation2/expected7/config.out
@@ -0,0 +1,30 @@
+
+!\retcode gpconfig -c shared_preload_libraries -v $(./data/current_binary_name);
+(exited with code 0)
+!\retcode gpconfig -c diskquota.naptime -v 0 --skipvalidation;
+(exited with code 0)
+!\retcode gpconfig -c max_worker_processes -v 20 --skipvalidation;
+(exited with code 0)
+
+!\retcode gpstop -raf;
+(exited with code 0)
+
+-- Show the values of all GUC variables
+--start_ignore
+-- naptime cannot be 0 for release build
+1: SHOW diskquota.naptime;
+ diskquota.naptime
+-------------------
+ 0
+(1 row)
+--end_ignore
+1: SHOW diskquota.max_active_tables;
+ diskquota.max_active_tables
+-----------------------------
+ 307200
+(1 row)
+1: SHOW diskquota.worker_timeout;
+ diskquota.worker_timeout
+--------------------------
+ 60
+(1 row)
diff --git a/tests/isolation2/expected7/reset_config.out b/tests/isolation2/expected7/reset_config.out
new file mode 100644
index 00000000000..3d076b36cca
--- /dev/null
+++ b/tests/isolation2/expected7/reset_config.out
@@ -0,0 +1,10 @@
+!\retcode gpconfig -c diskquota.naptime -v 2;
+(exited with code 0)
+!\retcode gpstop -u;
+(exited with code 0)
+
+1: SHOW diskquota.naptime;
+ diskquota.naptime
+-------------------
+ 2
+(1 row)
diff --git a/tests/isolation2/expected7/setup.out b/tests/isolation2/expected7/setup.out
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/tests/isolation2/expected7/test_create_extension.out b/tests/isolation2/expected7/test_create_extension.out
new file mode 100644
index 00000000000..211ebd639f6
--- /dev/null
+++ b/tests/isolation2/expected7/test_create_extension.out
@@ -0,0 +1,15 @@
+CREATE EXTENSION diskquota;
+CREATE
+
+SELECT diskquota.init_table_size_table();
+ init_table_size_table
+-----------------------
+
+(1 row)
+
+-- Wait after init so that diskquota.state is clean
+SELECT diskquota.wait_for_worker_new_epoch();
+ wait_for_worker_new_epoch
+---------------------------
+ t
+(1 row)
diff --git a/tests/isolation2/expected7/test_drop_extension.out b/tests/isolation2/expected7/test_drop_extension.out
new file mode 100644
index 00000000000..4a9e4ecb16f
--- /dev/null
+++ b/tests/isolation2/expected7/test_drop_extension.out
@@ -0,0 +1,12 @@
+SELECT diskquota.pause();
+ pause
+-------
+
+(1 row)
+SELECT diskquota.wait_for_worker_new_epoch();
+ wait_for_worker_new_epoch
+---------------------------
+ t
+(1 row)
+DROP EXTENSION diskquota;
+DROP
diff --git a/tests/isolation2/expected7/test_fast_quota_view.out b/tests/isolation2/expected7/test_fast_quota_view.out
new file mode 100644
index 00000000000..22bde74857d
--- /dev/null
+++ b/tests/isolation2/expected7/test_fast_quota_view.out
@@ -0,0 +1,182 @@
+CREATE SCHEMA s1;
+CREATE
+CREATE SCHEMA s2;
+CREATE
+
+CREATE ROLE r LOGIN SUPERUSER;
+CREATE
+
+!\retcode mkdir -p /tmp/spc1;
+-- start_ignore
+
+-- end_ignore
+(exited with code 0)
+!\retcode mkdir -p /tmp/spc2;
+-- start_ignore
+
+-- end_ignore
+(exited with code 0)
+
+DROP TABLESPACE IF EXISTS spc1;
+DROP
+CREATE TABLESPACE spc1 LOCATION '/tmp/spc1';
+CREATE
+DROP TABLESPACE IF EXISTS spc2;
+DROP
+CREATE TABLESPACE spc2 LOCATION '/tmp/spc2';
+CREATE
+
+SELECT diskquota.set_schema_quota('s1', '100 MB');
+ set_schema_quota
+------------------
+
+(1 row)
+SELECT diskquota.set_schema_tablespace_quota('s2', 'spc1','100 MB');
+ set_schema_tablespace_quota
+-----------------------------
+
+(1 row)
+SELECT diskquota.set_role_quota('r', '100 MB');
+ set_role_quota
+----------------
+
+(1 row)
+SELECT diskquota.set_role_tablespace_quota('r', 'spc2', '100 MB');
+ set_role_tablespace_quota
+---------------------------
+
+(1 row)
+
+-- test show_fast_schema_quota_view and show_fast_schema_tablespace_quota_view
+1: BEGIN;
+BEGIN
+1: CREATE TABLE s1.t(i int) DISTRIBUTED BY (i);
+CREATE
+1: INSERT INTO s1.t SELECT generate_series(1, 100000);
+INSERT 100000
+
+1: CREATE TABLE s2.t(i int) TABLESPACE spc1 DISTRIBUTED BY (i);
+CREATE
+1: INSERT INTO s2.t SELECT generate_series(1, 100000);
+INSERT 100000
+
+1: SELECT diskquota.wait_for_worker_new_epoch();
+ wait_for_worker_new_epoch
+---------------------------
+ t
+(1 row)
+
+-- check schema quota view before transaction commits
+2: SELECT schema_name, quota_in_mb, nspsize_in_bytes FROM diskquota.show_fast_schema_quota_view;
+ schema_name | quota_in_mb | nspsize_in_bytes
+-------------+-------------+------------------
+ s1 | 100 | 3932160
+(1 row)
+2: SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view;
+ schema_name | tablespace_name | quota_in_mb | nspsize_tablespace_in_bytes
+-------------+-----------------+-------------+-----------------------------
+ s2 | spc1 | 100 | 3932160
+(1 row)
+
+1: COMMIT;
+COMMIT
+2: SELECT diskquota.wait_for_worker_new_epoch();
+ wait_for_worker_new_epoch
+---------------------------
+ t
+(1 row)
+2: SELECT schema_name, quota_in_mb, nspsize_in_bytes FROM diskquota.show_fast_schema_quota_view;
+ schema_name | quota_in_mb | nspsize_in_bytes
+-------------+-------------+------------------
+ s1 | 100 | 3932160
+(1 row)
+2: SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view;
+ schema_name | tablespace_name | quota_in_mb | nspsize_tablespace_in_bytes
+-------------+-----------------+-------------+-----------------------------
+ s2 | spc1 | 100 | 3932160
+(1 row)
+
+-- login r to test role quota view
+1: SET ROLE r;
+SET
+
+-- test show_fast_role_quota_view and show_fast_role_tablespace_quota_view
+1: BEGIN;
+BEGIN
+1: CREATE TABLE t1(i int) DISTRIBUTED BY (i);
+CREATE
+1: INSERT INTO t1 SELECT generate_series(1, 100000);
+INSERT 100000
+
+1: CREATE TABLE t2(i int) TABLESPACE spc2 DISTRIBUTED BY (i);
+CREATE
+1: INSERT INTO t2 SELECT generate_series(1, 100000);
+INSERT 100000
+
+1: SELECT diskquota.wait_for_worker_new_epoch();
+ wait_for_worker_new_epoch
+---------------------------
+ t
+(1 row)
+
+-- check role quota view before transaction commits
+2: SELECT role_name, quota_in_mb, rolsize_in_bytes FROM diskquota.show_fast_role_quota_view;
+ role_name | quota_in_mb | rolsize_in_bytes
+-----------+-------------+------------------
+ r | 100 | 7864320
+(1 row)
+2: SELECT role_name, tablespace_name, quota_in_mb, rolsize_tablespace_in_bytes FROM diskquota.show_fast_role_tablespace_quota_view;
+ role_name | tablespace_name | quota_in_mb | rolsize_tablespace_in_bytes
+-----------+-----------------+-------------+-----------------------------
+ r | spc2 | 100 | 3932160
+(1 row)
+
+1: COMMIT;
+COMMIT
+2: SELECT diskquota.wait_for_worker_new_epoch();
+ wait_for_worker_new_epoch
+---------------------------
+ t
+(1 row)
+2: SELECT role_name, quota_in_mb, rolsize_in_bytes FROM diskquota.show_fast_role_quota_view;
+ role_name | quota_in_mb | rolsize_in_bytes
+-----------+-------------+------------------
+ r | 100 | 7864320
+(1 row)
+2: SELECT role_name, tablespace_name, quota_in_mb, rolsize_tablespace_in_bytes FROM diskquota.show_fast_role_tablespace_quota_view;
+ role_name | tablespace_name | quota_in_mb | rolsize_tablespace_in_bytes
+-----------+-----------------+-------------+-----------------------------
+ r | spc2 | 100 | 3932160
+(1 row)
+
+DROP TABLE IF EXISTS s1.t;
+DROP
+DROP TABLE IF EXISTS s2.t;
+DROP
+DROP TABLE IF EXISTS t1;
+DROP
+DROP TABLE IF EXISTS t2;
+DROP
+
+DROP SCHEMA IF EXISTS s1;
+DROP
+DROP SCHEMA IF EXISTS s2;
+DROP
+DROP ROLE IF EXISTS r;
+DROP
+
+DROP TABLESPACE IF EXISTS spc1;
+DROP
+DROP TABLESPACE IF EXISTS spc2;
+DROP
+
+!\retcode rm -rf /tmp/spc1;
+-- start_ignore
+
+-- end_ignore
+(exited with code 0)
+!\retcode rm -rf /tmp/spc2;
+-- start_ignore
+
+-- end_ignore
+(exited with code 0)
diff --git a/tests/isolation2/expected7/test_per_segment_config.out b/tests/isolation2/expected7/test_per_segment_config.out
new file mode 100644
index 00000000000..79b4a8ffcdc
--- /dev/null
+++ b/tests/isolation2/expected7/test_per_segment_config.out
@@ -0,0 +1,269 @@
+-- Test one session read tablespace segratio,
+-- and at the same time, another session
+-- update or insert the segratio
+
+-- start_ignore
+!\retcode mkdir -p /tmp/spc101;
+-- start_ignore
+
+-- end_ignore
+(exited with code 0)
+-- end_ignore
+CREATE SCHEMA s101;
+CREATE
+DROP TABLESPACE IF EXISTS spc101;
+DROP
+CREATE TABLESPACE spc101 LOCATION '/tmp/spc101';
+CREATE
+
+--
+-- There is no tablesapce per segment quota configed yet
+--
+
+-- Read commited, first set_per_segment_quota, then set_schema_tablespace_quota
+1: BEGIN;
+BEGIN
+1: SELECT diskquota.set_per_segment_quota('spc101', 1);
+ set_per_segment_quota
+-----------------------
+
+(1 row)
+2: BEGIN;
+BEGIN
+2&: SELECT diskquota.set_schema_tablespace_quota('s101', 'spc101','1 MB');
+1: COMMIT;
+COMMIT
+2<: <... completed>
+ set_schema_tablespace_quota
+-----------------------------
+
+(1 row)
+2: COMMIT;
+COMMIT
+
+SELECT segratio FROM diskquota.quota_config, pg_namespace, diskquota.target WHERE targetoid = diskquota.target.rowId AND diskquota.target.primaryOid = oid AND nspname = 's101';
+ segratio
+----------
+ 1
+(1 row)
+SELECT segratio from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'spc101';
+ segratio
+----------
+ 1
+(1 row)
+-- cleanup
+truncate table diskquota.quota_config;
+TRUNCATE
+truncate table diskquota.target;
+TRUNCATE
+
+-- Read commited, first set_schema_tablespace_quota, then set_per_segment_quota,
+1: BEGIN;
+BEGIN
+1: SELECT diskquota.set_schema_tablespace_quota('s101', 'spc101','1 MB');
+ set_schema_tablespace_quota
+-----------------------------
+
+(1 row)
+2: BEGIN;
+BEGIN
+2&: SELECT diskquota.set_per_segment_quota('spc101', 1);
+1: COMMIT;
+COMMIT
+2<: <... completed>
+ set_per_segment_quota
+-----------------------
+
+(1 row)
+2: COMMIT;
+COMMIT
+
+SELECT segratio FROM diskquota.quota_config, pg_namespace, diskquota.target WHERE targetoid = diskquota.target.rowId AND diskquota.target.primaryOid = oid AND nspname = 's101';
+ segratio
+----------
+ 1
+(1 row)
+SELECT segratio from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'spc101';
+ segratio
+----------
+ 1
+(1 row)
+-- cleanup
+truncate table diskquota.quota_config;
+TRUNCATE
+truncate table diskquota.target;
+TRUNCATE
+
+--
+-- There is already a tablesapce per segment quota configed
+--
+
+-- Read commited, first set_per_segment_quota, then set_schema_tablespace_quota
+SELECT diskquota.set_per_segment_quota('spc101', 2);
+ set_per_segment_quota
+-----------------------
+
+(1 row)
+1: BEGIN;
+BEGIN
+1: SELECT diskquota.set_per_segment_quota('spc101', 1);
+ set_per_segment_quota
+-----------------------
+
+(1 row)
+2: BEGIN;
+BEGIN
+2&: SELECT diskquota.set_schema_tablespace_quota('s101', 'spc101','1 MB');
+1: COMMIT;
+COMMIT
+2<: <... completed>
+ set_schema_tablespace_quota
+-----------------------------
+
+(1 row)
+2: COMMIT;
+COMMIT
+
+SELECT segratio FROM diskquota.quota_config, pg_namespace, diskquota.target WHERE targetoid = diskquota.target.rowId AND diskquota.target.primaryOid = oid AND nspname = 's101';
+ segratio
+----------
+ 1
+(1 row)
+SELECT segratio from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'spc101';
+ segratio
+----------
+ 1
+(1 row)
+-- cleanup
+truncate table diskquota.quota_config;
+TRUNCATE
+truncate table diskquota.target;
+TRUNCATE
+
+-- Read commited, first set_schema_tablespace_quota, then set_per_segment_quota,
+SELECT diskquota.set_per_segment_quota('spc101', 2);
+ set_per_segment_quota
+-----------------------
+
+(1 row)
+1: BEGIN;
+BEGIN
+1: SELECT diskquota.set_schema_tablespace_quota('s101', 'spc101','1 MB');
+ set_schema_tablespace_quota
+-----------------------------
+
+(1 row)
+2: BEGIN;
+BEGIN
+2&: SELECT diskquota.set_per_segment_quota('spc101', 1);
+1: COMMIT;
+COMMIT
completed> + set_schema_tablespace_quota +----------------------------- + +(1 row) +2: COMMIT; +COMMIT + +SELECT segratio FROM diskquota.quota_config, pg_namespace, diskquota.target WHERE targetoid = diskquota.target.rowId AND diskquota.target.primaryOid = oid AND nspname = 's101'; + segratio +---------- + 0 +(1 row) +SELECT segratio from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'spc101'; + segratio +---------- +(0 rows) +-- cleanup +truncate table diskquota.quota_config; +TRUNCATE +truncate table diskquota.target; +TRUNCATE + +-- Read commited, first set_schema_tablespace_quota, then delete tablespace per segment ratio +SELECT diskquota.set_per_segment_quota('spc101', 2); + set_per_segment_quota +----------------------- + +(1 row) +1: BEGIN; +BEGIN +1: SELECT diskquota.set_schema_tablespace_quota('s101', 'spc101','1 MB'); + set_schema_tablespace_quota +----------------------------- + +(1 row) +2: BEGIN; +BEGIN +2&: SELECT diskquota.set_per_segment_quota('spc101', -1); +1: COMMIT; +COMMIT +2<: <... completed> + set_per_segment_quota +----------------------- + +(1 row) +2: COMMIT; +COMMIT + +SELECT segratio FROM diskquota.quota_config, pg_namespace, diskquota.target WHERE targetoid = diskquota.target.rowId AND diskquota.target.primaryOid = oid AND nspname = 's101'; + segratio +---------- + 0 +(1 row) +SELECT segratio from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'spc101'; + segratio +---------- +(0 rows) +-- cleanup +truncate table diskquota.quota_config; +TRUNCATE +truncate table diskquota.target; +TRUNCATE +DROP SCHEMA s101; +DROP +DROP TABLESPACE spc101; +DROP diff --git a/tests/isolation2/expected7/test_postmaster_restart.out b/tests/isolation2/expected7/test_postmaster_restart.out new file mode 100644 index 00000000000..5f01eee9379 --- /dev/null +++ b/tests/isolation2/expected7/test_postmaster_restart.out @@ -0,0 +1,139 @@ +!\retcode gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null; +-- start_ignore + +-- end_ignore +(exited with code 0) +!\retcode gpstop -u > /dev/null; +-- start_ignore + +-- end_ignore +(exited with code 0) + +1: CREATE SCHEMA postmaster_restart_s; +CREATE +1: SET search_path TO postmaster_restart_s; +SET + +1: SELECT diskquota.set_schema_quota('postmaster_restart_s', '1 MB'); + set_schema_quota +------------------ + +(1 row) +1: SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect fail +1: CREATE TABLE t1 AS SELECT generate_series(1,10000000); +ERROR: schema's disk space quota exceeded with name: 157893 (seg0 127.0.0.1:6002 pid=1025673) +1q: ... + +-- launcher should exist +-- [p]ostgres is to filter out the pgrep itself +!\retcode pgrep -f "[p]ostgres.*launcher"; +-- start_ignore +2774491 + +-- end_ignore +(exited with code 0) +-- bgworker should exist +!\retcode pgrep -f "[p]ostgres.*diskquota.*isolation2test"; +-- start_ignore +2774659 + +-- end_ignore +(exited with code 0) + +-- stop postmaster +!\retcode pg_ctl -D $MASTER_DATA_DIRECTORY -w stop; +-- start_ignore +waiting for server to shut down.... 
done +server stopped +-- end_ignore +(exited with code 0) + +-- launcher should be terminated +!\retcode pgrep -f "[p]ostgres.*launcher"; +-- start_ignore + +-- end_ignore +(exited with code 1) +-- bgworker should be terminated +!\retcode pgrep -f "[p]ostgres.*diskquota.*isolation2test"; +-- start_ignore + +-- end_ignore +(exited with code 1) + +-- start postmaster +-- -E needs to be changed to "-c gp_role=dispatch" for GPDB7 +-- See https://github.com/greenplum-db/gpdb/pull/9396 +!\retcode pg_ctl -D $MASTER_DATA_DIRECTORY -w -o "-E" start; +-- start_ignore +waiting for server to start....2022-02-14 21:41:39.147869 CST,,,p1017570,th1516906368,,,,0,,,seg-1,,,,,"LOG","00000","registering background worker ""ftsprobe process""",,,,,,,,"RegisterBackgroundWorker","bgworker.c",773, +2022-02-14 21:41:39.147899 CST,,,p1017570,th1516906368,,,,0,,,seg-1,,,,,"LOG","00000","registering background worker ""dtx recovery process""",,,,,,,,"RegisterBackgroundWorker","bgworker.c",773, +2022-02-14 21:41:39.147934 CST,,,p1017570,th1516906368,,,,0,,,seg-1,,,,,"LOG","00000","registering background worker ""sweeper process""",,,,,,,,"RegisterBackgroundWorker","bgworker.c",773, +2022-02-14 21:41:39.148550 CST,,,p1017570,th1516906368,,,,0,,,seg-1,,,,,"LOG","00000","registering background worker ""[diskquota] - launcher""",,,,,,,,"RegisterBackgroundWorker","bgworker.c",773, +2022-02-14 21:41:39.272714 CST,,,p1017570,th1516906368,,,,0,,,seg-1,,,,,"LOG","00000","redirecting log output to logging collector process",,"Future log output will appear in directory ""pg_log"".",,,,,,"SysLogger_Start","syslogger.c",986, + done +server started + +-- end_ignore +(exited with code 0) +-- Hopefully the bgworker can be started in 5 seconds +!\retcode sleep 5; +-- start_ignore + +-- end_ignore +(exited with code 0) + +-- launcher should be restarted +!\retcode pgrep -f "[p]ostgres.*launcher"; +-- start_ignore +2771049 + +-- end_ignore +(exited with code 0) +-- bgworker should be restarted +!\retcode pgrep -f "[p]ostgres.*diskquota.*isolation2test"; +-- start_ignore +2771074 + +-- end_ignore +(exited with code 0) + +1: SET search_path TO postmaster_restart_s; +SET +1: SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +-- expect fail +1: CREATE TABLE t2 AS SELECT generate_series(1,10000000); +ERROR: schema's disk space quota exceeded with name: 158089 (seg0 127.0.0.1:6002 pid=1027799) +-- enlarge the quota limits +1: SELECT diskquota.set_schema_quota('postmaster_restart_s', '100 MB'); + set_schema_quota +------------------ + +(1 row) +1: SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +-- expect succeed +1: CREATE TABLE t3 AS SELECT generate_series(1,1000000); +CREATE 1000000 + +1: DROP SCHEMA postmaster_restart_s CASCADE; +DROP +1q: ... +!\retcode gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null; +-- start_ignore + +-- end_ignore +(exited with code 0) diff --git a/tests/isolation2/expected7/test_rejectmap.out b/tests/isolation2/expected7/test_rejectmap.out new file mode 100644 index 00000000000..5e15acceb67 --- /dev/null +++ b/tests/isolation2/expected7/test_rejectmap.out @@ -0,0 +1,738 @@ +-- +-- This file contains tests for dispatching rejectmap and canceling +-- queries in smgrextend hook by relation's relfilenode. +-- + +-- Enable check quota by relfilenode on seg0. 
+SELECT gp_inject_fault_infinite('enable_check_quota_by_relfilenode', 'skip', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0;
+ gp_inject_fault_infinite
+--------------------------
+ Success:
+(1 row)
+
+-- This function returns a valid tablespaceoid.
+-- For role/namespace quota, return it as is.
+-- For namespace_tablespace/role_tablespace quota, return non-zero tablespaceoid.
+CREATE OR REPLACE FUNCTION get_real_tablespace_oid(block_type text, tablespaceoid oid) /*in func*/ RETURNS oid AS /*in func*/ $$ /*in func*/ BEGIN /*in func*/ /*in func*/ CASE /*in func*/ WHEN (block_type = 'NAMESPACE') OR (block_type = 'ROLE') THEN RETURN tablespaceoid; /*in func*/ ELSE RETURN ( /*in func*/ CASE tablespaceoid /*in func*/ WHEN 0 THEN (SELECT dattablespace FROM pg_database WHERE datname = CURRENT_DATABASE()) /*in func*/ ELSE /*in func*/ tablespaceoid /*in func*/ END /*in func*/ ); /*in func*/ END CASE; /*in func*/ END; /*in func*/ $$ LANGUAGE plpgsql; /*in func*/
+CREATE OR REPLACE FUNCTION block_relation_on_seg0(rel regclass, block_type text, segexceeded boolean) RETURNS void AS $$ /*in func*/ DECLARE /*in func*/ bt int; /*in func*/ targetoid oid; /*in func*/ BEGIN /*in func*/ CASE block_type /*in func*/ WHEN 'NAMESPACE' THEN /*in func*/ bt = 0; /*in func*/ SELECT relnamespace INTO targetoid /*in func*/ FROM pg_class WHERE relname=rel::text; /*in func*/ WHEN 'ROLE' THEN /*in func*/ bt = 1; /*in func*/ SELECT relowner INTO targetoid /*in func*/ FROM pg_class WHERE relname=rel::text; /*in func*/ WHEN 'NAMESPACE_TABLESPACE' THEN /*in func*/ bt = 2; /*in func*/ SELECT relnamespace INTO targetoid /*in func*/ FROM pg_class WHERE relname=rel::text; /*in func*/ WHEN 'ROLE_TABLESPACE' THEN /*in func*/ bt = 3; /*in func*/ SELECT relowner INTO targetoid /*in func*/ FROM pg_class WHERE relname=rel::text; /*in func*/ END CASE; /*in func*/ PERFORM diskquota.refresh_rejectmap( /*in func*/ ARRAY[ /*in func*/ ROW (targetoid, /*in func*/ (SELECT oid FROM pg_database WHERE datname = CURRENT_DATABASE()), /*in func*/ (SELECT get_real_tablespace_oid( /*in func*/ block_type, /*in func*/ (SELECT pg_class.reltablespace FROM pg_class WHERE relname = rel::TEXT) /*in func*/ )), /*in func*/ bt, /*in func*/ segexceeded) /*in func*/ ]::diskquota.rejectmap_entry[], /*in func*/ ARRAY[rel]::oid[]) /*in func*/ FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; /*in func*/ END; $$ /*in func*/ LANGUAGE 'plpgsql';
+CREATE
+
+-- 1. Test canceling the extending of an ordinary table.
+CREATE TABLE blocked_t1(i int) DISTRIBUTED BY (i);
+CREATE
+INSERT INTO blocked_t1 SELECT generate_series(1, 100);
+INSERT 100
+-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0.
+SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0;
+ gp_inject_fault_infinite
+--------------------------
+ Success:
+(1 row)
+
+-- Insert a small amount of data into blocked_t1. It will hang up at check_rejectmap_by_relfilenode().
+1&: INSERT INTO blocked_t1 SELECT generate_series(1, 10000);
+
+SELECT gp_wait_until_triggered_fault('check_rejectmap_by_relfilenode', 1, dbid) FROM gp_segment_configuration WHERE role='p' AND content=0;
+ gp_wait_until_triggered_fault
+-------------------------------
+ Success:
+(1 row)
+
+-- Dispatch rejectmap to seg0. 
+SELECT block_relation_on_seg0('blocked_t1'::regclass, 'NAMESPACE'::text, false); + block_relation_on_seg0 +------------------------ + +(1 row) + +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) + +-- Session 1 will return and emit an error message saying that the quota limit is exceeded on seg0. +1<: <... completed> +ERROR: schema's disk space quota exceeded with name: 2200 (seg0 127.0.0.1:6002 pid=2163) + +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_rejectmap +------------------- + +(1 row) + +-- 2. Test canceling the extending of a toast relation. +CREATE TABLE blocked_t2(i text) DISTRIBUTED BY (i); +CREATE +INSERT INTO blocked_t2 SELECT generate_series(1, 100); +INSERT 100 +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) + +-- Insert a small amount of data into blocked_t2. It will hang up at check_rejectmap_by_relfilenode(). +1&: INSERT INTO blocked_t2 SELECT generate_series(1, 10000); + +SELECT gp_wait_until_triggered_fault('check_rejectmap_by_relfilenode', 1, dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_wait_until_triggered_fault +------------------------------- + Success: +(1 row) + +-- Dispatch rejectmap to seg0. +SELECT block_relation_on_seg0('blocked_t2'::regclass, 'NAMESPACE'::text, false); + block_relation_on_seg0 +------------------------ + +(1 row) + +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) + +-- Session 1 will return and emit an error message saying that the quota limit is exceeded on seg0. +1<: <... completed> +ERROR: schema's disk space quota exceeded with name: 2200 (seg0 127.0.0.1:6002 pid=2163) + +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_rejectmap +------------------- + +(1 row) + +-- 3. Test canceling the extending of an appendonly relation. +CREATE TABLE blocked_t3(i int) WITH (appendonly=true) DISTRIBUTED BY (i); +CREATE +INSERT INTO blocked_t3 SELECT generate_series(1, 100); +INSERT 100 +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) + +-- Insert a small amount of data into blocked_t3. It will hang up at check_rejectmap_by_relfilenode(). +1&: INSERT INTO blocked_t3 SELECT generate_series(1, 10000); + +SELECT gp_wait_until_triggered_fault('check_rejectmap_by_relfilenode', 1, dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_wait_until_triggered_fault +------------------------------- + Success: +(1 row) + +-- Dispatch rejectmap to seg0. 
+SELECT block_relation_on_seg0('blocked_t3'::regclass, 'NAMESPACE'::text, false); + block_relation_on_seg0 +------------------------ + +(1 row) + +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) + +-- Session 1 will return and emit an error message saying that the quota limit is exceeded on seg0. +1<: <... completed> +ERROR: schema's disk space quota exceeded with name: 2200 (seg0 127.0.0.1:6002 pid=2163) + +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_rejectmap +------------------- + +(1 row) + +-- 4. Test canceling the extending of an index relation. +CREATE TABLE blocked_t4(i int) DISTRIBUTED BY (i); +CREATE +CREATE INDEX blocked_t4_index ON blocked_t4(i); +CREATE +INSERT INTO blocked_t4 SELECT generate_series(1, 100); +INSERT 100 +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) + +-- Insert a small amount of data into blocked_t4. It will hang up at check_rejectmap_by_relfilenode(). +1&: INSERT INTO blocked_t4 SELECT generate_series(1, 10000); + +SELECT gp_wait_until_triggered_fault('check_rejectmap_by_relfilenode', 1, dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_wait_until_triggered_fault +------------------------------- + Success: +(1 row) + +-- Dispatch rejectmap to seg0. +SELECT block_relation_on_seg0('blocked_t4_index'::regclass, 'NAMESPACE'::text, false); + block_relation_on_seg0 +------------------------ + +(1 row) + +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) + +-- Session 1 will return and emit an error message saying that the quota limit is exceeded on seg0. +1<: <... completed> +ERROR: schema's disk space quota exceeded with name: 2200 (seg0 127.0.0.1:6002 pid=2163) + +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_rejectmap +------------------- + +(1 row) + +-- 5. Test error message for NAMESPACE_TABLESPACE_QUOTA when the quota limit is exceeded on segments. +CREATE TABLE blocked_t5(i int) DISTRIBUTED BY (i); +CREATE +INSERT INTO blocked_t5 SELECT generate_series(1, 100); +INSERT 100 +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. 
+SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +1&: INSERT INTO blocked_t5 SELECT generate_series(1, 10000); + +SELECT gp_wait_until_triggered_fault('check_rejectmap_by_relfilenode', 1, dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_wait_until_triggered_fault +------------------------------- + Success: +(1 row) + +SELECT block_relation_on_seg0('blocked_t5'::regclass, 'NAMESPACE_TABLESPACE'::text, true); + block_relation_on_seg0 +------------------------ + +(1 row) +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +1<: <... completed> +ERROR: tablespace: 1663, schema: 2200 diskquota exceeded per segment quota (seg0 127.0.0.1:6002 pid=2163) +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_rejectmap +------------------- + +(1 row) + +-- 6. Test error message for ROLE_TABLESPACE_QUOTA when the quota limit is exceeded on segments. +CREATE TABLE blocked_t6(i int) DISTRIBUTED BY (i); +CREATE +INSERT INTO blocked_t6 SELECT generate_series(1, 100); +INSERT 100 +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) + +1&: INSERT INTO blocked_t6 SELECT generate_series(1, 10000); + +SELECT gp_wait_until_triggered_fault('check_rejectmap_by_relfilenode', 1, dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_wait_until_triggered_fault +------------------------------- + Success: +(1 row) + +SELECT block_relation_on_seg0('blocked_t6'::regclass, 'ROLE_TABLESPACE'::text, true); + block_relation_on_seg0 +------------------------ + +(1 row) +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +1<: <... completed> +ERROR: tablespace: 1663, role: 10 diskquota exceeded per segment quota (seg0 127.0.0.1:6002 pid=2163) +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_rejectmap +------------------- + +(1 row) + +-- Do some clean-ups. +DROP TABLE blocked_t1; +DROP +DROP TABLE blocked_t2; +DROP +DROP TABLE blocked_t3; +DROP +DROP TABLE blocked_t4; +DROP +DROP TABLE blocked_t5; +DROP +DROP TABLE blocked_t6; +DROP + +-- +-- Below are helper functions for testing adding uncommitted relations to rejectmap. +-- +-- start_ignore +CREATE OR REPLACE LANGUAGE plpythonu; +CREATE +-- end_ignore +CREATE TYPE cached_relation_entry AS ( reloid oid, relname text, relowner oid, relnamespace oid, reltablespace oid, relfilenode oid, segid int); +CREATE + +-- This function dumps given relation_cache entries to the given file. 
+CREATE OR REPLACE FUNCTION dump_relation_cache_to_file(filename text) RETURNS void AS $$ rv = plpy.execute(""" SELECT (oid, relname, relowner, relnamespace, reltablespace, relfilenode, gp_segment_id)::cached_relation_entry FROM gp_dist_random('pg_class') """) with open(filename, 'wt') as f: for v in rv: f.write(v['row'][1:-1] + '\n') $$ LANGUAGE plpythonu;
+CREATE
+
+-- This function reads relation_cache entries from the given file.
+CREATE OR REPLACE FUNCTION read_relation_cache_from_file(filename text) RETURNS SETOF cached_relation_entry AS $$ with open(filename) as f: for l in f: r = l.split(',') yield (r[0], r[1], r[2], r[3], r[4], r[5], r[6]) $$ LANGUAGE plpythonu;
+CREATE
+
+-- This function replaces the oid that appears in the auxiliary relation's name
+-- with the corresponding relname of that oid.
+CREATE OR REPLACE FUNCTION replace_oid_with_relname(given_name text, filename text) RETURNS text AS $$ /*in func*/ BEGIN /*in func*/ RETURN COALESCE( /*in func*/ REGEXP_REPLACE(given_name, /*in func*/ '^(pg_toast_|pg_aoseg_|pg_aovisimap_|pg_aoblkdir_|pg_aocsseg_)\d+', /*in func*/ '\1' || /*in func*/ (SELECT DISTINCT relname FROM read_relation_cache_from_file(filename) /*in func*/ WHERE REGEXP_REPLACE(given_name, '\D', '', 'g') <> '' AND reloid=REGEXP_REPLACE(given_name, '\D', '', 'g')::oid), 'g'), given_name);/*in func*/ END; /*in func*/ $$ LANGUAGE plpgsql;
+CREATE
+
+-- This function helps dispatch rejectmap for the given relation to seg0.
+CREATE OR REPLACE FUNCTION block_uncommitted_relation_on_seg0(rel text, block_type text, segexceeded boolean, filename text) RETURNS void AS $$ /*in func*/ DECLARE /*in func*/ bt int; /*in func*/ targetoid oid; /*in func*/ BEGIN /*in func*/ CASE block_type /*in func*/ WHEN 'NAMESPACE' THEN /*in func*/ bt = 0; /*in func*/ SELECT relnamespace INTO targetoid /*in func*/ FROM read_relation_cache_from_file(filename) /*in func*/ WHERE relname=rel::text AND segid=0; /*in func*/ WHEN 'ROLE' THEN /*in func*/ bt = 1; /*in func*/ SELECT relowner INTO targetoid /*in func*/ FROM read_relation_cache_from_file(filename) /*in func*/ WHERE relname=rel::text AND segid=0; /*in func*/ WHEN 'NAMESPACE_TABLESPACE' THEN /*in func*/ bt = 2; /*in func*/ SELECT relnamespace INTO targetoid /*in func*/ FROM read_relation_cache_from_file(filename) /*in func*/ WHERE relname=rel::text AND segid=0; /*in func*/ WHEN 'ROLE_TABLESPACE' THEN /*in func*/ bt = 3; /*in func*/ SELECT relowner INTO targetoid /*in func*/ FROM read_relation_cache_from_file(filename) /*in func*/ WHERE relname=rel::text AND segid=0; /*in func*/ END CASE; /*in func*/ PERFORM diskquota.refresh_rejectmap( /*in func*/ ARRAY[ /*in func*/ ROW (targetoid, /*in func*/ (SELECT oid FROM pg_database WHERE datname = CURRENT_DATABASE()), /*in func*/ (SELECT get_real_tablespace_oid( /*in func*/ block_type, /*in func*/ (SELECT reltablespace /*in func*/ FROM read_relation_cache_from_file(filename) /*in func*/ WHERE relname = rel::text /*in func*/ AND segid = 0) /*in func*/ )), /*in func*/ bt, /*in func*/ segexceeded) /*in func*/ ]::diskquota.rejectmap_entry[], /*in func*/ ARRAY[(SELECT reloid FROM read_relation_cache_from_file(filename) /*in func*/ WHERE relname=rel::text AND segid=0)::regclass]::oid[]) /*in func*/ FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; /*in func*/ END; $$ /*in func*/ LANGUAGE 'plpgsql';
+CREATE
+
+-- 7. Test that we are able to block an ordinary relation on seg0 by its relnamespace. 
+1: BEGIN; +BEGIN +1: CREATE TABLE blocked_t7(i int) DISTRIBUTED BY (i); +CREATE +1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); + dump_relation_cache_to_file +----------------------------- + +(1 row) +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +-- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). +1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE'::text, false, '/tmp/test_rejectmap.csv'::text); + block_uncommitted_relation_on_seg0 +------------------------------------ + +(1 row) +-- Show that blocked_t7 is blocked on seg0. +2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text), be.target_type, be.target_oid FROM gp_dist_random('diskquota.rejectmap') AS be, read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0; + segid | relnamespace | reltablespace | relowner | replace_oid_with_relname | target_type | target_oid +-------+--------------+---------------+----------+--------------------------+-----------------+------------ + 0 | 2200 | 0 | 10 | blocked_t7 | NAMESPACE_QUOTA | 2200 +(1 row) +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +1<: <... completed> +ERROR: schema's disk space quota exceeded with name: 2200 (seg0 127.0.0.1:6002 pid=2163) +1: ABORT; +ABORT +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_rejectmap +------------------- + +(1 row) + +-- 8. Test that we are able to block an ordinary relation on seg0 by its relowner. +1: BEGIN; +BEGIN +1: CREATE TABLE blocked_t7(i int) DISTRIBUTED BY (i); +CREATE +1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); + dump_relation_cache_to_file +----------------------------- + +(1 row) +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +-- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). +1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'ROLE'::text, false, '/tmp/test_rejectmap.csv'::text); + block_uncommitted_relation_on_seg0 +------------------------------------ + +(1 row) +-- Show that blocked_t7 is blocked on seg0. 
+2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text), be.target_type, be.target_oid FROM gp_dist_random('diskquota.rejectmap') AS be, read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0; + segid | relnamespace | reltablespace | relowner | replace_oid_with_relname | target_type | target_oid +-------+--------------+---------------+----------+--------------------------+-------------+------------ + 0 | 2200 | 0 | 10 | blocked_t7 | ROLE_QUOTA | 10 +(1 row) +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +1<: <... completed> +ERROR: role's disk space quota exceeded with name: 10 (seg0 127.0.0.1:6002 pid=2163) +1: ABORT; +ABORT +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_rejectmap +------------------- + +(1 row) + +-- 9. Test that we are able to block an ordinary relation on seg0 by its relnamespace and reltablespace. +1: BEGIN; +BEGIN +1: CREATE TABLE blocked_t7(i int) DISTRIBUTED BY (i); +CREATE +1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); + dump_relation_cache_to_file +----------------------------- + +(1 row) +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +-- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). +1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE_TABLESPACE'::text, false, '/tmp/test_rejectmap.csv'::text); + block_uncommitted_relation_on_seg0 +------------------------------------ + +(1 row) +-- Show that blocked_t7 is blocked on seg0. +2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text), be.target_type, be.target_oid FROM gp_dist_random('diskquota.rejectmap') AS be, read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0; + segid | relnamespace | reltablespace | relowner | replace_oid_with_relname | target_type | target_oid +-------+--------------+---------------+----------+--------------------------+----------------------------+------------ + 0 | 2200 | 0 | 10 | blocked_t7 | NAMESPACE_TABLESPACE_QUOTA | 2200 +(1 row) +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +1<: <... completed> +ERROR: tablespace: 1663, schema: 2200 diskquota exceeded (seg0 127.0.0.1:6002 pid=2163) +1: ABORT; +ABORT +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_rejectmap +------------------- + +(1 row) + +-- 10. 
Test that we are able to block an ordinary relation on seg0 by its relowner and reltablespace. +1: BEGIN; +BEGIN +1: CREATE TABLE blocked_t7(i int) DISTRIBUTED BY (i); +CREATE +1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); + dump_relation_cache_to_file +----------------------------- + +(1 row) +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +-- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). +1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'ROLE_TABLESPACE'::text, false, '/tmp/test_rejectmap.csv'::text); + block_uncommitted_relation_on_seg0 +------------------------------------ + +(1 row) +-- Show that blocked_t7 is blocked on seg0. +2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text), be.target_type, be.target_oid FROM gp_dist_random('diskquota.rejectmap') AS be, read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0; + segid | relnamespace | reltablespace | relowner | replace_oid_with_relname | target_type | target_oid +-------+--------------+---------------+----------+--------------------------+-----------------------+------------ + 0 | 2200 | 0 | 10 | blocked_t7 | ROLE_TABLESPACE_QUOTA | 10 +(1 row) +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +1<: <... completed> +ERROR: tablespace: 1663, role: 10 diskquota exceeded (seg0 127.0.0.1:6002 pid=2163) +1: ABORT; +ABORT +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_rejectmap +------------------- + +(1 row) + +-- 11. Test that we are able to block an ordinary relation on seg0 by its relnamespace and reltablespace (segexceeded=true). +1: BEGIN; +BEGIN +1: CREATE TABLE blocked_t7(i int) DISTRIBUTED BY (i); +CREATE +1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); + dump_relation_cache_to_file +----------------------------- + +(1 row) +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +-- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). +1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE_TABLESPACE'::text, true, '/tmp/test_rejectmap.csv'::text); + block_uncommitted_relation_on_seg0 +------------------------------------ + +(1 row) +-- Show that blocked_t7 is blocked on seg0. 
+2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text), be.target_type, be.target_oid FROM gp_dist_random('diskquota.rejectmap') AS be, read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0; + segid | relnamespace | reltablespace | relowner | replace_oid_with_relname | target_type | target_oid +-------+--------------+---------------+----------+--------------------------+----------------------------+------------ + 0 | 2200 | 0 | 10 | blocked_t7 | NAMESPACE_TABLESPACE_QUOTA | 2200 +(1 row) +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +1<: <... completed> +ERROR: tablespace: 1663, schema: 2200 diskquota exceeded per segment quota (seg0 127.0.0.1:6002 pid=2163) +1: ABORT; +ABORT +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_rejectmap +------------------- + +(1 row) + +-- 12. Test that we are able to block an ordinary relation on seg0 by its relowner and reltablespace (segexceeded=true). +1: BEGIN; +BEGIN +1: CREATE TABLE blocked_t7(i int) DISTRIBUTED BY (i); +CREATE +1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); + dump_relation_cache_to_file +----------------------------- + +(1 row) +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +-- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). +1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'ROLE_TABLESPACE'::text, true, '/tmp/test_rejectmap.csv'::text); + block_uncommitted_relation_on_seg0 +------------------------------------ + +(1 row) +-- Show that blocked_t7 is blocked on seg0. +2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text), be.target_type, be.target_oid FROM gp_dist_random('diskquota.rejectmap') AS be, read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0; + segid | relnamespace | reltablespace | relowner | replace_oid_with_relname | target_type | target_oid +-------+--------------+---------------+----------+--------------------------+-----------------------+------------ + 0 | 2200 | 0 | 10 | blocked_t7 | ROLE_TABLESPACE_QUOTA | 10 +(1 row) +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +1<: <... completed> +ERROR: tablespace: 1663, role: 10 diskquota exceeded per segment quota (seg0 127.0.0.1:6002 pid=2163) +1: ABORT; +ABORT +-- Clean up the rejectmap on seg0. 
+SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_rejectmap +------------------- + +(1 row) + +-- 13. Test that we are able to block a toast relation on seg0 by its namespace. +1: BEGIN; +BEGIN +1: CREATE TABLE blocked_t7(i text) DISTRIBUTED BY (i); +CREATE +1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); + dump_relation_cache_to_file +----------------------------- + +(1 row) +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +-- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). +1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE'::text, true, '/tmp/test_rejectmap.csv'::text); + block_uncommitted_relation_on_seg0 +------------------------------------ + +(1 row) +-- Show that blocked_t7 is blocked on seg0. +2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text) AS relname, be.target_type, be.target_oid FROM gp_dist_random('diskquota.rejectmap') AS be, read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0 ORDER BY relname DESC; + segid | relnamespace | reltablespace | relowner | relname | target_type | target_oid +-------+--------------+---------------+----------+---------------------------+-----------------+------------ + 0 | 99 | 0 | 10 | pg_toast_blocked_t7_index | NAMESPACE_QUOTA | 2200 + 0 | 99 | 0 | 10 | pg_toast_blocked_t7 | NAMESPACE_QUOTA | 2200 + 0 | 2200 | 0 | 10 | blocked_t7 | NAMESPACE_QUOTA | 2200 +(3 rows) +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +1<: <... completed> +ERROR: schema's disk space quota exceeded with name: 2200 (seg0 127.0.0.1:6002 pid=2163) +1: ABORT; +ABORT +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_rejectmap +------------------- + +(1 row) + +-- 14. Test that we are able to block an appendonly relation on seg0 by its namespace. +1: BEGIN; +BEGIN +1: CREATE TABLE blocked_t7(i int) WITH (appendonly=true) DISTRIBUTED BY (i); +CREATE +1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); + dump_relation_cache_to_file +----------------------------- + +(1 row) +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +-- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). 
+1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE'::text, true, '/tmp/test_rejectmap.csv'::text); + block_uncommitted_relation_on_seg0 +------------------------------------ + +(1 row) +-- Show that blocked_t7 is blocked on seg0. +2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text) AS relname, be.target_type, be.target_oid FROM gp_dist_random('diskquota.rejectmap') AS be, read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0 ORDER BY relname DESC; + segid | relnamespace | reltablespace | relowner | relname | target_type | target_oid +-------+--------------+---------------+----------+-------------------------------+-----------------+------------ + 0 | 6104 | 0 | 10 | pg_aovisimap_blocked_t7_index | NAMESPACE_QUOTA | 2200 + 0 | 6104 | 0 | 10 | pg_aovisimap_blocked_t7 | NAMESPACE_QUOTA | 2200 + 0 | 6104 | 0 | 10 | pg_aoseg_blocked_t7 | NAMESPACE_QUOTA | 2200 + 0 | 2200 | 0 | 10 | blocked_t7 | NAMESPACE_QUOTA | 2200 +(4 rows) +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +1<: <... completed> +ERROR: schema's disk space quota exceeded with name: 2200 (seg0 127.0.0.1:6002 pid=2163) +1: ABORT; +ABORT +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_rejectmap +------------------- + +(1 row) + +-- 15. Test that we are able to block an appendonly (column oriented) relation on seg0 by its namespace. +1: BEGIN; +BEGIN +1: CREATE TABLE blocked_t7(i int) WITH (appendonly=true, orientation=column) DISTRIBUTED BY (i); +CREATE +1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); + dump_relation_cache_to_file +----------------------------- + +(1 row) +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +-- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). +1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE'::text, true, '/tmp/test_rejectmap.csv'::text); + block_uncommitted_relation_on_seg0 +------------------------------------ + +(1 row) +-- Show that blocked_t7 is blocked on seg0. 
+2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text) AS relname, be.target_type, be.target_oid FROM gp_dist_random('diskquota.rejectmap') AS be, read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0 ORDER BY relname DESC;
+ segid | relnamespace | reltablespace | relowner |            relname            |   target_type   | target_oid
+-------+--------------+---------------+----------+-------------------------------+-----------------+------------
+     0 |         6104 |             0 |       10 | pg_aovisimap_blocked_t7_index | NAMESPACE_QUOTA |       2200
+     0 |         6104 |             0 |       10 | pg_aovisimap_blocked_t7       | NAMESPACE_QUOTA |       2200
+     0 |         6104 |             0 |       10 | pg_aocsseg_blocked_t7         | NAMESPACE_QUOTA |       2200
+     0 |         2200 |             0 |       10 | blocked_t7                    | NAMESPACE_QUOTA |       2200
+(4 rows)
+SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0;
+ gp_inject_fault_infinite
+--------------------------
+ Success:
+(1 row)
+1<: <... completed>
+ERROR: schema's disk space quota exceeded with name: 2200 (seg0 127.0.0.1:6002 pid=2163)
+1: ABORT;
+ABORT
+-- Clean up the rejectmap on seg0.
+SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0;
+ refresh_rejectmap
+-------------------
+
+(1 row)
+
+-- Reset fault injection points set by us at the top of this test.
+SELECT gp_inject_fault_infinite('enable_check_quota_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0;
+ gp_inject_fault_infinite
+--------------------------
+ Success:
+(1 row)
diff --git a/tests/isolation2/expected7/test_relation_cache.out b/tests/isolation2/expected7/test_relation_cache.out
new file mode 100644
index 00000000000..df61fdb810f
--- /dev/null
+++ b/tests/isolation2/expected7/test_relation_cache.out
@@ -0,0 +1,70 @@
+CREATE DATABASE tempdb1;
+CREATE
+CREATE DATABASE tempdb2;
+CREATE
+
+-- prepare extension
+1:@db_name tempdb1: CREATE EXTENSION diskquota;
+CREATE
+1:@db_name tempdb1: SELECT diskquota.wait_for_worker_new_epoch();
+ wait_for_worker_new_epoch
+---------------------------
+ t
+(1 row)
+2:@db_name tempdb2: CREATE EXTENSION diskquota;
+CREATE
+2:@db_name tempdb2: SELECT diskquota.wait_for_worker_new_epoch();
+ wait_for_worker_new_epoch
+---------------------------
+ t
+(1 row)
+
+-- create a table in tempdb1
+1:@db_name tempdb1: BEGIN;
+BEGIN
+1:@db_name tempdb1: CREATE TABLE t(i int);
+CREATE
+1:@db_name tempdb1: INSERT INTO t select generate_series(1, 10000);
+INSERT 10000
+
+-- query relation_cache in tempdb2
+2:@db_name tempdb2: SELECT count(*) from diskquota.show_relation_cache();
+ count
+-------
+     0
+(1 row)
+
+1:@db_name tempdb1: ABORT;
+ABORT
+
+1:@db_name tempdb1: SELECT diskquota.pause();
+ pause
+-------
+
+(1 row)
+1:@db_name tempdb1: SELECT diskquota.wait_for_worker_new_epoch();
+ wait_for_worker_new_epoch
+---------------------------
+ t
+(1 row)
+1:@db_name tempdb1: DROP EXTENSION diskquota;
+DROP
+2:@db_name tempdb2: SELECT diskquota.pause();
+ pause
+-------
+
+(1 row)
+2:@db_name tempdb2: SELECT diskquota.wait_for_worker_new_epoch();
+ wait_for_worker_new_epoch
+---------------------------
+ t
+(1 row)
+2:@db_name tempdb2: DROP EXTENSION diskquota;
+DROP
+1q: ...
+2q: ... 
+
+DROP DATABASE tempdb1;
+DROP
+DROP DATABASE tempdb2;
+DROP
diff --git a/tests/isolation2/expected7/test_relation_size.out b/tests/isolation2/expected7/test_relation_size.out
new file mode 100644
index 00000000000..45e9a9cc149
--- /dev/null
+++ b/tests/isolation2/expected7/test_relation_size.out
@@ -0,0 +1,87 @@
+--
+-- 1. Test that when a relation is dropped before diskquota.relation_size()
+-- applies stat(2) on the physical file, diskquota.relation_size() consumes
+-- the error and returns 0.
+--
+
+CREATE TABLE t_dropped(i int) DISTRIBUTED BY (i);
+CREATE
+-- Insert a small amount of data into 't_dropped'.
+INSERT INTO t_dropped SELECT generate_series(1, 100);
+INSERT 100
+-- Shows that the size of the relfilenode is not zero.
+SELECT diskquota.relation_size('t_dropped');
+ relation_size
+---------------
+         98304
+(1 row)
+
+-- Inject 'suspension' to servers.
+SELECT gp_inject_fault_infinite('diskquota_before_stat_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content>=0;
+ gp_inject_fault_infinite
+--------------------------
+ Success:
+ Success:
+ Success:
+(3 rows)
+
+-- Session 1 will hang before applying stat(2) to the physical file.
+1&: SELECT diskquota.relation_size('t_dropped');
+-- Wait until the fault is triggered to avoid the following race condition:
+-- The 't_dropped' table is dropped before evaluating "SELECT diskquota.relation_size('t_dropped')"
+-- and the query will fail with 'ERROR: relation "t_dropped" does not exist'
+SELECT gp_wait_until_triggered_fault('diskquota_before_stat_relfilenode', 1, dbid) FROM gp_segment_configuration WHERE role='p' AND content>=0;
+ gp_wait_until_triggered_fault
+-------------------------------
+ Success:
+ Success:
+ Success:
+(3 rows)
+-- Drop the table.
+DROP TABLE t_dropped;
+DROP
+-- Remove the injected 'suspension'.
+SELECT gp_inject_fault_infinite('diskquota_before_stat_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content>=0;
+ gp_inject_fault_infinite
+--------------------------
+ Success:
+ Success:
+ Success:
+(3 rows)
+-- Session 1 will continue and return 0.
+1<: <... completed>
+ relation_size
+---------------
+             0
+(1 row)
+
+-- 2. Test whether relation size is correct under concurrent writes for AO tables.
+-- Since no row is deleted, diskquota.relation_size() should be equal to
+-- pg_relation_size().
+
+CREATE TABLE t_ao(i int) WITH (appendonly=true) DISTRIBUTED BY (i);
+CREATE
+1: BEGIN;
+BEGIN
+1: INSERT INTO t_ao SELECT generate_series(1, 10000);
+INSERT 10000
+2: BEGIN;
+BEGIN
+2: INSERT INTO t_ao SELECT generate_series(1, 10000);
+INSERT 10000
+1: COMMIT;
+COMMIT
+2: COMMIT;
+COMMIT
+SELECT diskquota.relation_size('t_ao');
+ relation_size
+---------------
+        200400
+(1 row)
+SELECT pg_relation_size('t_ao');
+ pg_relation_size
+------------------
+           200400
+(1 row)
+DROP TABLE t_ao;
+DROP
diff --git a/tests/isolation2/expected7/test_truncate.out b/tests/isolation2/expected7/test_truncate.out
new file mode 100644
index 00000000000..d176b404eda
--- /dev/null
+++ b/tests/isolation2/expected7/test_truncate.out
@@ -0,0 +1,79 @@
+-- Test various race conditions for TRUNCATE.
+
+-- Case 1: Pulling active table before swapping relfilenode
+CREATE TABLE dummy_t1(i int) DISTRIBUTED BY (i);
+CREATE
+INSERT INTO dummy_t1 SELECT generate_series(1, 1000);
+INSERT 1000
+-- Wait for the diskquota bgworker to refresh the size of 'dummy_t1'.
+SELECT diskquota.wait_for_worker_new_epoch();
+ wait_for_worker_new_epoch
+---------------------------
+ t
+(1 row)
+-- Shows that the result of pg_table_size() and diskquota.table_size are identical.
+SELECT pg_table_size('dummy_t1');
+ pg_table_size
+---------------
+         98304
+(1 row)
+SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid='dummy_t1'::regclass ORDER BY segid;
+ tableid  | size  | segid
+----------+-------+-------
+ dummy_t1 | 98304 |    -1
+ dummy_t1 | 32768 |     0
+ dummy_t1 | 32768 |     1
+ dummy_t1 | 32768 |     2
+(4 rows)
+
+SELECT gp_inject_fault_infinite('diskquota_after_smgrcreate', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content<>-1;
+ gp_inject_fault_infinite
+--------------------------
+ Success:
+ Success:
+ Success:
+(3 rows)
+SELECT diskquota.wait_for_worker_new_epoch();
+ wait_for_worker_new_epoch
+---------------------------
+ t
+(1 row)
+1&: TRUNCATE dummy_t1;
+-- Wait for the diskquota bgworker to 'consume' the newly created relfilenode from shmem.
+SELECT diskquota.wait_for_worker_new_epoch();
+ wait_for_worker_new_epoch
+---------------------------
+ t
+(1 row)
+SELECT gp_inject_fault_infinite('diskquota_after_smgrcreate', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content<>-1;
+ gp_inject_fault_infinite
+--------------------------
+ Success:
+ Success:
+ Success:
+(3 rows)
+1<: <... completed>
+TRUNCATE
+
+-- Wait for the diskquota bgworker to refresh the size of 'dummy_t1'.
+SELECT diskquota.wait_for_worker_new_epoch();
+ wait_for_worker_new_epoch
+---------------------------
+ t
+(1 row)
+-- Shows that the result of pg_table_size() and diskquota.table_size are identical.
+SELECT pg_table_size('dummy_t1');
+ pg_table_size
+---------------
+             0
+(1 row)
+SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid='dummy_t1'::regclass ORDER BY segid;
+ tableid  | size | segid
+----------+------+-------
+ dummy_t1 |    0 |    -1
+ dummy_t1 |    0 |     0
+ dummy_t1 |    0 |     1
+ dummy_t1 |    0 |     2
+(4 rows)
+DROP TABLE dummy_t1;
+DROP
diff --git a/tests/isolation2/expected7/test_vacuum.out b/tests/isolation2/expected7/test_vacuum.out
new file mode 100644
index 00000000000..47eb944d968
--- /dev/null
+++ b/tests/isolation2/expected7/test_vacuum.out
@@ -0,0 +1,92 @@
+-- This file tests various race conditions when performing 'VACUUM FULL'.
+
+-- 1. When gpdb performs 'VACUUM FULL' on some relation, it can be summarized
+-- as the following 3 steps:
+-- s1) create a new temporary relation (smgrcreate hook will be triggered, newly
+-- created relfilenode will be put into shmem).
+-- s2) insert data into the newly created relation from the old relation (smgrextend
+-- hook will be triggered, newly created relfilenode will be put into shmem).
+-- s3) change the old relation's relfilenode to the newly created one.
+-- Consider the following situation:
+-- If the diskquota bgworker pulls active oids before the 'VACUUM FULL' operation finishes,
+-- the newly created relfilenode is translated to the newly created temporary relation's oid,
+-- and the old relation's size cannot be updated. We resolve this by making altered relations' oids
+-- constantly active so that the diskquota bgworker keeps updating the altered relation's size
+-- during 'VACUUM FULL'.
+CREATE TABLE dummy_t1(i int) DISTRIBUTED BY (i);
+CREATE
+INSERT INTO dummy_t1 SELECT generate_series(1, 1000);
+INSERT 1000
+DELETE FROM dummy_t1;
+DELETE 1000
+-- Wait for the diskquota bgworker to refresh the size of 'dummy_t1'.
+SELECT diskquota.wait_for_worker_new_epoch();
+ wait_for_worker_new_epoch
+---------------------------
+ t
+(1 row)
+-- Shows that the result of pg_table_size() and diskquota.table_size are identical.
+SELECT pg_table_size('dummy_t1');
+ pg_table_size
+---------------
+         98304
+(1 row)
+SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid='dummy_t1'::regclass ORDER BY segid;
+ tableid  | size  | segid
+----------+-------+-------
+ dummy_t1 | 98304 |    -1
+ dummy_t1 | 32768 |     0
+ dummy_t1 | 32768 |     1
+ dummy_t1 | 32768 |     2
+(4 rows)
+SELECT gp_inject_fault_infinite('object_access_post_alter', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content<>-1;
+ gp_inject_fault_infinite
+--------------------------
+ Success:
+ Success:
+ Success:
+(3 rows)
+SELECT diskquota.wait_for_worker_new_epoch();
+ wait_for_worker_new_epoch
+---------------------------
+ t
+(1 row)
+1&: VACUUM FULL dummy_t1;
+-- Wait for the diskquota bgworker to 'consume' the newly created relfilenode from shmem.
+SELECT diskquota.wait_for_worker_new_epoch();
+ wait_for_worker_new_epoch
+---------------------------
+ t
+(1 row)
+SELECT gp_inject_fault_infinite('object_access_post_alter', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content<>-1;
+ gp_inject_fault_infinite
+--------------------------
+ Success:
+ Success:
+ Success:
+(3 rows)
+1<: <... completed>
+VACUUM
+
+-- Wait for the diskquota bgworker to refresh the size of 'dummy_t1'.
+SELECT diskquota.wait_for_worker_new_epoch();
+ wait_for_worker_new_epoch
+---------------------------
+ t
+(1 row)
+-- Shows that the result of pg_table_size() and diskquota.table_size are identical.
+SELECT pg_table_size('dummy_t1');
+ pg_table_size
+---------------
+             0
+(1 row)
+SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid='dummy_t1'::regclass ORDER BY segid;
+ tableid  | size | segid
+----------+------+-------
+ dummy_t1 |    0 |    -1
+ dummy_t1 |    0 |     0
+ dummy_t1 |    0 |     1
+ dummy_t1 |    0 |     2
+(4 rows)
+DROP TABLE dummy_t1;
+DROP
diff --git a/tests/isolation2/expected7/test_worker_timeout.out b/tests/isolation2/expected7/test_worker_timeout.out
new file mode 100644
index 00000000000..5f855a7b80c
--- /dev/null
+++ b/tests/isolation2/expected7/test_worker_timeout.out
@@ -0,0 +1,38 @@
+!\retcode gpconfig -c diskquota.worker_timeout -v 1;
+(exited with code 0)
+!\retcode gpstop -u;
+(exited with code 0)
+
+SELECT gp_inject_fault_infinite('diskquota_worker_main', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=-1;
+ gp_inject_fault_infinite
+--------------------------
+ Success:
+(1 row)
+
+1&: SELECT diskquota.wait_for_worker_new_epoch();
+
+SELECT pg_sleep(2 * current_setting('diskquota.worker_timeout')::int);
+ pg_sleep
+----------
+
+(1 row)
+
+SELECT pg_cancel_backend(pid) FROM pg_stat_activity WHERE query = 'SELECT diskquota.wait_for_worker_new_epoch();';
+ pg_cancel_backend
+-------------------
+ t
+(1 row)
+
+SELECT gp_inject_fault_infinite('diskquota_worker_main', 'resume', dbid) FROM gp_segment_configuration WHERE role='p' AND content=-1;
+ gp_inject_fault_infinite
+--------------------------
+ Success:
+(1 row)
+
+1<: <... completed>
+ERROR: canceling statement due to user request
+
+!\retcode gpconfig -r diskquota.worker_timeout;
+(exited with code 0)
+!\retcode gpstop -u;
+(exited with code 0)
diff --git a/tests/isolation2/isolation2_schedule7 b/tests/isolation2/isolation2_schedule7
new file mode 100644
index 00000000000..56792ee63e8
--- /dev/null
+++ b/tests/isolation2/isolation2_schedule7
@@ -0,0 +1,13 @@
+test: config
+test: test_create_extension
+test: test_fast_quota_view
+test: test_relation_size
+# test: test_rejectmap
+test: test_vacuum
+test: test_truncate
+# test: test_postmaster_restart
+test: test_worker_timeout
+test: test_per_segment_config
+test: test_relation_cache
+test: test_drop_extension
+test: reset_config
diff --git a/tests/isolation2/sql/setup.sql b/tests/isolation2/sql/setup.sql
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/tests/regress/diskquota_schedule7 b/tests/regress/diskquota_schedule7
new file mode 100644
index 00000000000..50dc40e3b60
--- /dev/null
+++ b/tests/regress/diskquota_schedule7
@@ -0,0 +1,46 @@
+test: config
+test: test_create_extension
+test: test_readiness_logged
+test: test_init_table_size_table
+test: test_relation_size
+test: test_relation_cache
+test: test_uncommitted_table_size
+test: test_pause_and_resume
+test: test_pause_and_resume_multiple_db
+test: test_drop_after_pause
+test: test_show_status
+test: test_update_db_cache
+test: test_quota_view_no_table
+# disable this test due to GPDB behavior change
+# test: test_table_size
+test: test_fast_disk_check
+test: test_worker_not_ready
+#test: test_insert_after_drop
+test: test_role test_schema test_drop_table test_column test_copy test_update test_toast test_truncate test_reschema test_temp_role test_rename test_delete_quota test_mistake test_tablespace_role test_tablespace_schema test_tablespace_role_perseg test_tablespace_schema_perseg test_index test_recreate
+test: test_ctas_no_preload_lib
+test: test_ctas_before_set_quota
+test: test_truncate
+test: test_delete_quota
+test: test_partition
+test: test_vacuum
+# plpython is not available in gpdb7, should change it to plpython3
+# test: test_primary_failure
+test: test_extension
+test: test_activetable_limit
+test: test_many_active_tables
+test: test_fetch_table_stat
+test: test_appendonly
+test: test_rejectmap
+test: test_clean_rejectmap_after_drop
+test: test_rejectmap_mul_db
+test: test_ctas_pause
+test: test_ctas_role
+test: test_ctas_schema
+test: test_ctas_tablespace_role
+test: test_ctas_tablespace_schema
+test: test_default_tablespace
+test: test_tablespace_diff_schema
+test: test_worker_schedule
+test: test_worker_schedule_exception
+test: test_drop_extension
+test: reset_config
diff --git a/tests/regress/expected/test_appendonly.out b/tests/regress/expected/test_appendonly.out
index 2d4575339db..d0c465afd1b 100644
--- a/tests/regress/expected/test_appendonly.out
+++ b/tests/regress/expected/test_appendonly.out
@@ -2,11 +2,7 @@
 CREATE SCHEMA s_appendonly;
 SET search_path TO s_appendonly;
 CREATE TABLE t_ao(i int) WITH (appendonly=true) DISTRIBUTED BY (i);
-NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table.
-HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
CREATE TABLE t_aoco(i int) WITH (appendonly=true, orientation=column) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. -- Create an index on t_ao so that there will be pg_aoblkdir_XXX relations. CREATE INDEX index_t ON t_ao(i); CREATE INDEX index_t2 ON t_aoco(i); diff --git a/tests/regress/expected/test_relation_cache.out b/tests/regress/expected/test_relation_cache.out index 52a3efb45ff..38101c0f202 100644 --- a/tests/regress/expected/test_relation_cache.out +++ b/tests/regress/expected/test_relation_cache.out @@ -13,8 +13,6 @@ $$ LANGUAGE plpgsql; -- heap table begin; create table t(i int) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. insert into t select generate_series(1, 100000); select count(*) from diskquota.show_relation_cache_all_seg(); count @@ -39,8 +37,6 @@ drop table t; -- toast table begin; create table t(t text) DISTRIBUTED BY (t); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 't' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. insert into t select array(select * from generate_series(1,1000)) from generate_series(1, 1000); select count(*) from diskquota.show_relation_cache_all_seg(); count @@ -71,8 +67,6 @@ drop table t; -- AO table begin; create table t(a int, b text) with(appendonly=true) DISTRIBUTED BY (a); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. insert into t select generate_series(1,1000) as a, repeat('a', 1000) as b; select count(*) from diskquota.show_relation_cache_all_seg(); count @@ -103,8 +97,6 @@ drop table t; -- AOCS table begin; create table t(a int, b text) with(appendonly=true, orientation=column) DISTRIBUTED BY (a); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. insert into t select generate_series(1,1000) as a, repeat('a', 1000) as b; select count(*) from diskquota.show_relation_cache_all_seg(); count diff --git a/tests/regress/expected/test_uncommitted_table_size.out b/tests/regress/expected/test_uncommitted_table_size.out index 8cbc401931f..aa144bd14a0 100644 --- a/tests/regress/expected/test_uncommitted_table_size.out +++ b/tests/regress/expected/test_uncommitted_table_size.out @@ -27,8 +27,6 @@ DROP table t1; -- heap table begin; CREATE TABLE t2(i int) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. 
-HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT INTO t2 SELECT generate_series(1, 100000); SELECT diskquota.wait_for_worker_new_epoch(); wait_for_worker_new_epoch @@ -75,8 +73,6 @@ DROP table t2; -- toast table begin; CREATE TABLE t3(t text) DISTRIBUTED BY (t); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 't' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT INTO t3 SELECT repeat('a', 10000) FROM generate_series(1, 1000); SELECT diskquota.wait_for_worker_new_epoch(); wait_for_worker_new_epoch @@ -101,8 +97,6 @@ DROP table t3; -- AO table begin; CREATE TABLE ao (i int) WITH (appendonly=true) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT INTO ao SELECT generate_series(1, 100000); SELECT diskquota.wait_for_worker_new_epoch(); wait_for_worker_new_epoch @@ -156,8 +150,6 @@ DROP TABLE ao; -- AO table CTAS begin; CREATE TABLE ao (i) WITH(appendonly=true) AS SELECT generate_series(1, 10000) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. SELECT diskquota.wait_for_worker_new_epoch(); wait_for_worker_new_epoch --------------------------- @@ -176,8 +168,6 @@ DROP TABLE ao; -- AOCS table begin; CREATE TABLE aocs (i int, t text) WITH (appendonly=true, orientation=column) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT INTO aocs SELECT i, repeat('a', 1000) FROM generate_series(1, 10000) AS i; SELECT diskquota.wait_for_worker_new_epoch(); wait_for_worker_new_epoch @@ -224,8 +214,6 @@ DROP TABLE aocs; -- AOCS table CTAS begin; CREATE TABLE aocs WITH(appendonly=true, orientation=column) AS SELECT i, array(select * from generate_series(1,1000)) FROM generate_series(1, 100) AS i DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'i' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. SELECT diskquota.wait_for_worker_new_epoch(); wait_for_worker_new_epoch --------------------------- diff --git a/tests/regress/expected7/config.out b/tests/regress/expected7/config.out new file mode 100644 index 00000000000..d266f9bf501 --- /dev/null +++ b/tests/regress/expected7/config.out @@ -0,0 +1,70 @@ +--start_ignore +CREATE DATABASE diskquota; +ERROR: database "diskquota" already exists +\! 
gpconfig -c shared_preload_libraries -v $(./data/current_binary_name); +20230117:12:40:53:1895897 gpconfig:zhrt:zhrt-[INFO]:-completed successfully with parameters '-c shared_preload_libraries -v diskquota-2.2.so' +\! gpconfig -c diskquota.naptime -v 0 --skipvalidation +20230117:12:40:53:1896062 gpconfig:zhrt:zhrt-[INFO]:-completed successfully with parameters '-c diskquota.naptime -v 0 --skipvalidation' +\! gpconfig -c max_worker_processes -v 20 --skipvalidation +20230117:12:40:54:1896331 gpconfig:zhrt:zhrt-[INFO]:-completed successfully with parameters '-c max_worker_processes -v 20 --skipvalidation' +\! gpconfig -c diskquota.hard_limit -v "off" --skipvalidation +20230117:12:40:55:1896588 gpconfig:zhrt:zhrt-[INFO]:-completed successfully with parameters '-c diskquota.hard_limit -v off --skipvalidation' +\! gpconfig -c diskquota.max_workers -v 1 --skipvalidation +20230117:12:40:55:1896848 gpconfig:zhrt:zhrt-[INFO]:-completed successfully with parameters '-c diskquota.max_workers -v 1 --skipvalidation' +\! gpconfig -c log_min_messages -v debug1 +20230117:12:40:56:1897088 gpconfig:zhrt:zhrt-[INFO]:-completed successfully with parameters '-c log_min_messages -v debug1' +\! gpstop -raf +20230117:12:40:56:1897362 gpstop:zhrt:zhrt-[INFO]:-Starting gpstop with args: -raf +20230117:12:40:56:1897362 gpstop:zhrt:zhrt-[INFO]:-Gathering information and validating the environment... +20230117:12:40:56:1897362 gpstop:zhrt:zhrt-[INFO]:-Obtaining Greenplum Coordinator catalog information +20230117:12:40:56:1897362 gpstop:zhrt:zhrt-[INFO]:-Obtaining Segment details from coordinator... +20230117:12:40:56:1897362 gpstop:zhrt:zhrt-[INFO]:-Greenplum Version: 'postgres (Greenplum Database) 7.0.0-alpha.0+dev.16171.g005ee83c46 build dev' +20230117:12:40:56:1897362 gpstop:zhrt:zhrt-[INFO]:-Commencing Coordinator instance shutdown with mode='fast' +20230117:12:40:56:1897362 gpstop:zhrt:zhrt-[INFO]:-Coordinator segment instance directory=/home/zhrt/workspace/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 +20230117:12:40:56:1897362 gpstop:zhrt:zhrt-[INFO]:-Attempting forceful termination of any leftover coordinator process +20230117:12:40:56:1897362 gpstop:zhrt:zhrt-[INFO]:-Terminating processes for segment /home/zhrt/workspace/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 +20230117:12:40:56:1897362 gpstop:zhrt:zhrt-[INFO]:-Stopping coordinator standby host zhrt mode=fast +20230117:12:40:57:1897362 gpstop:zhrt:zhrt-[INFO]:-Successfully shutdown standby process on zhrt +20230117:12:40:57:1897362 gpstop:zhrt:zhrt-[INFO]:-Targeting dbid [2, 5, 3, 6, 4, 7] for shutdown +20230117:12:40:57:1897362 gpstop:zhrt:zhrt-[INFO]:-Commencing parallel primary segment instance shutdown, please wait... +20230117:12:40:57:1897362 gpstop:zhrt:zhrt-[INFO]:-0.00% of jobs completed +20230117:12:40:57:1897362 gpstop:zhrt:zhrt-[INFO]:-100.00% of jobs completed +20230117:12:40:57:1897362 gpstop:zhrt:zhrt-[INFO]:-Commencing parallel mirror segment instance shutdown, please wait... 
+20230117:12:40:57:1897362 gpstop:zhrt:zhrt-[INFO]:-0.00% of jobs completed +20230117:12:40:58:1897362 gpstop:zhrt:zhrt-[INFO]:-100.00% of jobs completed +20230117:12:40:58:1897362 gpstop:zhrt:zhrt-[INFO]:----------------------------------------------------- +20230117:12:40:58:1897362 gpstop:zhrt:zhrt-[INFO]:- Segments stopped successfully = 6 +20230117:12:40:58:1897362 gpstop:zhrt:zhrt-[INFO]:- Segments with errors during stop = 0 +20230117:12:40:58:1897362 gpstop:zhrt:zhrt-[INFO]:----------------------------------------------------- +20230117:12:40:58:1897362 gpstop:zhrt:zhrt-[INFO]:-Successfully shutdown 6 of 6 segment instances +20230117:12:40:58:1897362 gpstop:zhrt:zhrt-[INFO]:-Database successfully shutdown with no errors reported +20230117:12:40:58:1897362 gpstop:zhrt:zhrt-[INFO]:-Restarting System... +--end_ignore +\c +-- Show the values of all GUC variables +-- start_ignore +SHOW diskquota.naptime; + diskquota.naptime +------------------- + 0 +(1 row) + +-- end_ignore +SHOW diskquota.max_active_tables; + diskquota.max_active_tables +----------------------------- + 307200 +(1 row) + +SHOW diskquota.worker_timeout; + diskquota.worker_timeout +-------------------------- + 60 +(1 row) + +SHOW diskquota.hard_limit; + diskquota.hard_limit +---------------------- + off +(1 row) + diff --git a/tests/regress/expected7/reset_config.out b/tests/regress/expected7/reset_config.out new file mode 100644 index 00000000000..9f6797259a7 --- /dev/null +++ b/tests/regress/expected7/reset_config.out @@ -0,0 +1,17 @@ +--start_ignore +\! gpconfig -c diskquota.naptime -v 2 +20230117:13:11:41:2012767 gpconfig:zhrt:zhrt-[INFO]:-completed successfully with parameters '-c diskquota.naptime -v 2' +\! gpstop -u +20230117:13:11:41:2012942 gpstop:zhrt:zhrt-[INFO]:-Starting gpstop with args: -u +20230117:13:11:41:2012942 gpstop:zhrt:zhrt-[INFO]:-Gathering information and validating the environment... +20230117:13:11:41:2012942 gpstop:zhrt:zhrt-[INFO]:-Obtaining Greenplum Coordinator catalog information +20230117:13:11:41:2012942 gpstop:zhrt:zhrt-[INFO]:-Obtaining Segment details from coordinator... +20230117:13:11:41:2012942 gpstop:zhrt:zhrt-[INFO]:-Greenplum Version: 'postgres (Greenplum Database) 7.0.0-alpha.0+dev.16171.g005ee83c46 build dev' +20230117:13:11:41:2012942 gpstop:zhrt:zhrt-[INFO]:-Signalling all postmaster processes to reload +--end_ignore +SHOW diskquota.naptime; + diskquota.naptime +------------------- + 2 +(1 row) + diff --git a/tests/regress/expected7/test_activetable_limit.out b/tests/regress/expected7/test_activetable_limit.out new file mode 100644 index 00000000000..c556f32bb38 --- /dev/null +++ b/tests/regress/expected7/test_activetable_limit.out @@ -0,0 +1,56 @@ +-- table in 'diskquota not enabled database' should not be activetable +\! gpconfig -c diskquota.max_active_tables -v 2 > /dev/null +\! 
gpstop -arf > /dev/null +\c +CREATE DATABASE test_tablenum_limit_01; +CREATE DATABASE test_tablenum_limit_02; +\c test_tablenum_limit_01 +CREATE TABLE a01(i int) DISTRIBUTED BY (i); +CREATE TABLE a02(i int) DISTRIBUTED BY (i); +CREATE TABLE a03(i int) DISTRIBUTED BY (i); +INSERT INTO a01 values(generate_series(0, 500)); +INSERT INTO a02 values(generate_series(0, 500)); +INSERT INTO a03 values(generate_series(0, 500)); +\c test_tablenum_limit_02 +CREATE EXTENSION diskquota; +CREATE SCHEMA s; +SELECT diskquota.set_schema_quota('s', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +CREATE TABLE s.t1(i int) DISTRIBUTED BY (i); -- activetable = 1 +INSERT INTO s.t1 SELECT generate_series(1, 100000); -- ok: the soft limit does not reject the first over-quota write +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +CREATE TABLE s.t2(i int) DISTRIBUTED BY (i); -- activetable = 2 +INSERT INTO s.t2 SELECT generate_series(1, 10); -- expect fail +ERROR: schema's disk space quota exceeded with name: s +CREATE TABLE s.t3(i int) DISTRIBUTED BY (i); -- activetable = 3 should not crash. +INSERT INTO s.t3 SELECT generate_series(1, 10); -- expect fail +ERROR: schema's disk space quota exceeded with name: s +-- Q: why does diskquota still work when activetable = 3? +-- A: the active table limit is enforced via shmem size, which is computed by hash_estimate_size(). +-- The result is larger than sizeof(DiskQuotaActiveTableEntry) * max_active_tables, and the real +-- capacity of the hash table depends on the hash collision probability, so we cannot predict +-- exactly when the structure becomes full. +-- +-- This test case may be redundant; remove it if anyone dislikes it. +-- But the hash capacity is smaller than 6, so the test case still covers issue 51. +DROP EXTENSION diskquota; +\c contrib_regression +DROP DATABASE test_tablenum_limit_01; +DROP DATABASE test_tablenum_limit_02; +\! gpconfig -r diskquota.max_active_tables > /dev/null +\! gpstop -arf > /dev/null diff --git a/tests/regress/expected7/test_appendonly.out b/tests/regress/expected7/test_appendonly.out new file mode 100644 index 00000000000..d324bd7623e --- /dev/null +++ b/tests/regress/expected7/test_appendonly.out @@ -0,0 +1,72 @@ +-- Create new schema for running tests. +CREATE SCHEMA s_appendonly; +SET search_path TO s_appendonly; +CREATE TABLE t_ao(i int) WITH (appendonly=true) DISTRIBUTED BY (i); +CREATE TABLE t_aoco(i int) WITH (appendonly=true, orientation=column) DISTRIBUTED BY (i); +-- Create an index on t_ao so that there will be pg_aoblkdir_XXX relations. +CREATE INDEX index_t ON t_ao(i); +CREATE INDEX index_t2 ON t_aoco(i); +-- 1. Show that the relation's size in diskquota.table_size +-- is identical to the result of pg_table_size(). +INSERT INTO t_ao SELECT generate_series(1, 100); +INSERT INTO t_aoco SELECT generate_series(1, 100); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- Query the size of t_ao. +SELECT tableid::regclass, size + FROM diskquota.table_size + WHERE tableid=(SELECT oid FROM pg_class WHERE relname='t_ao') and segid=-1; + tableid | size +---------+-------- + t_ao | 558168 +(1 row) + +SELECT pg_table_size('t_ao'); + pg_table_size +--------------- + 558168 +(1 row) + +-- Query the size of t_aoco.
+SELECT tableid::regclass, size + FROM diskquota.table_size + WHERE tableid=(SELECT oid FROM pg_class WHERE relname='t_aoco') and segid=-1; + tableid | size +---------+-------- + t_aoco | 557584 +(1 row) + +SELECT pg_table_size('t_aoco'); + pg_table_size +--------------- + 557584 +(1 row) + +-- 2. Test that we are able to enforce quota limits on append-only tables. +SELECT diskquota.set_schema_quota('s_appendonly', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +-- expect success. +INSERT INTO t_ao SELECT generate_series(1, 1000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect fail. +INSERT INTO t_ao SELECT generate_series(1, 10); +ERROR: schema's disk space quota exceeded with name: s_appendonly +INSERT INTO t_aoco SELECT generate_series(1, 10); +ERROR: schema's disk space quota exceeded with name: s_appendonly +DROP TABLE t_ao; +DROP TABLE t_aoco; +SET search_path TO DEFAULT; +DROP SCHEMA s_appendonly; diff --git a/tests/regress/expected7/test_clean_rejectmap_after_drop.out b/tests/regress/expected7/test_clean_rejectmap_after_drop.out new file mode 100644 index 00000000000..30c63756cce --- /dev/null +++ b/tests/regress/expected7/test_clean_rejectmap_after_drop.out @@ -0,0 +1,42 @@ +CREATE DATABASE test_clean_rejectmap_after_drop; +\c test_clean_rejectmap_after_drop +CREATE EXTENSION diskquota; +\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null +\! gpstop -u > /dev/null +CREATE ROLE r; +NOTICE: resource queue required -- using default resource queue "pg_default" +SELECT diskquota.set_role_quota('r', '1MB'); + set_role_quota +---------------- + +(1 row) + +CREATE TABLE b (t TEXT) DISTRIBUTED BY (t); +ALTER TABLE b OWNER TO r; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +INSERT INTO b SELECT generate_series(1, 100000000); -- fail +ERROR: role's disk space quota exceeded with name: 40071 (seg1 127.0.0.1:7003 pid=1958088) +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +INSERT INTO b SELECT generate_series(1, 100); -- ok +\c contrib_regression +DROP DATABASE test_clean_rejectmap_after_drop; +DROP ROLE r; +\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\!
gpstop -u > /dev/null diff --git a/tests/regress/expected7/test_column.out b/tests/regress/expected7/test_column.out new file mode 100644 index 00000000000..a5eb051c755 --- /dev/null +++ b/tests/regress/expected7/test_column.out @@ -0,0 +1,42 @@ +-- Test alter table add column +CREATE SCHEMA scolumn; +SELECT diskquota.set_schema_quota('scolumn', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +SET search_path TO scolumn; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +CREATE TABLE a2(i INT) DISTRIBUTED BY (i); +-- fill the schema over its quota; the first over-quota insert succeeds (soft limit) +INSERT INTO a2 SELECT generate_series(1,100000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect fail +INSERT INTO a2 SELECT generate_series(1,10); +ERROR: schema's disk space quota exceeded with name: scolumn +ALTER TABLE a2 ADD COLUMN j VARCHAR(50); +UPDATE a2 SET j = 'add value for column j'; +ERROR: schema's disk space quota exceeded with name: scolumn +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect the insert to fail after adding the column +INSERT INTO a2 SELECT generate_series(1,10); +ERROR: schema's disk space quota exceeded with name: scolumn +DROP TABLE a2; +RESET search_path; +DROP SCHEMA scolumn; diff --git a/tests/regress/expected7/test_copy.out b/tests/regress/expected7/test_copy.out new file mode 100644 index 00000000000..2c3fff9ff84 --- /dev/null +++ b/tests/regress/expected7/test_copy.out @@ -0,0 +1,26 @@ +-- Test copy +CREATE SCHEMA s3; +SELECT diskquota.set_schema_quota('s3', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +SET search_path TO s3; +\! seq 100 > /tmp/csmall.txt +CREATE TABLE c (i int) DISTRIBUTED BY (i); +COPY c FROM '/tmp/csmall.txt'; +-- fill the schema over its quota; this insert succeeds (soft limit) +INSERT INTO c SELECT generate_series(1,100000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect copy fail +COPY c FROM '/tmp/csmall.txt'; +ERROR: schema's disk space quota exceeded with name: s3 +DROP TABLE c; +RESET search_path; +DROP SCHEMA s3; diff --git a/tests/regress/expected7/test_create_extension.out b/tests/regress/expected7/test_create_extension.out new file mode 100644 index 00000000000..a90178ce350 --- /dev/null +++ b/tests/regress/expected7/test_create_extension.out @@ -0,0 +1,14 @@ +CREATE EXTENSION diskquota; +SELECT diskquota.init_table_size_table(); + init_table_size_table +----------------------- + +(1 row) + +-- Wait after init so that diskquota.state is clean +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + diff --git a/tests/regress/expected7/test_ctas_before_set_quota.out b/tests/regress/expected7/test_ctas_before_set_quota.out new file mode 100644 index 00000000000..ac69b2b5226 --- /dev/null +++ b/tests/regress/expected7/test_ctas_before_set_quota.out @@ -0,0 +1,61 @@ +CREATE ROLE test SUPERUSER; +SET ROLE test; +CREATE TABLE t_before_set_quota (i) AS SELECT generate_series(1, 100000) +DISTRIBUTED BY (i); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size +WHERE tableid = 't_before_set_quota'::regclass ORDER BY segid; + tableid | size | segid +--------------------+---------+------- + t_before_set_quota | 3637248 | -1 +
t_before_set_quota | 1212416 | 0 + t_before_set_quota | 1212416 | 1 + t_before_set_quota | 1212416 | 2 +(4 rows) + +-- Ensure that the table is not active +SELECT diskquota.diskquota_fetch_table_stat(0, ARRAY[]::oid[]) +FROM gp_dist_random('gp_id'); + diskquota_fetch_table_stat +---------------------------- +(0 rows) + +SELECT diskquota.set_role_quota(current_role, '1MB'); + set_role_quota +---------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- Expect that current role is in the rejectmap +SELECT rolname FROM pg_authid, diskquota.rejectmap WHERE oid = target_oid; + rolname +--------- + test +(1 row) + +SELECT diskquota.set_role_quota(current_role, '-1'); + set_role_quota +---------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP TABLE t_before_set_quota; +RESET ROLE; +DROP ROLE test; diff --git a/tests/regress/expected7/test_ctas_no_preload_lib.out b/tests/regress/expected7/test_ctas_no_preload_lib.out new file mode 100644 index 00000000000..b85a18ac92b --- /dev/null +++ b/tests/regress/expected7/test_ctas_no_preload_lib.out @@ -0,0 +1,85 @@ +\! gpconfig -c shared_preload_libraries -v '' > /dev/null +\! gpstop -far > /dev/null +\c +CREATE ROLE test SUPERUSER; +SET ROLE test; +-- Create table with diskquota disabled +CREATE TABLE t_without_diskquota (i) AS SELECT generate_series(1, 100000) +DISTRIBUTED BY (i); +\! gpconfig -c shared_preload_libraries -v $(./data/current_binary_name) > /dev/null +\! gpstop -far > /dev/null +\c +SET ROLE test; +-- Init table_size to include the table +SELECT diskquota.init_table_size_table(); + init_table_size_table +----------------------- + +(1 row) + +-- Restart to load diskquota.table_size into memory. +\!
gpstop -far > /dev/null +\c +SET ROLE test; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size +WHERE tableid = 't_without_diskquota'::regclass ORDER BY segid; + tableid | size | segid +---------------------+---------+------- + t_without_diskquota | 3637248 | -1 + t_without_diskquota | 1212416 | 0 + t_without_diskquota | 1212416 | 1 + t_without_diskquota | 1212416 | 2 +(4 rows) + +-- Ensure that the table is not active +SELECT diskquota.diskquota_fetch_table_stat(0, ARRAY[]::oid[]) +FROM gp_dist_random('gp_id'); + diskquota_fetch_table_stat +---------------------------- +(0 rows) + +SELECT diskquota.set_role_quota(current_role, '1MB'); + set_role_quota +---------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- Expect that current role is in the rejectmap +SELECT rolname FROM pg_authid, diskquota.rejectmap WHERE oid = target_oid; + rolname +--------- + test +(1 row) + +SELECT diskquota.set_role_quota(current_role, '-1'); + set_role_quota +---------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT rolname FROM pg_authid, diskquota.rejectmap WHERE oid = target_oid; + rolname +--------- +(0 rows) + +DROP TABLE t_without_diskquota; +RESET ROLE; +DROP ROLE test; diff --git a/tests/regress/expected7/test_ctas_pause.out b/tests/regress/expected7/test_ctas_pause.out new file mode 100644 index 00000000000..76e02f10be1 --- /dev/null +++ b/tests/regress/expected7/test_ctas_pause.out @@ -0,0 +1,37 @@ +CREATE SCHEMA hardlimit_s; +SET search_path TO hardlimit_s; +\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null +\! gpstop -u > /dev/null +SELECT diskquota.set_schema_quota('hardlimit_s', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- heap table +CREATE TABLE t1 (i) AS SELECT generate_series(1,10000000) DISTRIBUTED BY (i); -- expect fail +ERROR: schema's disk space quota exceeded with name: 40272 (seg0 127.0.0.1:7002 pid=1962803) +SELECT diskquota.pause(); + pause +------- + +(1 row) + +CREATE TABLE t1 (i) AS SELECT generate_series(1,10000000) DISTRIBUTED BY (i); -- expect succeed +-- disable hardlimit and do some clean-ups. +\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\! gpstop -u > /dev/null +SELECT diskquota.resume(); + resume +-------- + +(1 row) + +DROP SCHEMA hardlimit_s CASCADE; +NOTICE: drop cascades to table t1 diff --git a/tests/regress/expected7/test_ctas_role.out b/tests/regress/expected7/test_ctas_role.out new file mode 100644 index 00000000000..facb95b5b12 --- /dev/null +++ b/tests/regress/expected7/test_ctas_role.out @@ -0,0 +1,81 @@ +-- Test that diskquota is able to cancel a running CTAS query by the role quota. +-- start_ignore +\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null +\! 
gpstop -u > /dev/null +-- end_ignore +CREATE ROLE hardlimit_r; +NOTICE: resource queue required -- using default resource queue "pg_default" +SELECT diskquota.set_role_quota('hardlimit_r', '1MB'); + set_role_quota +---------------- + +(1 row) + +GRANT USAGE ON SCHEMA diskquota TO hardlimit_r; +SET ROLE hardlimit_r; +-- heap table +CREATE TABLE t1 (i) AS SELECT generate_series(1, 100000000) DISTRIBUTED BY (i); +ERROR: role's disk space quota exceeded with name: 40279 (seg1 127.0.0.1:7003 pid=1964560) +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- temp table +CREATE TEMP TABLE t2 (i) AS SELECT generate_series(1, 100000000); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +ERROR: role's disk space quota exceeded with name: 40279 (seg1 127.0.0.1:7003 pid=1964560) +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- toast table +CREATE TABLE toast_table (i) AS SELECT ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000) DISTRIBUTED BY (i); +ERROR: role's disk space quota exceeded with name: 40279 (seg1 127.0.0.1:7003 pid=1964560) +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- ao table +CREATE TABLE ao_table (i) WITH (appendonly=true) AS SELECT generate_series(1, 100000000) DISTRIBUTED BY (i); +ERROR: role's disk space quota exceeded with name: 40279 (seg0 127.0.0.1:7002 pid=1964561) +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- aocs table +CREATE TABLE aocs_table WITH (appendonly=true, orientation=column) + AS SELECT i, ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000) AS i; +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +ERROR: role's disk space quota exceeded with name: 40279 (seg0 127.0.0.1:7002 pid=1964561) +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- disable hardlimit and do some clean-ups. +DROP TABLE IF EXISTS t1; +NOTICE: table "t1" does not exist, skipping +DROP TABLE IF EXISTS t2; +NOTICE: table "t2" does not exist, skipping +DROP TABLE IF EXISTS toast_table; +NOTICE: table "toast_table" does not exist, skipping +DROP TABLE IF EXISTS ao_table; +NOTICE: table "ao_table" does not exist, skipping +DROP TABLE IF EXISTS aocs_table; +NOTICE: table "aocs_table" does not exist, skipping +RESET ROLE; +REVOKE USAGE ON SCHEMA diskquota FROM hardlimit_r; +DROP ROLE hardlimit_r; +\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\! gpstop -u > /dev/null diff --git a/tests/regress/expected7/test_ctas_schema.out b/tests/regress/expected7/test_ctas_schema.out new file mode 100644 index 00000000000..e2e810d6f53 --- /dev/null +++ b/tests/regress/expected7/test_ctas_schema.out @@ -0,0 +1,64 @@ +-- Test that diskquota is able to cancel a running CTAS query by the schema quota. 
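(For context: the hard-limit CTAS tests in this group all follow the same pattern, sketched minimally below; the schema name and row count are illustrative and not part of any captured output. With diskquota.hard_limit set to "on", the quota is checked while the statement runs, so even the first over-quota CTAS is cancelled; under the default soft limit the first over-quota write completes and only subsequent writes are rejected.)

SELECT diskquota.set_schema_quota('demo_schema', '1 MB');  -- assumes demo_schema already exists
SELECT diskquota.wait_for_worker_new_epoch();              -- let the worker publish the new quota
CREATE TABLE demo_schema.big (i) AS
  SELECT generate_series(1, 100000000) DISTRIBUTED BY (i); -- cancelled mid-run when the hard limit is on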
+\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null +\! gpstop -u > /dev/null +CREATE SCHEMA hardlimit_s; +SELECT diskquota.set_schema_quota('hardlimit_s', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +SET search_path TO hardlimit_s; +-- heap table +CREATE TABLE t1 (i) AS SELECT generate_series(1, 100000000) DISTRIBUTED BY (i); +ERROR: schema's disk space quota exceeded with name: 40394 (seg2 127.0.0.1:7004 pid=1966566) +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- toast table +CREATE TABLE toast_table (i) + AS SELECT ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000) DISTRIBUTED BY (i); +ERROR: schema's disk space quota exceeded with name: 40394 (seg1 127.0.0.1:7003 pid=1966565) +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- ao table +CREATE TABLE ao_table (i) WITH (appendonly=true) AS SELECT generate_series(1, 100000000) DISTRIBUTED BY (i); +ERROR: schema's disk space quota exceeded with name: 40394 (seg0 127.0.0.1:7002 pid=1966564) +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- aocs table +CREATE TABLE aocs_table WITH (appendonly=true, orientation=column) + AS SELECT i, ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000) AS i; +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +ERROR: schema's disk space quota exceeded with name: 40394 (seg2 127.0.0.1:7004 pid=1966566) +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- disable hardlimit and do some clean-ups. +\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\! gpstop -u > /dev/null +DROP TABLE IF EXISTS t1; +NOTICE: table "t1" does not exist, skipping +DROP TABLE IF EXISTS toast_table; +NOTICE: table "toast_table" does not exist, skipping +DROP TABLE IF EXISTS ao_table; +NOTICE: table "ao_table" does not exist, skipping +DROP TABLE IF EXISTS aocs_table; +NOTICE: table "aocs_table" does not exist, skipping +RESET search_path; +DROP SCHEMA hardlimit_s; diff --git a/tests/regress/expected7/test_ctas_tablespace_role.out b/tests/regress/expected7/test_ctas_tablespace_role.out new file mode 100644 index 00000000000..c6d3bb6302e --- /dev/null +++ b/tests/regress/expected7/test_ctas_tablespace_role.out @@ -0,0 +1,78 @@ +-- Test that diskquota is able to cancel a running CTAS query by the tablespace role quota. +\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null +\! gpstop -u > /dev/null +-- start_ignore +\! mkdir -p /tmp/ctas_rolespc +-- end_ignore +-- prepare role and tablespace. 
+DROP TABLESPACE IF EXISTS ctas_rolespc; +NOTICE: tablespace "ctas_rolespc" does not exist, skipping +CREATE TABLESPACE ctas_rolespc LOCATION '/tmp/ctas_rolespc'; +CREATE ROLE hardlimit_r; +NOTICE: resource queue required -- using default resource queue "pg_default" +GRANT USAGE ON SCHEMA diskquota TO hardlimit_r; +GRANT ALL ON TABLESPACE ctas_rolespc TO hardlimit_r; +SELECT diskquota.set_role_tablespace_quota('hardlimit_r', 'ctas_rolespc', '1 MB'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +SET default_tablespace = ctas_rolespc; +SET ROLE hardlimit_r; +-- heap table +CREATE TABLE t1 (i) AS SELECT generate_series(1, 100000000) DISTRIBUTED BY (i); +ERROR: tablespace: 40497, role: 40498 diskquota exceeded (seg1 127.0.0.1:7003 pid=1968424) +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- toast table +CREATE TABLE toast_table (i) + AS SELECT ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000) DISTRIBUTED BY (i); +ERROR: tablespace: 40497, role: 40498 diskquota exceeded (seg1 127.0.0.1:7003 pid=1968424) +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- ao table +CREATE TABLE ao_table (i) WITH (appendonly=true) AS SELECT generate_series(1, 100000000) DISTRIBUTED BY (i); +ERROR: tablespace: 40497, role: 40498 diskquota exceeded (seg1 127.0.0.1:7003 pid=1968424) +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- aocs table +CREATE TABLE aocs_table WITH (appendonly=true, orientation=column) + AS SELECT i, ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000) AS i DISTRIBUTED BY (i); +ERROR: tablespace: 40497, role: 40498 diskquota exceeded (seg1 127.0.0.1:7003 pid=1968424) +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- disable hardlimit and do some clean-ups. +DROP TABLE IF EXISTS t1; +NOTICE: table "t1" does not exist, skipping +DROP TABLE IF EXISTS t2; +NOTICE: table "t2" does not exist, skipping +DROP TABLE IF EXISTS toast_table; +NOTICE: table "toast_table" does not exist, skipping +DROP TABLE IF EXISTS ao_table; +NOTICE: table "ao_table" does not exist, skipping +DROP TABLE IF EXISTS aocs_table; +NOTICE: table "aocs_table" does not exist, skipping +RESET ROLE; +RESET default_tablespace; +DROP TABLESPACE ctas_rolespc; +REVOKE USAGE ON SCHEMA diskquota FROM hardlimit_r; +DROP ROLE hardlimit_r; +\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\! gpstop -u > /dev/null diff --git a/tests/regress/expected7/test_ctas_tablespace_schema.out b/tests/regress/expected7/test_ctas_tablespace_schema.out new file mode 100644 index 00000000000..9c9bde2e190 --- /dev/null +++ b/tests/regress/expected7/test_ctas_tablespace_schema.out @@ -0,0 +1,74 @@ +-- Test that diskquota is able to cancel a running CTAS query by the tablespace schema quota. +\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null +\! gpstop -u > /dev/null +-- start_ignore +\! 
mkdir -p /tmp/ctas_schemaspc +-- end_ignore +-- prepare tablespace and schema +DROP TABLESPACE IF EXISTS ctas_schemaspc; +NOTICE: tablespace "ctas_schemaspc" does not exist, skipping +CREATE TABLESPACE ctas_schemaspc LOCATION '/tmp/ctas_schemaspc'; +CREATE SCHEMA hardlimit_s; +SELECT diskquota.set_schema_tablespace_quota('hardlimit_s', 'ctas_schemaspc', '1 MB'); + set_schema_tablespace_quota +----------------------------- + +(1 row) + +SET search_path TO hardlimit_s; +SET default_tablespace = ctas_schemaspc; +-- heap table +CREATE TABLE t1 (i) AS SELECT generate_series(1, 100000000) DISTRIBUTED BY (i); +ERROR: tablespace: 40635, schema: 40636 diskquota exceeded (seg0 127.0.0.1:7002 pid=1970360) +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- toast table +CREATE TABLE toast_table (i) + AS SELECT ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000) DISTRIBUTED BY (i); +ERROR: tablespace: 40635, schema: 40636 diskquota exceeded (seg1 127.0.0.1:7003 pid=1970361) +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- ao table +CREATE TABLE ao_table (i) WITH (appendonly=true) AS SELECT generate_series(1, 100000000) DISTRIBUTED BY (i); +ERROR: tablespace: 40635, schema: 40636 diskquota exceeded (seg0 127.0.0.1:7002 pid=1970360) +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- aocs table +CREATE TABLE aocs_table WITH (appendonly=true, orientation=column) + AS SELECT i, ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000) AS i DISTRIBUTED BY (i); +ERROR: tablespace: 40635, schema: 40636 diskquota exceeded (seg2 127.0.0.1:7004 pid=1970362) +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- disable hardlimit and do some clean-ups +DROP TABLE IF EXISTS t1; +NOTICE: table "t1" does not exist, skipping +DROP TABLE IF EXISTS t2; +NOTICE: table "t2" does not exist, skipping +DROP TABLE IF EXISTS toast_table; +NOTICE: table "toast_table" does not exist, skipping +DROP TABLE IF EXISTS ao_table; +NOTICE: table "ao_table" does not exist, skipping +DROP TABLE IF EXISTS aocs_table; +NOTICE: table "aocs_table" does not exist, skipping +RESET search_path; +RESET default_tablespace; +DROP SCHEMA hardlimit_s; +DROP TABLESPACE ctas_schemaspc; +\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\! gpstop -u > /dev/null diff --git a/tests/regress/expected7/test_default_tablespace.out b/tests/regress/expected7/test_default_tablespace.out new file mode 100644 index 00000000000..d14251b5dc5 --- /dev/null +++ b/tests/regress/expected7/test_default_tablespace.out @@ -0,0 +1,186 @@ +-- test role_tablespace_quota works with tables/databases in default tablespace +-- test role_tablespace_quota works with tables/databases in non-default tablespace with hard limits on +-- start_ignore +\! mkdir -p /tmp/custom_tablespace +-- end_ignore +DROP ROLE if EXISTS role1; +NOTICE: role "role1" does not exist, skipping +DROP ROLE if EXISTS role2; +NOTICE: role "role2" does not exist, skipping +CREATE ROLE role1 SUPERUSER; +CREATE ROLE role2 SUPERUSER; +SET ROLE role1; +DROP TABLE if EXISTS t; +NOTICE: table "t" does not exist, skipping +CREATE TABLE t (i int) DISTRIBUTED BY (i); +-- with hard limits off +\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\! 
gpstop -u > /dev/null +SELECT diskquota.set_role_tablespace_quota('role1', 'pg_default', '1 MB'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect inserts to succeed +INSERT INTO t SELECT generate_series(1, 100); +INSERT INTO t SELECT generate_series(1, 1000000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert to fail +INSERT INTO t SELECT generate_series(1, 1000000); +ERROR: tablespace: pg_default, role: role1 diskquota exceeded +SELECT r.rolname, t.spcname, b.target_type +FROM diskquota.rejectmap AS b, pg_tablespace AS t, pg_roles AS r +WHERE b.tablespace_oid = t.oid AND b.target_oid = r.oid AND r.rolname = 'role1' +ORDER BY r.rolname, t.spcname, b.target_type; + rolname | spcname | target_type +---------+------------+----------------------- + role1 | pg_default | ROLE_TABLESPACE_QUOTA +(1 row) + +DROP TABLE IF EXISTS t; +SELECT diskquota.set_role_tablespace_quota('role1', 'pg_default', '-1'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +SET ROLE role2; +CREATE TABLE t (i int) DISTRIBUTED BY (i); +-- with hard limits on +\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null +\! gpstop -u > /dev/null +SELECT diskquota.set_role_tablespace_quota('role2', 'pg_default', '1 MB'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert to fail because of hard limits +INSERT INTO t SELECT generate_series(1, 50000000); +ERROR: tablespace: 1663, role: 40739 diskquota exceeded (seg0 127.0.0.1:7002 pid=1971570) +DROP TABLE IF EXISTS t; +SET ROLE role1; +-- database in a customized tablespace +CREATE TABLESPACE custom_tablespace LOCATION '/tmp/custom_tablespace'; +CREATE DATABASE db_with_tablespace TABLESPACE custom_tablespace; +\c db_with_tablespace; +SET ROLE role1; +CREATE EXTENSION diskquota; +-- with hard limits off +\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\!
gpstop -u > /dev/null +SELECT diskquota.set_role_tablespace_quota('role1', 'custom_tablespace', '1 MB'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect the CTAS and insert to succeed +CREATE TABLE t_in_custom_tablespace (i) AS SELECT generate_series(1, 100) DISTRIBUTED BY (i); +INSERT INTO t_in_custom_tablespace SELECT generate_series(1, 1000000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert to fail +INSERT INTO t_in_custom_tablespace SELECT generate_series(1, 1000000); +ERROR: tablespace: custom_tablespace, role: role1 diskquota exceeded +SELECT r.rolname, t.spcname, b.target_type +FROM diskquota.rejectmap AS b, pg_tablespace AS t, pg_roles AS r +WHERE b.tablespace_oid = t.oid AND b.target_oid = r.oid AND r.rolname = 'role1' +ORDER BY r.rolname, t.spcname, b.target_type; + rolname | spcname | target_type +---------+-------------------+----------------------- + role1 | custom_tablespace | ROLE_TABLESPACE_QUOTA +(1 row) + +DROP TABLE IF EXISTS t_in_custom_tablespace; +SELECT diskquota.set_role_tablespace_quota('role1', 'custom_tablespace', '-1'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SET ROLE role2; +-- with hard limits on +\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null +\! gpstop -u > /dev/null +SELECT diskquota.set_role_tablespace_quota('role2', 'custom_tablespace', '1 MB'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP TABLE IF EXISTS t_in_custom_tablespace; +NOTICE: table "t_in_custom_tablespace" does not exist, skipping +-- expect insert to fail because of hard limits +CREATE TABLE t_in_custom_tablespace (i) AS SELECT generate_series(1, 50000000) DISTRIBUTED BY (i); +ERROR: tablespace: 40746, role: 40739 diskquota exceeded (seg2 127.0.0.1:7004 pid=1973467) +-- clean up +DROP TABLE IF EXISTS t_in_custom_tablespace; +NOTICE: table "t_in_custom_tablespace" does not exist, skipping +\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\!
gpstop -u > /dev/null +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION IF EXISTS diskquota; +\c contrib_regression; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP DATABASE IF EXISTS db_with_tablespace; +DROP TABLESPACE IF EXISTS custom_tablespace; +RESET ROLE; +DROP ROLE IF EXISTS role1; +DROP ROLE IF EXISTS role2; diff --git a/tests/regress/expected7/test_delete_quota.out b/tests/regress/expected7/test_delete_quota.out new file mode 100644 index 00000000000..967dd917f74 --- /dev/null +++ b/tests/regress/expected7/test_delete_quota.out @@ -0,0 +1,37 @@ +-- Test delete disk quota +CREATE SCHEMA deleteschema; +SELECT diskquota.set_schema_quota('deleteschema', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +SET search_path TO deleteschema; +CREATE TABLE c (i INT) DISTRIBUTED BY (i); +-- fill the schema over its quota; this first over-quota insert succeeds (soft limit) +INSERT INTO c SELECT generate_series(1,100000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect fail +INSERT INTO c SELECT generate_series(1,100); +ERROR: schema's disk space quota exceeded with name: deleteschema +SELECT diskquota.set_schema_quota('deleteschema', '-1 MB'); + set_schema_quota +------------------ + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +INSERT INTO c SELECT generate_series(1,100); +DROP TABLE c; +RESET search_path; +DROP SCHEMA deleteschema; diff --git a/tests/regress/expected7/test_drop_after_pause.out b/tests/regress/expected7/test_drop_after_pause.out new file mode 100644 index 00000000000..24cbb191ab2 --- /dev/null +++ b/tests/regress/expected7/test_drop_after_pause.out @@ -0,0 +1,64 @@ +CREATE DATABASE test_drop_after_pause; +\c test_drop_after_pause +CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null +\! gpstop -u > /dev/null +CREATE SCHEMA SX; +CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); +SELECT diskquota.set_schema_quota('SX', '1MB'); + set_schema_quota +------------------ + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +INSERT INTO SX.a SELECT generate_series(1,10000000); -- expect insert fail +ERROR: schema's disk space quota exceeded with name: 25290 (seg2 127.0.0.1:7004 pid=1905198) +\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\!
gpstop -u > /dev/null +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +\c contrib_regression +DROP DATABASE test_drop_after_pause; diff --git a/tests/regress/expected7/test_drop_extension.out b/tests/regress/expected7/test_drop_extension.out new file mode 100644 index 00000000000..b946654c7f3 --- /dev/null +++ b/tests/regress/expected7/test_drop_extension.out @@ -0,0 +1,13 @@ +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; diff --git a/tests/regress/expected7/test_drop_table.out b/tests/regress/expected7/test_drop_table.out new file mode 100644 index 00000000000..d50db9e1b64 --- /dev/null +++ b/tests/regress/expected7/test_drop_table.out @@ -0,0 +1,34 @@ +-- Test Drop table +CREATE SCHEMA sdrtbl; +SELECT diskquota.set_schema_quota('sdrtbl', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +SET search_path TO sdrtbl; +CREATE TABLE a(i INT) DISTRIBUTED BY (i); +CREATE TABLE a2(i INT) DISTRIBUTED BY (i); +INSERT INTO a SELECT generate_series(1,100); +-- fill the schema over its quota; this first over-quota insert succeeds (soft limit) +INSERT INTO a SELECT generate_series(1,100000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert fail +INSERT INTO a2 SELECT generate_series(1,100); +ERROR: schema's disk space quota exceeded with name: sdrtbl +DROP TABLE a; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +INSERT INTO a2 SELECT generate_series(1,100); +DROP TABLE a2; +RESET search_path; +DROP SCHEMA sdrtbl; diff --git a/tests/regress/expected7/test_extension.out b/tests/regress/expected7/test_extension.out new file mode 100644 index 00000000000..fbd8483f6c4 --- /dev/null +++ b/tests/regress/expected7/test_extension.out @@ -0,0 +1,523 @@ +-- NOTE: when running this script, you must make sure that there is no diskquota +-- worker process. +CREATE DATABASE dbx0 ; +CREATE DATABASE dbx1 ; +CREATE DATABASE dbx2 ; +CREATE DATABASE dbx3 ; +CREATE DATABASE dbx4 ; +CREATE DATABASE dbx5 ; +CREATE DATABASE dbx6 ; +CREATE DATABASE dbx7 ; +CREATE DATABASE dbx8 ; +CREATE DATABASE dbx9 ; +CREATE DATABASE dbx10 ; +--start_ignore +\! gpconfig -c diskquota.max_workers -v 20 --skipvalidation +20230117:12:50:10:1924108 gpconfig:zhrt:zhrt-[INFO]:-completed successfully with parameters '-c diskquota.max_workers -v 20 --skipvalidation' +\! gpstop -arf +20230117:12:50:11:1924636 gpstop:zhrt:zhrt-[INFO]:-Starting gpstop with args: -arf +20230117:12:50:11:1924636 gpstop:zhrt:zhrt-[INFO]:-Gathering information and validating the environment... +20230117:12:50:11:1924636 gpstop:zhrt:zhrt-[INFO]:-Obtaining Greenplum Coordinator catalog information +20230117:12:50:11:1924636 gpstop:zhrt:zhrt-[INFO]:-Obtaining Segment details from coordinator...
+20230117:12:50:11:1924636 gpstop:zhrt:zhrt-[INFO]:-Greenplum Version: 'postgres (Greenplum Database) 7.0.0-alpha.0+dev.16171.g005ee83c46 build dev' +20230117:12:50:11:1924636 gpstop:zhrt:zhrt-[INFO]:-Commencing Coordinator instance shutdown with mode='fast' +20230117:12:50:11:1924636 gpstop:zhrt:zhrt-[INFO]:-Coordinator segment instance directory=/home/zhrt/workspace/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 +20230117:12:50:11:1924636 gpstop:zhrt:zhrt-[INFO]:-Attempting forceful termination of any leftover coordinator process +20230117:12:50:11:1924636 gpstop:zhrt:zhrt-[INFO]:-Terminating processes for segment /home/zhrt/workspace/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 +20230117:12:50:11:1924636 gpstop:zhrt:zhrt-[INFO]:-Stopping coordinator standby host zhrt mode=fast +20230117:12:50:12:1924636 gpstop:zhrt:zhrt-[INFO]:-Successfully shutdown standby process on zhrt +20230117:12:50:12:1924636 gpstop:zhrt:zhrt-[INFO]:-Targeting dbid [2, 5, 3, 6, 4, 7] for shutdown +20230117:12:50:12:1924636 gpstop:zhrt:zhrt-[INFO]:-Commencing parallel primary segment instance shutdown, please wait... +20230117:12:50:12:1924636 gpstop:zhrt:zhrt-[INFO]:-0.00% of jobs completed +20230117:12:50:12:1924636 gpstop:zhrt:zhrt-[INFO]:-100.00% of jobs completed +20230117:12:50:12:1924636 gpstop:zhrt:zhrt-[INFO]:-Commencing parallel mirror segment instance shutdown, please wait... +20230117:12:50:12:1924636 gpstop:zhrt:zhrt-[INFO]:-0.00% of jobs completed +20230117:12:50:12:1924636 gpstop:zhrt:zhrt-[INFO]:-100.00% of jobs completed +20230117:12:50:12:1924636 gpstop:zhrt:zhrt-[INFO]:----------------------------------------------------- +20230117:12:50:12:1924636 gpstop:zhrt:zhrt-[INFO]:- Segments stopped successfully = 6 +20230117:12:50:12:1924636 gpstop:zhrt:zhrt-[INFO]:- Segments with errors during stop = 0 +20230117:12:50:12:1924636 gpstop:zhrt:zhrt-[INFO]:----------------------------------------------------- +20230117:12:50:12:1924636 gpstop:zhrt:zhrt-[INFO]:-Successfully shutdown 6 of 6 segment instances +20230117:12:50:12:1924636 gpstop:zhrt:zhrt-[INFO]:-Database successfully shutdown with no errors reported +20230117:12:50:12:1924636 gpstop:zhrt:zhrt-[INFO]:-Restarting System... 
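(A note on the restart above: diskquota.max_workers caps how many diskquota background workers can refresh monitored databases at the same time, which is why this test raises it to 20 before exercising eleven databases. The exact scheduling of workers across databases is an implementation detail, so the test relies only on the per-database epoch wait sketched here to know that the current database has been refreshed:)

SELECT diskquota.wait_for_worker_new_epoch();  -- blocks until a worker completes a fresh refresh of this database, then returns t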
+--end_ignore +\c +show max_worker_processes; + max_worker_processes +---------------------- + 20 +(1 row) + +show diskquota.max_workers; + diskquota.max_workers +----------------------- + 20 +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +\c dbx0 +CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +CREATE SCHEMA SX; +CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); +SELECT diskquota.set_schema_quota('SX', '1MB'); + set_schema_quota +------------------ + +(1 row) + +INSERT INTO SX.a values(generate_series(0, 100000)); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +INSERT INTO SX.a values(generate_series(0, 10)); +ERROR: schema's disk space quota exceeded with name: sx +DROP TABLE SX.a; +\c dbx1 +CREATE SCHEMA SX; +CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); +INSERT INTO SX.a values(generate_series(0, 100000)); +CREATE EXTENSION diskquota; +WARNING: [diskquota] diskquota is not ready because current database is not empty +HINT: please run 'SELECT diskquota.init_table_size_table();' to initialize diskquota +SELECT diskquota.init_table_size_table(); + init_table_size_table +----------------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT diskquota.set_schema_quota('SX', '1MB'); + set_schema_quota +------------------ + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +INSERT INTO SX.a values(generate_series(0, 10)); +ERROR: schema's disk space quota exceeded with name: sx +DROP TABLE SX.a; +\c dbx2 +CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +CREATE SCHEMA SX; +CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); +SELECT diskquota.set_schema_quota('SX', '1MB'); + set_schema_quota +------------------ + +(1 row) + +INSERT INTO SX.a values(generate_series(0, 100000)); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +INSERT INTO SX.a values(generate_series(0, 10)); +ERROR: schema's disk space quota exceeded with name: sx +DROP TABLE SX.a; +\c dbx3 +CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +CREATE SCHEMA SX; +CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); +SELECT diskquota.set_schema_quota('SX', '1MB'); + set_schema_quota +------------------ + +(1 row) + +INSERT INTO SX.a values(generate_series(0, 100000)); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +INSERT INTO SX.a values(generate_series(0, 10)); +ERROR: schema's disk space quota exceeded with name: sx +DROP TABLE SX.a; +\c dbx4 +CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +CREATE SCHEMA SX; +CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); +SELECT diskquota.set_schema_quota('SX', '1MB'); + set_schema_quota +------------------ + +(1 row) + +INSERT INTO SX.a values(generate_series(0, 100000)); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch 
+--------------------------- + t +(1 row) + +INSERT INTO SX.a values(generate_series(0, 10)); +ERROR: schema's disk space quota exceeded with name: sx +DROP TABLE SX.a; +\c dbx5 +CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +CREATE SCHEMA SX; +CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); +SELECT diskquota.set_schema_quota('SX', '1MB'); + set_schema_quota +------------------ + +(1 row) + +INSERT INTO SX.a values(generate_series(0, 100000)); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +INSERT INTO SX.a values(generate_series(0, 10)); +ERROR: schema's disk space quota exceeded with name: sx +DROP TABLE SX.a; +\c dbx6 +CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +CREATE SCHEMA SX; +CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); +SELECT diskquota.set_schema_quota('SX', '1MB'); + set_schema_quota +------------------ + +(1 row) + +INSERT INTO SX.a values(generate_series(0, 100000)); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +INSERT INTO SX.a values(generate_series(0, 10)); +ERROR: schema's disk space quota exceeded with name: sx +DROP TABLE SX.a; +\c dbx7 +CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +CREATE SCHEMA SX; +CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); +SELECT diskquota.set_schema_quota('SX', '1MB'); + set_schema_quota +------------------ + +(1 row) + +INSERT INTO SX.a values(generate_series(0, 100000)); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +INSERT INTO SX.a values(generate_series(0, 10)); +ERROR: schema's disk space quota exceeded with name: sx +DROP TABLE SX.a; +\c dbx8 +CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +CREATE SCHEMA SX; +CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); +SELECT diskquota.set_schema_quota('SX', '1MB'); + set_schema_quota +------------------ + +(1 row) + +INSERT INTO SX.a values(generate_series(0, 100000)); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +INSERT INTO SX.a values(generate_series(0, 10)); +ERROR: schema's disk space quota exceeded with name: sx +DROP TABLE SX.a; +\c dbx9 +CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +\c dbx10 +CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +\c dbx0 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +\c dbx1 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +\c dbx2 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch 
+--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +\c dbx3 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +\c dbx4 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +\c dbx5 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +\c dbx6 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +\c dbx7 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +\c dbx8 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +\c dbx9 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +\c dbx10 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +\c contrib_regression +DROP DATABASE dbx0 ; +DROP DATABASE dbx1 ; +DROP DATABASE dbx2 ; +DROP DATABASE dbx3 ; +DROP DATABASE dbx4 ; +DROP DATABASE dbx5 ; +DROP DATABASE dbx6 ; +DROP DATABASE dbx7 ; +DROP DATABASE dbx8 ; +DROP DATABASE dbx9 ; +DROP DATABASE dbx10 ; +--start_ignore +\! gpconfig -c diskquota.max_workers -v 1 --skipvalidation +20230117:12:52:37:1941441 gpconfig:zhrt:zhrt-[INFO]:-completed successfully with parameters '-c diskquota.max_workers -v 1 --skipvalidation' +\! gpstop -arf; +20230117:12:52:37:1941981 gpstop:zhrt:zhrt-[INFO]:-Starting gpstop with args: -arf +20230117:12:52:37:1941981 gpstop:zhrt:zhrt-[INFO]:-Gathering information and validating the environment... +20230117:12:52:37:1941981 gpstop:zhrt:zhrt-[INFO]:-Obtaining Greenplum Coordinator catalog information +20230117:12:52:37:1941981 gpstop:zhrt:zhrt-[INFO]:-Obtaining Segment details from coordinator... 
+20230117:12:52:37:1941981 gpstop:zhrt:zhrt-[INFO]:-Greenplum Version: 'postgres (Greenplum Database) 7.0.0-alpha.0+dev.16171.g005ee83c46 build dev' +20230117:12:52:37:1941981 gpstop:zhrt:zhrt-[INFO]:-Commencing Coordinator instance shutdown with mode='fast' +20230117:12:52:37:1941981 gpstop:zhrt:zhrt-[INFO]:-Coordinator segment instance directory=/home/zhrt/workspace/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 +20230117:12:52:37:1941981 gpstop:zhrt:zhrt-[INFO]:-Attempting forceful termination of any leftover coordinator process +20230117:12:52:37:1941981 gpstop:zhrt:zhrt-[INFO]:-Terminating processes for segment /home/zhrt/workspace/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 +20230117:12:52:37:1941981 gpstop:zhrt:zhrt-[INFO]:-Stopping coordinator standby host zhrt mode=fast +20230117:12:52:38:1941981 gpstop:zhrt:zhrt-[INFO]:-Successfully shutdown standby process on zhrt +20230117:12:52:38:1941981 gpstop:zhrt:zhrt-[INFO]:-Targeting dbid [2, 5, 3, 6, 4, 7] for shutdown +20230117:12:52:38:1941981 gpstop:zhrt:zhrt-[INFO]:-Commencing parallel primary segment instance shutdown, please wait... +20230117:12:52:38:1941981 gpstop:zhrt:zhrt-[INFO]:-0.00% of jobs completed +20230117:12:52:38:1941981 gpstop:zhrt:zhrt-[INFO]:-100.00% of jobs completed +20230117:12:52:38:1941981 gpstop:zhrt:zhrt-[INFO]:-Commencing parallel mirror segment instance shutdown, please wait... +20230117:12:52:38:1941981 gpstop:zhrt:zhrt-[INFO]:-0.00% of jobs completed +20230117:12:52:38:1941981 gpstop:zhrt:zhrt-[INFO]:-100.00% of jobs completed +20230117:12:52:38:1941981 gpstop:zhrt:zhrt-[INFO]:----------------------------------------------------- +20230117:12:52:38:1941981 gpstop:zhrt:zhrt-[INFO]:- Segments stopped successfully = 6 +20230117:12:52:38:1941981 gpstop:zhrt:zhrt-[INFO]:- Segments with errors during stop = 0 +20230117:12:52:38:1941981 gpstop:zhrt:zhrt-[INFO]:----------------------------------------------------- +20230117:12:52:38:1941981 gpstop:zhrt:zhrt-[INFO]:-Successfully shutdown 6 of 6 segment instances +20230117:12:52:38:1941981 gpstop:zhrt:zhrt-[INFO]:-Database successfully shutdown with no errors reported +20230117:12:52:38:1941981 gpstop:zhrt:zhrt-[INFO]:-Restarting System... +--end_ignore +\c +show diskquota.max_workers; + diskquota.max_workers +----------------------- + 1 +(1 row) + diff --git a/tests/regress/expected7/test_fast_disk_check.out b/tests/regress/expected7/test_fast_disk_check.out new file mode 100644 index 00000000000..d883934ffcf --- /dev/null +++ b/tests/regress/expected7/test_fast_disk_check.out @@ -0,0 +1,23 @@ +-- Test SCHEMA +CREATE SCHEMA s1; +SET search_path to s1; +CREATE TABLE a(i int) DISTRIBUTED BY (i); +INSERT INTO a SELECT generate_series(1,200000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT (pg_database_size(oid)-dbsize)/dbsize < 0.1 FROM pg_database, diskquota.show_fast_database_size_view WHERE datname='contrib_regression'; +WARNING: skipping "__gp_log_segment_ext" --- cannot calculate this foreign table size +WARNING: skipping "__gp_log_master_ext" --- cannot calculate this foreign table size +WARNING: skipping "gp_disk_free" --- cannot calculate this foreign table size + ?column? 
+---------- + f +(1 row) + +RESET search_path; +DROP TABLE s1.a; +DROP SCHEMA s1; diff --git a/tests/regress/expected7/test_fetch_table_stat.out b/tests/regress/expected7/test_fetch_table_stat.out new file mode 100644 index 00000000000..b9be7aec6b2 --- /dev/null +++ b/tests/regress/expected7/test_fetch_table_stat.out @@ -0,0 +1,35 @@ +-- +-- 1. Test that when an error occurs in diskquota_fetch_table_stat +-- the error message is preserved for us to debug. +-- +CREATE TABLE t_error_handling (i int) DISTRIBUTED BY (i); +-- Inject an error to a segment server, since this UDF is only called on segments. +SELECT gp_inject_fault_infinite('diskquota_fetch_table_stat', 'error', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) + +-- Dispatch diskquota_fetch_table_stat to segments. +-- There should be a warning message from segment server saying: +-- fault triggered, fault name:'diskquota_fetch_table_stat' fault type:'error' +-- We're not interested in the oid here, we aggregate the result by COUNT(*). +SELECT COUNT(*) + FROM (SELECT diskquota.diskquota_fetch_table_stat(1, array[(SELECT oid FROM pg_class WHERE relname='t_error_handling')]) + FROM gp_dist_random('gp_id') WHERE gp_segment_id=0) AS count; + count +------- + 1 +(1 row) + +-- Reset the fault injector to prevent future failure. +SELECT gp_inject_fault_infinite('diskquota_fetch_table_stat', 'reset', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) + +-- Do some clean-ups. +DROP TABLE t_error_handling; diff --git a/tests/regress/expected7/test_index.out b/tests/regress/expected7/test_index.out new file mode 100644 index 00000000000..a35ec4f95cd --- /dev/null +++ b/tests/regress/expected7/test_index.out @@ -0,0 +1,133 @@ +-- Test schema +-- start_ignore +\! 
mkdir -p /tmp/indexspc +-- end_ignore +CREATE SCHEMA indexschema1; +DROP TABLESPACE IF EXISTS indexspc; +NOTICE: tablespace "indexspc" does not exist, skipping +CREATE TABLESPACE indexspc LOCATION '/tmp/indexspc'; +SET search_path TO indexschema1; +CREATE TABLE test_index_a(i int) TABLESPACE indexspc DISTRIBUTED BY (i); +INSERT INTO test_index_a SELECT generate_series(1,20000); +SELECT diskquota.set_schema_tablespace_quota('indexschema1', 'indexspc','2 MB'); + set_schema_tablespace_quota +----------------------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes +FROM diskquota.show_fast_schema_tablespace_quota_view +WHERE schema_name='indexschema1' and tablespace_name='indexspc'; + schema_name | tablespace_name | quota_in_mb | nspsize_tablespace_in_bytes +--------------+-----------------+-------------+----------------------------- + indexschema1 | indexspc | 2 | 1081344 +(1 row) + +SELECT tableid::regclass, size, segid +FROM diskquota.table_size +WHERE tableid = 'test_index_a'::regclass +ORDER BY segid; + tableid | size | segid +--------------+---------+------- + test_index_a | 1081344 | -1 + test_index_a | 360448 | 0 + test_index_a | 360448 | 1 + test_index_a | 360448 | 2 +(4 rows) + +-- create index for the table, index in default tablespace +CREATE INDEX a_index ON test_index_a(i); +INSERT INTO test_index_a SELECT generate_series(1,10000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert success +INSERT INTO test_index_a SELECT generate_series(1,100); +SELECT schema_name,tablespace_name,quota_in_mb,nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name ='indexschema1' and tablespace_name='indexspc'; + schema_name | tablespace_name | quota_in_mb | nspsize_tablespace_in_bytes +--------------+-----------------+-------------+----------------------------- + indexschema1 | indexspc | 2 | 1441792 +(1 row) + +SELECT tableid::regclass, size, segid +FROM diskquota.table_size +WHERE tableid = 'test_index_a'::regclass +ORDER BY segid; + tableid | size | segid +--------------+---------+------- + test_index_a | 1441792 | -1 + test_index_a | 491520 | 0 + test_index_a | 491520 | 1 + test_index_a | 458752 | 2 +(4 rows) + +SELECT tableid::regclass, size, segid +FROM diskquota.table_size +WHERE tableid = 'a_index'::regclass +ORDER BY segid; + tableid | size | segid +---------+---------+------- + a_index | 1212416 | -1 + a_index | 393216 | 0 + a_index | 393216 | 1 + a_index | 393216 | 2 +(4 rows) + +-- add index to tablespace indexspc +ALTER index a_index SET TABLESPACE indexspc; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT schema_name,tablespace_name,quota_in_mb,nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name ='indexschema1' and tablespace_name='indexspc'; + schema_name | tablespace_name | quota_in_mb | nspsize_tablespace_in_bytes +--------------+-----------------+-------------+----------------------------- + indexschema1 | indexspc | 2 | 2654208 +(1 row) + +SELECT size, segid FROM diskquota.table_size , pg_class where tableid=oid and (relname='test_index_a' or relname='a_index') and segid=-1; + size | segid +---------+------- + 1212416 | -1 + 1441792 | -1 +(2 rows) + +-- expect 
insert fail +INSERT INTO test_index_a SELECT generate_series(1,100); +ERROR: tablespace: indexspc, schema: indexschema1 diskquota exceeded +-- index tablespace quota exceeded +ALTER table test_index_a SET TABLESPACE pg_default; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert success +INSERT INTO test_index_a SELECT generate_series(1,100); +INSERT INTO test_index_a SELECT generate_series(1,200000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert fail +INSERT INTO test_index_a SELECT generate_series(1,100); +ERROR: tablespace: indexspc, schema: indexschema1 diskquota exceeded +RESET search_path; +DROP INDEX indexschema1.a_index; +DROP TABLE indexschema1.test_index_a; +DROP SCHEMA indexschema1; +DROP TABLESPACE indexspc; diff --git a/tests/regress/expected7/test_init_table_size_table.out b/tests/regress/expected7/test_init_table_size_table.out new file mode 100644 index 00000000000..38a45374795 --- /dev/null +++ b/tests/regress/expected7/test_init_table_size_table.out @@ -0,0 +1,71 @@ +-- heap table +CREATE TABLE t(i int) DISTRIBUTED BY (i); +INSERT INTO t SELECT generate_series(1, 100000); +-- heap table index +CREATE INDEX idx on t(i); +-- toast table +CREATE TABLE toast(t text) DISTRIBUTED BY (t); +INSERT INTO toast SELECT repeat('a', 10000) FROM generate_series(1, 1000); +-- toast table index +CREATE INDEX toast_idx on toast(t); +-- AO table +CREATE TABLE ao (i int) WITH (appendonly=true) DISTRIBUTED BY (i); +INSERT INTO ao SELECT generate_series(1, 100000); +-- AO table index +CREATE INDEX ao_idx on ao(i); +-- AOCS table +CREATE TABLE aocs (i int, t text) WITH (appendonly=true, orientation=column) DISTRIBUTED BY (i); +INSERT INTO aocs SELECT i, repeat('a', 1000) FROM generate_series(1, 10000) AS i; +-- AOCS table index +CREATE INDEX aocs_idx on aocs(i); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- Tables here are fetched by diskquota_fetch_table_stat() +SELECT tableid::regclass, size, segid +FROM diskquota.table_size +WHERE segid = -1 AND tableid::regclass::name NOT LIKE '%.%' +ORDER BY tableid; + tableid | size | segid +-----------+----------+------- + t | 3932160 | -1 + idx | 2490368 | -1 + toast | 393216 | -1 + toast_idx | 327680 | -1 + ao | 1558696 | -1 + ao_idx | 2490368 | -1 + aocs | 10649752 | -1 + aocs_idx | 524288 | -1 +(8 rows) + +-- init diskquota.table_size +SELECT diskquota.init_table_size_table(); + init_table_size_table +----------------------- + +(1 row) + +-- diskquota.table_size should not change after init_table_size_table() +SELECT tableid::regclass, size, segid +FROM diskquota.table_size +WHERE segid = -1 AND tableid::regclass::name NOT LIKE '%.%' +ORDER BY tableid; + tableid | size | segid +-----------+----------+------- + t | 3932160 | -1 + idx | 2490368 | -1 + toast | 393216 | -1 + toast_idx | 327680 | -1 + ao | 1558696 | -1 + ao_idx | 2490368 | -1 + aocs | 10649752 | -1 + aocs_idx | 524288 | -1 +(8 rows) + +DROP TABLE t; +DROP TABLE toast; +DROP TABLE ao; +DROP TABLE aocs; diff --git a/tests/regress/expected7/test_many_active_tables.out b/tests/regress/expected7/test_many_active_tables.out new file mode 100644 index 00000000000..f3298c1ce52 --- /dev/null +++ b/tests/regress/expected7/test_many_active_tables.out @@ -0,0 +1,31 @@ +CREATE TABLE t1 (pk int, val int) +DISTRIBUTED BY (pk) +PARTITION BY RANGE (pk) 
(START (1) END (1000) EVERY (1)); +INSERT INTO t1 +SELECT pk, val +FROM generate_series(1, 10000) AS val, generate_series(1, 999) AS pk; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT count(*) >= 999 FROM diskquota.table_size WHERE size > 0; + ?column? +---------- + t +(1 row) + +DROP TABLE t1; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT count(*) < 999 FROM diskquota.table_size WHERE size > 0; + ?column? +---------- + t +(1 row) + diff --git a/tests/regress/expected7/test_mistake.out b/tests/regress/expected7/test_mistake.out new file mode 100644 index 00000000000..fab4c6eb2f7 --- /dev/null +++ b/tests/regress/expected7/test_mistake.out @@ -0,0 +1,34 @@ +-- to make sure that the schema 'notfoundns' is really not found +select nspname from pg_namespace where nspname = 'notfoundns'; + nspname +--------- +(0 rows) + +select diskquota.set_schema_quota('notfoundns', '1 MB'); +ERROR: schema "notfoundns" does not exist +DROP SCHEMA IF EXISTS nmistake; +NOTICE: schema "nmistake" does not exist, skipping +CREATE SCHEMA nmistake; +select diskquota.set_schema_quota('nmistake', '0 MB'); +ERROR: disk quota can not be set to 0 MB +DROP ROLE IF EXISTS rmistake; +NOTICE: role "rmistake" does not exist, skipping +CREATE ROLE rmistake; +NOTICE: resource queue required -- using default resource queue "pg_default" +select diskquota.set_role_quota('rmistake', '0 MB'); +ERROR: disk quota can not be set to 0 MB +-- start_ignore +\! mkdir -p /tmp/spcmistake +-- end_ignore +DROP TABLESPACE IF EXISTS spcmistake; +NOTICE: tablespace "spcmistake" does not exist, skipping +CREATE TABLESPACE spcmistake LOCATION '/tmp/spcmistake'; +SELECT diskquota.set_schema_tablespace_quota('nmistake', 'spcmistake','0 MB'); +ERROR: disk quota can not be set to 0 MB +SELECT diskquota.set_role_tablespace_quota('rmistake', 'spcmistake','0 MB'); +ERROR: disk quota can not be set to 0 MB +SELECT diskquota.set_per_segment_quota('spcmistake', 0); +ERROR: per segment quota ratio can not be set to 0 +DROP SCHEMA nmistake; +DROP ROLE rmistake; +DROP TABLESPACE spcmistake; diff --git a/tests/regress/expected7/test_partition.out b/tests/regress/expected7/test_partition.out new file mode 100644 index 00000000000..322c00c6422 --- /dev/null +++ b/tests/regress/expected7/test_partition.out @@ -0,0 +1,63 @@ +-- Test partition table +CREATE SCHEMA s8; +SELECT diskquota.SET_schema_quota('s8', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +SET search_path TO s8; +CREATE TABLE measurement ( + city_id int not null, + logdate date not null, + peaktemp int, + unitsales int +)PARTITION BY RANGE (logdate) +( + PARTITION Feb06 START (date '2006-02-01') INCLUSIVE, + PARTITION Mar06 START (date '2006-03-01') INCLUSIVE + END (date '2016-04-01') EXCLUSIVE +); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'city_id' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
+INSERT INTO measurement SELECT generate_series(1,100), '2006-02-02' ,1,1; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +INSERT INTO measurement SELECT 1, '2006-02-02' ,1,1; +-- expect insert fail +INSERT INTO measurement SELECT generate_series(1,100000), '2006-03-02' ,1,1; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert fail +INSERT INTO measurement SELECT 1, '2006-02-02' ,1,1; +ERROR: schema's disk space quota exceeded with name: s8 +-- expect insert fail +INSERT INTO measurement SELECT 1, '2006-03-03' ,1,1; +ERROR: schema's disk space quota exceeded with name: s8 +DELETE FROM measurement WHERE logdate='2006-03-02'; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +VACUUM FULL measurement; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +INSERT INTO measurement SELECT 1, '2006-02-02' ,1,1; +INSERT INTO measurement SELECT 1, '2006-03-03' ,1,1; +DROP TABLE measurement; +RESET search_path; +DROP SCHEMA s8; diff --git a/tests/regress/expected7/test_pause_and_resume.out b/tests/regress/expected7/test_pause_and_resume.out new file mode 100644 index 00000000000..18ae2573d36 --- /dev/null +++ b/tests/regress/expected7/test_pause_and_resume.out @@ -0,0 +1,70 @@ +-- Test pause and resume. +CREATE SCHEMA s1; +SET search_path TO s1; +CREATE TABLE a(i int) DISTRIBUTED BY (i); +-- expect insert succeed +INSERT INTO a SELECT generate_series(1,100000); +SELECT diskquota.set_schema_quota('s1', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert fail +INSERT INTO a SELECT generate_series(1,100); +ERROR: schema's disk space quota exceeded with name: s1 +-- pause extension +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size +WHERE tableid = 'a'::regclass AND segid = -1; + tableid | size | segid +---------+---------+------- + a | 3932160 | -1 +(1 row) + +-- expect insert succeed +INSERT INTO a SELECT generate_series(1,100000); +-- resume extension +SELECT diskquota.resume(); + resume +-------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert fail +INSERT INTO a SELECT generate_series(1,100); +ERROR: schema's disk space quota exceeded with name: s1 +-- table size should be updated after resume +SELECT tableid::regclass, size, segid FROM diskquota.table_size +WHERE tableid = 'a'::regclass AND segid = -1; + tableid | size | segid +---------+---------+------- + a | 7569408 | -1 +(1 row) + +RESET search_path; +DROP TABLE s1.a; +DROP SCHEMA s1; diff --git a/tests/regress/expected7/test_pause_and_resume_multiple_db.out b/tests/regress/expected7/test_pause_and_resume_multiple_db.out new file mode 100644 index 00000000000..ed211216848 --- /dev/null +++ b/tests/regress/expected7/test_pause_and_resume_multiple_db.out @@ -0,0 +1,201 @@ +-- need 'contrib_regression' as test database +\c +CREATE SCHEMA s1; +SET search_path TO s1; +CREATE DATABASE test_pause_and_resume; +CREATE DATABASE 
test_new_create_database;
+\c test_pause_and_resume
+CREATE SCHEMA s1;
+CREATE EXTENSION diskquota;
+SELECT diskquota.wait_for_worker_new_epoch();
+ wait_for_worker_new_epoch
+---------------------------
+ t
+(1 row)
+
+\c contrib_regression
+CREATE TABLE s1.a(i int) DISTRIBUTED BY (i);
+INSERT INTO s1.a SELECT generate_series(1,100000); -- expect insert succeed
+\c test_pause_and_resume
+CREATE TABLE s1.a(i int) DISTRIBUTED BY (i);
+INSERT INTO s1.a SELECT generate_series(1,100000); -- expect insert succeed
+\c contrib_regression
+SELECT diskquota.set_schema_quota('s1', '1 MB');
+ set_schema_quota
+------------------
+
+(1 row)
+
+SELECT diskquota.wait_for_worker_new_epoch();
+ wait_for_worker_new_epoch
+---------------------------
+ t
+(1 row)
+
+INSERT INTO s1.a SELECT generate_series(1,100); -- expect insert fail
+ERROR: schema's disk space quota exceeded with name: s1
+\c test_pause_and_resume
+SELECT diskquota.set_schema_quota('s1', '1 MB');
+ set_schema_quota
+------------------
+
+(1 row)
+
+SELECT diskquota.wait_for_worker_new_epoch();
+ wait_for_worker_new_epoch
+---------------------------
+ t
+(1 row)
+
+INSERT INTO s1.a SELECT generate_series(1,100); -- expect insert fail
+ERROR: schema's disk space quota exceeded with name: s1
+\c contrib_regression
+SELECT diskquota.pause(); -- pause extension, only affects the current database
+ pause
+-------
+
+(1 row)
+
+SELECT diskquota.wait_for_worker_new_epoch();
+ wait_for_worker_new_epoch
+---------------------------
+ t
+(1 row)
+
+SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 's1.a'::regclass AND segid = -1;
+ tableid | size | segid
+---------+---------+-------
+ s1.a | 3932160 | -1
+(1 row)
+
+INSERT INTO s1.a SELECT generate_series(1,100); -- expect insert succeed
+\c test_pause_and_resume
+SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 's1.a'::regclass AND segid = -1;
+ tableid | size | segid
+---------+---------+-------
+ s1.a | 3932160 | -1
+(1 row)
+
+INSERT INTO s1.a SELECT generate_series(1,100); -- expect insert fail
+ERROR: schema's disk space quota exceeded with name: s1
+SELECT diskquota.pause(); -- pause extension, only affects the current database
+ pause
+-------
+
+(1 row)
+
+SELECT diskquota.wait_for_worker_new_epoch();
+ wait_for_worker_new_epoch
+---------------------------
+ t
+(1 row)
+
+SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 's1.a'::regclass AND segid = -1;
+ tableid | size | segid
+---------+---------+-------
+ s1.a | 3932160 | -1
+(1 row)
+
+INSERT INTO s1.a SELECT generate_series(1,100); -- expect insert succeed
+\c test_new_create_database;
+CREATE SCHEMA s1;
+CREATE EXTENSION diskquota;
+SELECT diskquota.wait_for_worker_new_epoch(); -- new database should be active although other databases are paused
+ wait_for_worker_new_epoch
+---------------------------
+ t
+(1 row)
+
+CREATE TABLE s1.a(i int) DISTRIBUTED BY (i);
+INSERT INTO s1.a SELECT generate_series(1,100000); -- expect insert succeed
+SELECT diskquota.set_schema_quota('s1', '1 MB');
+ set_schema_quota
+------------------
+
+(1 row)
+
+SELECT diskquota.wait_for_worker_new_epoch();
+ wait_for_worker_new_epoch
+---------------------------
+ t
+(1 row)
+
+INSERT INTO s1.a SELECT generate_series(1,100000); -- expect insert fail
+ERROR: schema's disk space quota exceeded with name: s1
+SELECT diskquota.pause(); -- pause extension, only affects the current database
+ pause
+-------
+
+(1 row)
+
+SELECT diskquota.wait_for_worker_new_epoch();
+ wait_for_worker_new_epoch
+---------------------------
+ t
+(1 row)
+
+INSERT INTO s1.a SELECT generate_series(1,100); -- expect insert succeed
+-- resume should only affect the current database
+SELECT diskquota.resume();
+ resume
+--------
+
+(1 row)
+
+SELECT diskquota.wait_for_worker_new_epoch();
+ wait_for_worker_new_epoch
+---------------------------
+ t
+(1 row)
+
+INSERT INTO s1.a SELECT generate_series(1,100); -- expect insert fail
+ERROR: schema's disk space quota exceeded with name: s1
+\c contrib_regression
+INSERT INTO s1.a SELECT generate_series(1,100); -- expect insert succeed
+SELECT diskquota.resume();
+ resume
+--------
+
+(1 row)
+
+SELECT diskquota.wait_for_worker_new_epoch();
+ wait_for_worker_new_epoch
+---------------------------
+ t
+(1 row)
+
+INSERT INTO s1.a SELECT generate_series(1,100); -- expect insert fail
+ERROR: schema's disk space quota exceeded with name: s1
+\c test_pause_and_resume
+SELECT diskquota.pause();
+ pause
+-------
+
+(1 row)
+
+SELECT diskquota.wait_for_worker_new_epoch();
+ wait_for_worker_new_epoch
+---------------------------
+ t
+(1 row)
+
+DROP EXTENSION diskquota;
+\c test_new_create_database
+SELECT diskquota.pause();
+ pause
+-------
+
+(1 row)
+
+SELECT diskquota.wait_for_worker_new_epoch();
+ wait_for_worker_new_epoch
+---------------------------
+ t
+(1 row)
+
+DROP EXTENSION diskquota;
+\c contrib_regression
+DROP SCHEMA s1 CASCADE;
+NOTICE: drop cascades to table s1.a
+DROP DATABASE test_pause_and_resume;
+DROP DATABASE test_new_create_database;
diff --git a/tests/regress/expected7/test_primary_failure.out b/tests/regress/expected7/test_primary_failure.out
new file mode 100644
index 00000000000..4e3ffa185d8
--- /dev/null
+++ b/tests/regress/expected7/test_primary_failure.out
@@ -0,0 +1,126 @@
+CREATE SCHEMA ftsr;
+SELECT diskquota.set_schema_quota('ftsr', '1 MB');
+ set_schema_quota
+------------------
+
+(1 row)
+
+SET search_path TO ftsr;
+create or replace language plpythonu;
+ERROR: could not access file "$libdir/plpython2": No such file or directory
+--
+-- pg_ctl:
+-- datadir: data directory of process to target with `pg_ctl`
+-- command: commands valid for `pg_ctl`
+-- command_mode: modes valid for `pg_ctl -m`
+--
+create or replace function pg_ctl(datadir text, command text, command_mode text default 'immediate')
+returns text as $$
+ import subprocess
+ if command not in ('stop', 'restart'):
+ return 'Invalid command input'
+
+ cmd = 'pg_ctl -l postmaster.log -D %s ' % datadir
+ cmd = cmd + '-W -m %s %s' % (command_mode, command)
+
+ return subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).replace('.', '')
+$$ language plpythonu;
+ERROR: language "plpythonu" does not exist
+HINT: Use CREATE EXTENSION to load the language into the database.
+create or replace function pg_recoverseg(datadir text, command text)
+returns text as $$
+ import subprocess
+ cmd = 'gprecoverseg -%s -d %s; exit 0; ' % (command, datadir)
+ return subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).replace('.', '')
+$$ language plpythonu;
+ERROR: language "plpythonu" does not exist
+HINT: Use CREATE EXTENSION to load the language into the database.
+CREATE TABLE a(i int) DISTRIBUTED BY (i); +INSERT INTO a SELECT generate_series(1,100); +INSERT INTO a SELECT generate_series(1,100000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert fail +INSERT INTO a SELECT generate_series(1,100); +ERROR: schema's disk space quota exceeded with name: ftsr +-- now one of primary is down +select pg_ctl((select datadir from gp_segment_configuration c where c.role='p' and c.content=0), 'stop'); +ERROR: function pg_ctl(text, unknown) does not exist +LINE 1: select pg_ctl((select datadir from gp_segment_configuration ... + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +-- switch mirror to primary +select gp_request_fts_probe_scan(); + gp_request_fts_probe_scan +--------------------------- + t +(1 row) + +-- check GPDB status +select content, preferred_role, role, status, mode from gp_segment_configuration where content = 0; + content | preferred_role | role | status | mode +---------+----------------+------+--------+------ + 0 | p | p | u | s + 0 | m | m | u | s +(2 rows) + +-- expect insert fail +INSERT INTO a SELECT generate_series(1,100); +ERROR: schema's disk space quota exceeded with name: ftsr +-- increase quota +SELECT diskquota.set_schema_quota('ftsr', '200 MB'); + set_schema_quota +------------------ + +(1 row) + +-- pull up failed primary +-- start_ignore +select pg_recoverseg((select datadir from gp_segment_configuration c where c.role='p' and c.content=-1), 'a'); +ERROR: function pg_recoverseg(text, unknown) does not exist +LINE 1: select pg_recoverseg((select datadir from gp_segment_configu... + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +select pg_recoverseg((select datadir from gp_segment_configuration c where c.role='p' and c.content=-1), 'ar'); +ERROR: function pg_recoverseg(text, unknown) does not exist +LINE 1: select pg_recoverseg((select datadir from gp_segment_configu... + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +select pg_recoverseg((select datadir from gp_segment_configuration c where c.role='p' and c.content=-1), 'a'); +ERROR: function pg_recoverseg(text, unknown) does not exist +LINE 1: select pg_recoverseg((select datadir from gp_segment_configu... + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +select pg_recoverseg((select datadir from gp_segment_configuration c where c.role='p' and c.content=-1), 'ar'); +ERROR: function pg_recoverseg(text, unknown) does not exist +LINE 1: select pg_recoverseg((select datadir from gp_segment_configu... + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. 
+-- check GPDB status +select content, preferred_role, role, status, mode from gp_segment_configuration where content = 0; + content | preferred_role | role | status | mode +---------+----------------+------+--------+------ + 0 | p | p | u | s + 0 | m | m | u | s +(2 rows) + +-- end_ignore +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT quota_in_mb, nspsize_in_bytes from diskquota.show_fast_schema_quota_view where schema_name='ftsr'; + quota_in_mb | nspsize_in_bytes +-------------+------------------ + 200 | 3932160 +(1 row) + +INSERT INTO a SELECT generate_series(1,100); +DROP TABLE a; +DROP SCHEMA ftsr CASCADE; diff --git a/tests/regress/expected7/test_quota_view_no_table.out b/tests/regress/expected7/test_quota_view_no_table.out new file mode 100644 index 00000000000..27a0b315f5b --- /dev/null +++ b/tests/regress/expected7/test_quota_view_no_table.out @@ -0,0 +1,64 @@ +CREATE ROLE no_table SUPERUSER; +CREATE SCHEMA no_table; +SELECT diskquota.set_schema_quota('no_table', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +SELECT schema_name, quota_in_mb, nspsize_in_bytes +FROM diskquota.show_fast_schema_quota_view; + schema_name | quota_in_mb | nspsize_in_bytes +-------------+-------------+------------------ + no_table | 1 | 0 +(1 row) + +SELECT diskquota.set_role_quota('no_table', '1 MB'); + set_role_quota +---------------- + +(1 row) + +SELECT role_name, quota_in_mb, rolsize_in_bytes +FROM diskquota.show_fast_role_quota_view; + role_name | quota_in_mb | rolsize_in_bytes +-----------+-------------+------------------ + no_table | 1 | 0 +(1 row) + +SELECT diskquota.set_schema_tablespace_quota('no_table', 'pg_default', '1 MB'); + set_schema_tablespace_quota +----------------------------- + +(1 row) + +SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes +FROM diskquota.show_fast_schema_tablespace_quota_view; + schema_name | tablespace_name | quota_in_mb | nspsize_tablespace_in_bytes +-------------+-----------------+-------------+----------------------------- + no_table | pg_default | 1 | 0 +(1 row) + +SELECT diskquota.set_role_tablespace_quota('no_table', 'pg_default', '1 MB'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +SELECT role_name, tablespace_name , quota_in_mb, rolsize_tablespace_in_bytes +FROM diskquota.show_fast_role_tablespace_quota_view; + role_name | tablespace_name | quota_in_mb | rolsize_tablespace_in_bytes +-----------+-----------------+-------------+----------------------------- + no_table | pg_default | 1 | 0 +(1 row) + +DROP ROLE no_table; +DROP SCHEMA no_table; +-- Wait until the quota configs are removed from the memory +-- automatically after DROP. 
+SELECT diskquota.wait_for_worker_new_epoch();
+ wait_for_worker_new_epoch
+---------------------------
+ t
+(1 row)
+
diff --git a/tests/regress/expected7/test_readiness_logged.out b/tests/regress/expected7/test_readiness_logged.out
new file mode 100644
index 00000000000..c798f08b0ee
--- /dev/null
+++ b/tests/regress/expected7/test_readiness_logged.out
@@ -0,0 +1,38 @@
+CREATE DATABASE test_readiness_logged;
+\c test_readiness_logged
+CREATE TABLE t (i int) DISTRIBUTED BY (i);
+CREATE EXTENSION diskquota;
+WARNING: [diskquota] diskquota is not ready because current database is not empty
+HINT: please run 'SELECT diskquota.init_table_size_table();' to initialize diskquota
+CREATE EXTENSION diskquota_test;
+SELECT diskquota_test.wait('SELECT diskquota_test.check_cur_db_status(''UNREADY'');');
+ wait
+------
+ t
+(1 row)
+
+SELECT count(*) FROM gp_toolkit.gp_log_database
+WHERE logmessage = '[diskquota] diskquota is not ready';
+ count
+-------
+ 1
+(1 row)
+
+\! gpstop -raf > /dev/null
+\c
+SELECT diskquota_test.wait('SELECT diskquota_test.check_cur_db_status(''UNREADY'');');
+ wait
+------
+ t
+(1 row)
+
+SELECT count(*) FROM gp_toolkit.gp_log_database
+WHERE logmessage = '[diskquota] diskquota is not ready';
+ count
+-------
+ 2
+(1 row)
+
+DROP EXTENSION diskquota;
+\c contrib_regression
+DROP DATABASE test_readiness_logged;
diff --git a/tests/regress/expected7/test_recreate.out b/tests/regress/expected7/test_recreate.out
new file mode 100644
index 00000000000..c69cd82e77e
--- /dev/null
+++ b/tests/regress/expected7/test_recreate.out
@@ -0,0 +1,27 @@
+\c
+CREATE DATABASE test_recreate;
+\c diskquota
+INSERT INTO diskquota_namespace.database_list(dbid) SELECT oid FROM pg_database WHERE datname = 'test_recreate';
+\c test_recreate
+CREATE EXTENSION diskquota;
+SELECT diskquota.wait_for_worker_new_epoch(); -- should be ok
+ wait_for_worker_new_epoch
+---------------------------
+ t
+(1 row)
+
+SELECT diskquota.pause();
+ pause
+-------
+
+(1 row)
+
+SELECT diskquota.wait_for_worker_new_epoch();
+ wait_for_worker_new_epoch
+---------------------------
+ t
+(1 row)
+
+DROP EXTENSION diskquota;
+\c contrib_regression
+DROP DATABASE test_recreate;
diff --git a/tests/regress/expected7/test_rejectmap.out b/tests/regress/expected7/test_rejectmap.out
new file mode 100644
index 00000000000..f7dbccbe783
--- /dev/null
+++ b/tests/regress/expected7/test_rejectmap.out
@@ -0,0 +1,292 @@
+--
+-- This file contains tests for dispatching and querying the rejectmap.
+--
+CREATE SCHEMA s_rejectmap;
+SET search_path TO s_rejectmap;
+-- This function replaces the oid that appears in the auxiliary relation's name
+-- with the corresponding relname of that oid.
+CREATE OR REPLACE FUNCTION replace_oid_with_relname(given_name text)
+ RETURNS text AS $$
+ BEGIN
+ RETURN COALESCE(
+ REGEXP_REPLACE(given_name,
+ '^(pg_toast_|pg_aoseg_|pg_aovisimap_|pg_aoblkdir_|pg_aocsseg_)\d+',
+ '\1' ||
+ (SELECT relname FROM pg_class
+ WHERE oid=REGEXP_REPLACE(given_name, '\D', '', 'g')::oid), 'g'), given_name);
+ END;
+$$ LANGUAGE plpgsql;
+-- This function returns a valid tablespace oid.
+-- For role/namespace quota, return it as is.
+-- For namespace_tablespace/role_tablespace quota, return a non-zero tablespace oid.
+CREATE OR REPLACE FUNCTION get_real_tablespace_oid(block_type text, tablespaceoid oid) + RETURNS oid AS +$$ +BEGIN + CASE + WHEN (block_type = 'NAMESPACE') OR (block_type = 'ROLE') THEN RETURN tablespaceoid; + ELSE RETURN ( + CASE tablespaceoid + WHEN 0 THEN (SELECT dattablespace FROM pg_database WHERE datname = CURRENT_DATABASE()) + ELSE + tablespaceoid + END + ); + END CASE; +END; +$$ LANGUAGE plpgsql; +CREATE OR REPLACE FUNCTION block_relation_on_seg0(rel regclass, block_type text) + RETURNS void AS $$ + DECLARE + bt int; + targetoid oid; + tablespaceoid oid; + BEGIN + SELECT reltablespace INTO tablespaceoid FROM pg_class WHERE relname=rel::text; + CASE block_type + WHEN 'NAMESPACE' THEN + bt = 0; + SELECT relnamespace INTO targetoid + FROM pg_class WHERE relname=rel::text; + WHEN 'ROLE' THEN + bt = 1; + SELECT relowner INTO targetoid + FROM pg_class WHERE relname=rel::text; + WHEN 'NAMESPACE_TABLESPACE' THEN + bt = 2; + SELECT relnamespace INTO targetoid + FROM pg_class WHERE relname=rel::text; + WHEN 'ROLE_TABLESPACE' THEN + bt = 3; + SELECT relowner INTO targetoid + FROM pg_class WHERE relname=rel::text; + END CASE; + PERFORM diskquota.refresh_rejectmap( + ARRAY[ + ROW(targetoid, + (SELECT oid FROM pg_database WHERE datname=current_database()), + (SELECT get_real_tablespace_oid(block_type, tablespaceoid)), + bt, + false) + ]::diskquota.rejectmap_entry[], + ARRAY[rel]::oid[]) + FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + END; $$ +LANGUAGE 'plpgsql'; +-- +-- 1. Create an ordinary table and add its oid to rejectmap on seg0. +-- Check that it's relfilenode is blocked on seg0 by various conditions. +-- +CREATE TABLE blocked_t1(i int) DISTRIBUTED BY (i); +-- Insert an entry for blocked_t1 to rejectmap on seg0. +SELECT block_relation_on_seg0('blocked_t1'::regclass, 'NAMESPACE'::text); + block_relation_on_seg0 +------------------------ + +(1 row) + +-- Shows that the relfilenode of blocked_t1 is blocked on seg0 by its namespace. +SELECT rel.relname, be.target_type, (be.target_oid=rel.relnamespace) AS namespace_matched + FROM gp_dist_random('pg_class') AS rel, + gp_dist_random('diskquota.rejectmap') AS be + WHERE rel.relfilenode=be.relnode AND be.relnode<>0 AND rel.gp_segment_id=be.segid; + relname | target_type | namespace_matched +------------+-----------------+------------------- + blocked_t1 | NAMESPACE_QUOTA | t +(1 row) + +-- Insert an entry for blocked_t1 to rejectmap on seg0. +SELECT block_relation_on_seg0('blocked_t1'::regclass, 'ROLE'::text); + block_relation_on_seg0 +------------------------ + +(1 row) + +-- Shows that the relfilenode of blocked_t1 is blocked on seg0 by its owner. +SELECT rel.relname, be.target_type, (be.target_oid=rel.relowner) AS owner_matched + FROM gp_dist_random('pg_class') AS rel, + gp_dist_random('diskquota.rejectmap') AS be + WHERE rel.relfilenode=be.relnode AND be.relnode<>0 AND rel.gp_segment_id=be.segid; + relname | target_type | owner_matched +------------+-------------+--------------- + blocked_t1 | ROLE_QUOTA | t +(1 row) + +-- Create a tablespace to test the rest of blocking types. +\! mkdir -p /tmp/blocked_space +CREATE TABLESPACE blocked_space LOCATION '/tmp/blocked_space'; +ALTER TABLE blocked_t1 SET TABLESPACE blocked_space; +-- Insert an entry for blocked_t1 to rejectmap on seg0. +SELECT block_relation_on_seg0('blocked_t1'::regclass, 'NAMESPACE_TABLESPACE'::text); + block_relation_on_seg0 +------------------------ + +(1 row) + +-- Shows that the relfilenode of blocked_t1 is blocked on seg0 by its namespace and tablespace. 
+SELECT rel.relname, be.target_type, + (be.target_oid=rel.relnamespace) AS namespace_matched, + (be.tablespace_oid=rel.reltablespace) AS tablespace_matched + FROM gp_dist_random('pg_class') AS rel, + gp_dist_random('diskquota.rejectmap') AS be + WHERE rel.relfilenode=be.relnode AND be.relnode<>0 AND rel.gp_segment_id=be.segid; + relname | target_type | namespace_matched | tablespace_matched +------------+----------------------------+-------------------+-------------------- + blocked_t1 | NAMESPACE_TABLESPACE_QUOTA | t | t +(1 row) + +-- Insert an entry for blocked_t1 to rejectmap on seg0. +SELECT block_relation_on_seg0('blocked_t1'::regclass, 'ROLE_TABLESPACE'::text); + block_relation_on_seg0 +------------------------ + +(1 row) + +-- Shows that the relfilenode of blocked_t1 is blocked on seg0 by its owner and tablespace. +SELECT rel.relname, be.target_type, + (be.target_oid=rel.relowner) AS owner_matched, + (be.tablespace_oid=rel.reltablespace) AS tablespace_matched + FROM gp_dist_random('pg_class') AS rel, + gp_dist_random('diskquota.rejectmap') AS be + WHERE rel.relfilenode=be.relnode AND be.relnode<>0 AND rel.gp_segment_id=be.segid; + relname | target_type | owner_matched | tablespace_matched +------------+-----------------------+---------------+-------------------- + blocked_t1 | ROLE_TABLESPACE_QUOTA | t | t +(1 row) + +-- +-- 2. Test that the relfilenodes of toast relation together with its +-- index are blocked on seg0. +-- +CREATE TABLE blocked_t2(i text) DISTRIBUTED BY (i); +-- Insert an entry for blocked_t2 to rejectmap on seg0. +SELECT block_relation_on_seg0('blocked_t2'::regclass, 'NAMESPACE'::text); + block_relation_on_seg0 +------------------------ + +(1 row) + +-- Shows that the relfilenodes of blocked_t2 together with its toast relation and toast +-- index relation are blocked on seg0 by its namespace. +SELECT replace_oid_with_relname(rel.relname), + rel.relkind, be.target_type, + (be.target_oid=rel.relnamespace) AS namespace_matched + FROM gp_dist_random('pg_class') AS rel, + gp_dist_random('diskquota.rejectmap') AS be + WHERE rel.relfilenode=be.relnode AND be.relnode<>0 AND rel.gp_segment_id=be.segid + ORDER BY rel.relname DESC; + replace_oid_with_relname | relkind | target_type | namespace_matched +---------------------------+---------+-----------------+------------------- + pg_toast_blocked_t2_index | i | NAMESPACE_QUOTA | f + pg_toast_blocked_t2 | t | NAMESPACE_QUOTA | f + blocked_t2 | r | NAMESPACE_QUOTA | t +(3 rows) + +-- +-- 3. Test that the relfilenodes of appendonly relation (row oriented) together with its +-- auxiliary relations are blocked on seg0. +-- +CREATE TABLE blocked_t3(i int) WITH (appendonly=true) DISTRIBUTED BY (i); +CREATE INDEX blocked_t3_index ON blocked_t3(i); +-- Insert an entry for blocked_t3 to rejectmap on seg0. +SELECT block_relation_on_seg0('blocked_t3'::regclass, 'NAMESPACE'::text); + block_relation_on_seg0 +------------------------ + +(1 row) + +-- Shows that the relfilenodes of blocked_t3 together with its appendonly relation and appendonly +-- index relations are blocked on seg0 by its namespace. 
+SELECT replace_oid_with_relname(rel.relname), + rel.relkind, be.target_type, + (be.target_oid=rel.relnamespace) AS namespace_matched + FROM gp_dist_random('pg_class') AS rel, + gp_dist_random('diskquota.rejectmap') AS be + WHERE rel.relfilenode=be.relnode AND be.relnode<>0 AND rel.gp_segment_id=be.segid + ORDER BY rel.relname DESC; + replace_oid_with_relname | relkind | target_type | namespace_matched +-------------------------------+---------+-----------------+------------------- + pg_aovisimap_blocked_t3_index | i | NAMESPACE_QUOTA | f + pg_aovisimap_blocked_t3 | M | NAMESPACE_QUOTA | f + pg_aoseg_blocked_t3 | o | NAMESPACE_QUOTA | f + pg_aoblkdir_blocked_t3_index | i | NAMESPACE_QUOTA | f + pg_aoblkdir_blocked_t3 | b | NAMESPACE_QUOTA | f + blocked_t3 | r | NAMESPACE_QUOTA | t +(6 rows) + +-- +-- 4. Test that the relfilenodes of appendonly relation (column oriented) together with its +-- auxiliary relations are blocked on seg0. +-- +CREATE TABLE blocked_t4(i int) WITH (appendonly=true, orientation=column) DISTRIBUTED BY (i); +CREATE INDEX blocked_t4_index ON blocked_t4(i); +-- Insert an entry for blocked_t4 to rejectmap on seg0. +SELECT block_relation_on_seg0('blocked_t4'::regclass, 'NAMESPACE'::text); + block_relation_on_seg0 +------------------------ + +(1 row) + +-- Shows that the relfilenodes of blocked_t4 together with its appendonly relation and appendonly +-- index relation are blocked on seg0 by its namespace. +SELECT replace_oid_with_relname(rel.relname), + rel.relkind, be.target_type, + (be.target_oid=rel.relnamespace) AS namespace_matched + FROM gp_dist_random('pg_class') AS rel, + gp_dist_random('diskquota.rejectmap') AS be + WHERE rel.relfilenode=be.relnode AND be.relnode<>0 AND rel.gp_segment_id=be.segid + ORDER BY rel.relname DESC; + replace_oid_with_relname | relkind | target_type | namespace_matched +-------------------------------+---------+-----------------+------------------- + pg_aovisimap_blocked_t4_index | i | NAMESPACE_QUOTA | f + pg_aovisimap_blocked_t4 | M | NAMESPACE_QUOTA | f + pg_aocsseg_blocked_t4 | o | NAMESPACE_QUOTA | f + pg_aoblkdir_blocked_t4_index | i | NAMESPACE_QUOTA | f + pg_aoblkdir_blocked_t4 | b | NAMESPACE_QUOTA | f + blocked_t4 | r | NAMESPACE_QUOTA | t +(6 rows) + +-- +-- 5. Test that the relfilenodes of toast appendonly relation (row oriented) together with its +-- auxiliary relations are blocked on seg0. +-- +CREATE TABLE blocked_t5(i text) WITH (appendonly=true, orientation=column) DISTRIBUTED BY (i); +CREATE INDEX blocked_t5_index ON blocked_t5(i); +-- Insert an entry for blocked_t5 to rejectmap on seg0. +SELECT block_relation_on_seg0('blocked_t5'::regclass, 'NAMESPACE'::text); + block_relation_on_seg0 +------------------------ + +(1 row) + +-- Shows that the relfilenodes of blocked_t5 together with its toast relation, toast +-- index relation and appendonly relations are blocked on seg0 by its namespace. 
+SELECT replace_oid_with_relname(rel.relname),
+ rel.relkind, be.target_type,
+ (be.target_oid=rel.relnamespace) AS namespace_matched
+ FROM gp_dist_random('pg_class') AS rel,
+ gp_dist_random('diskquota.rejectmap') AS be
+ WHERE rel.relfilenode=be.relnode AND be.relnode<>0 AND rel.gp_segment_id=be.segid
+ ORDER BY rel.relname DESC;
+ replace_oid_with_relname | relkind | target_type | namespace_matched
+-------------------------------+---------+-----------------+-------------------
+ pg_aovisimap_blocked_t5_index | i | NAMESPACE_QUOTA | f
+ pg_aovisimap_blocked_t5 | M | NAMESPACE_QUOTA | f
+ pg_aocsseg_blocked_t5 | o | NAMESPACE_QUOTA | f
+ pg_aoblkdir_blocked_t5_index | i | NAMESPACE_QUOTA | f
+ pg_aoblkdir_blocked_t5 | b | NAMESPACE_QUOTA | f
+ blocked_t5 | r | NAMESPACE_QUOTA | t
+(6 rows)
+
+-- Do some clean-ups.
+DROP FUNCTION replace_oid_with_relname(text);
+DROP FUNCTION block_relation_on_seg0(regclass, text);
+DROP FUNCTION get_real_tablespace_oid(text, oid);
+DROP TABLE blocked_t1;
+DROP TABLE blocked_t2;
+DROP TABLE blocked_t3;
+DROP TABLE blocked_t4;
+DROP TABLE blocked_t5;
+DROP TABLESPACE blocked_space;
+SET search_path TO DEFAULT;
+DROP SCHEMA s_rejectmap;
diff --git a/tests/regress/expected7/test_rejectmap_mul_db.out b/tests/regress/expected7/test_rejectmap_mul_db.out
new file mode 100644
index 00000000000..ed2edc8df33
--- /dev/null
+++ b/tests/regress/expected7/test_rejectmap_mul_db.out
@@ -0,0 +1,92 @@
+-- One db's rejectmap update should not impact another db's rejectmap
+CREATE DATABASE tjmu1;
+CREATE DATABASE tjmu2;
+-- start_ignore
+\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null
+-- increase the naptime to avoid the active table being cleared by tjmu1's worker
+\! gpconfig -c "diskquota.naptime" -v 1 > /dev/null
+\! gpstop -u > /dev/null
+-- end_ignore
+\c tjmu1
+CREATE EXTENSION diskquota;
+SELECT diskquota.set_schema_quota('public', '1MB');
+ set_schema_quota
+------------------
+
+(1 row)
+
+CREATE TABLE b (t TEXT) DISTRIBUTED BY (t);
+SELECT diskquota.wait_for_worker_new_epoch();
+ wait_for_worker_new_epoch
+---------------------------
+ t
+(1 row)
+
+-- Trigger hard limit to dispatch rejectmap for tjmu1
+INSERT INTO b SELECT generate_series(1, 100000000); -- fail
+ERROR: schema's disk space quota exceeded with name: 2200 (seg1 127.0.0.1:7003 pid=1961462)
+-- NOTE: Pause to keep tjmu1's worker from clearing the active table. Since the naptime is 0 on CI, this might be flaky.
+SELECT diskquota.pause();
+ pause
+-------
+
+(1 row)
+
+-- The rejectmap should contain entries with dbnode = 0 and dbnode = tjmu1_oid. count = 1
+SELECT COUNT(DISTINCT r.dbnode) FROM (SELECT (diskquota.show_rejectmap()).* FROM gp_dist_random('gp_id')) as r where r.dbnode != 0;
+ count
+-------
+ 1
+(1 row)
+
+\c tjmu2
+CREATE EXTENSION diskquota;
+SELECT diskquota.set_schema_quota('public', '1MB');
+ set_schema_quota
+------------------
+
+(1 row)
+
+CREATE TABLE b (t TEXT) DISTRIBUTED BY (t);
+SELECT diskquota.wait_for_worker_new_epoch();
+ wait_for_worker_new_epoch
+---------------------------
+ t
+(1 row)
+
+-- Trigger hard limit to dispatch rejectmap for tjmu2
+INSERT INTO b SELECT generate_series(1, 100000000); -- fail
+ERROR: schema's disk space quota exceeded with name: 2200 (seg0 127.0.0.1:7002 pid=1961759)
+SELECT diskquota.wait_for_worker_new_epoch();
+ wait_for_worker_new_epoch
+---------------------------
+ t
+(1 row)
+
+SELECT diskquota.pause();
+ pause
+-------
+
+(1 row)
+
+--\c tjmu1
+-- The rejectmap should contain entries with dbnode = 0 and dbnode = tjmu1_oid and tjmu2_oid.
count = 2 +-- The entries for tjmu1 should not be cleared +SELECT COUNT(DISTINCT r.dbnode) FROM (SELECT (diskquota.show_rejectmap()).* FROM gp_dist_random('gp_id')) as r where r.dbnode != 0; + count +------- + 2 +(1 row) + +-- start_ignore +\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\! gpconfig -c "diskquota.naptime" -v 0 > /dev/null +\! gpstop -u > /dev/null +-- end_ignore +\c tjmu1 +DROP EXTENSION diskquota; +\c tjmu2 +DROP EXTENSION diskquota; +\c contrib_regression +DROP DATABASE tjmu1; +DROP DATABASE tjmu2; diff --git a/tests/regress/expected7/test_relation_cache.out b/tests/regress/expected7/test_relation_cache.out new file mode 100644 index 00000000000..5f0c3124066 --- /dev/null +++ b/tests/regress/expected7/test_relation_cache.out @@ -0,0 +1,127 @@ +-- init +CREATE OR REPLACE FUNCTION diskquota.check_relation_cache() +RETURNS boolean +as $$ +declare t1 oid[]; +declare t2 oid[]; +begin +t1 := (select array_agg(distinct((a).relid)) from diskquota.show_relation_cache_all_seg() as a where (a).relid != (a).primary_table_oid); +t2 := (select distinct((a).auxrel_oid) from diskquota.show_relation_cache_all_seg() as a where (a).relid = (a).primary_table_oid); +return t1 = t2; +end; +$$ LANGUAGE plpgsql; +-- heap table +begin; +create table t(i int) DISTRIBUTED BY (i); +insert into t select generate_series(1, 100000); +select count(*) from diskquota.show_relation_cache_all_seg(); + count +------- + 3 +(1 row) + +commit; +select diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +select count(*) from diskquota.show_relation_cache_all_seg(); + count +------- + 0 +(1 row) + +drop table t; +-- toast table +begin; +create table t(t text) DISTRIBUTED BY (t); +insert into t select array(select * from generate_series(1,1000)) from generate_series(1, 1000); +select count(*) from diskquota.show_relation_cache_all_seg(); + count +------- + 9 +(1 row) + +select diskquota.check_relation_cache(); + check_relation_cache +---------------------- + t +(1 row) + +commit; +select diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +select count(*) from diskquota.show_relation_cache_all_seg(); + count +------- + 0 +(1 row) + +drop table t; +-- AO table +begin; +create table t(a int, b text) with(appendonly=true) DISTRIBUTED BY (a); +insert into t select generate_series(1,1000) as a, repeat('a', 1000) as b; +select count(*) from diskquota.show_relation_cache_all_seg(); + count +------- + 18 +(1 row) + +select diskquota.check_relation_cache(); + check_relation_cache +---------------------- + t +(1 row) + +commit; +select diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +select count(*) from diskquota.show_relation_cache_all_seg(); + count +------- + 0 +(1 row) + +drop table t; +-- AOCS table +begin; +create table t(a int, b text) with(appendonly=true, orientation=column) DISTRIBUTED BY (a); +insert into t select generate_series(1,1000) as a, repeat('a', 1000) as b; +select count(*) from diskquota.show_relation_cache_all_seg(); + count +------- + 12 +(1 row) + +select diskquota.check_relation_cache(); + check_relation_cache +---------------------- + t +(1 row) + +commit; +select diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +select count(*) from diskquota.show_relation_cache_all_seg(); + count +------- + 0 +(1 row) + +drop table t; +DROP FUNCTION 
diskquota.check_relation_cache(); diff --git a/tests/regress/expected7/test_relation_size.out b/tests/regress/expected7/test_relation_size.out new file mode 100644 index 00000000000..27b4a4eb7de --- /dev/null +++ b/tests/regress/expected7/test_relation_size.out @@ -0,0 +1,99 @@ +CREATE TEMP TABLE t1(i int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +INSERT INTO t1 SELECT generate_series(1, 10000); +SELECT diskquota.relation_size('t1'); + relation_size +--------------- + 688128 +(1 row) + +SELECT pg_table_size('t1'); + pg_table_size +--------------- + 688128 +(1 row) + +CREATE TABLE t2(i int) DISTRIBUTED BY (i); +INSERT INTO t2 SELECT generate_series(1, 10000); +SELECT diskquota.relation_size('t2'); + relation_size +--------------- + 688128 +(1 row) + +SELECT pg_table_size('t2'); + pg_table_size +--------------- + 688128 +(1 row) + +-- start_ignore +\! mkdir -p /tmp/test_spc +-- end_ignore +DROP TABLESPACE IF EXISTS test_spc; +NOTICE: tablespace "test_spc" does not exist, skipping +CREATE TABLESPACE test_spc LOCATION '/tmp/test_spc'; +ALTER TABLE t1 SET TABLESPACE test_spc; +INSERT INTO t1 SELECT generate_series(1, 10000); +SELECT diskquota.relation_size('t1'); + relation_size +--------------- + 1081344 +(1 row) + +SELECT pg_table_size('t1'); + pg_table_size +--------------- + 1081344 +(1 row) + +ALTER TABLE t2 SET TABLESPACE test_spc; +INSERT INTO t2 SELECT generate_series(1, 10000); +SELECT diskquota.relation_size('t2'); + relation_size +--------------- + 1081344 +(1 row) + +SELECT pg_table_size('t2'); + pg_table_size +--------------- + 1081344 +(1 row) + +DROP TABLE t1, t2; +DROP TABLESPACE test_spc; +-- start_ignore +\! 
rm -rf /tmp/test_spc
+ -- end_ignore
+CREATE TABLE ao (i int) WITH (appendonly=true) DISTRIBUTED BY (i);
+INSERT INTO ao SELECT generate_series(1, 10000);
+SELECT diskquota.relation_size('ao');
+ relation_size
+---------------
+ 100200
+(1 row)
+
+SELECT pg_relation_size('ao');
+ pg_relation_size
+------------------
+ 100200
+(1 row)
+
+DROP TABLE ao;
+CREATE TABLE aocs (i int, t text) WITH (appendonly=true, orientation=column) DISTRIBUTED BY (i);
+INSERT INTO aocs SELECT i, repeat('a', 1000) FROM generate_series(1, 10000) AS i;
+SELECT diskquota.relation_size('aocs');
+ relation_size
+---------------
+ 10092696
+(1 row)
+
+SELECT pg_relation_size('aocs');
+ pg_relation_size
+------------------
+ 10092696
+(1 row)
+
+DROP TABLE aocs;
diff --git a/tests/regress/expected7/test_rename.out b/tests/regress/expected7/test_rename.out
new file mode 100644
index 00000000000..1e9ab7ae7c3
--- /dev/null
+++ b/tests/regress/expected7/test_rename.out
@@ -0,0 +1,71 @@
+-- test rename schema
+CREATE SCHEMA srs1;
+SELECT diskquota.set_schema_quota('srs1', '1 MB');
+ set_schema_quota
+------------------
+
+(1 row)
+
+set search_path to srs1;
+CREATE TABLE a(i int) DISTRIBUTED BY (i);
+-- expect insert success (enforcement is delayed until the next quota check)
+INSERT INTO a SELECT generate_series(1,100000);
+SELECT diskquota.wait_for_worker_new_epoch();
+ wait_for_worker_new_epoch
+---------------------------
+ t
+(1 row)
+
+-- expect insert fail
+INSERT INTO a SELECT generate_series(1,10);
+ERROR: schema's disk space quota exceeded with name: srs1
+ALTER SCHEMA srs1 RENAME TO srs2;
+SET search_path TO srs2;
+-- expect insert fail
+INSERT INTO a SELECT generate_series(1,10);
+ERROR: schema's disk space quota exceeded with name: srs2
+-- test rename table
+ALTER TABLE a RENAME TO a2;
+-- expect insert fail
+INSERT INTO a2 SELECT generate_series(1,10);
+ERROR: schema's disk space quota exceeded with name: srs2
+DROP TABLE a2;
+RESET search_path;
+DROP SCHEMA srs2;
+-- test rename role
+CREATE SCHEMA srr1;
+CREATE ROLE srerole NOLOGIN;
+NOTICE: resource queue required -- using default resource queue "pg_default"
+SELECT diskquota.set_role_quota('srerole', '1MB');
+ set_role_quota
+----------------
+
+(1 row)
+
+SET search_path TO srr1;
+CREATE TABLE a(i int) DISTRIBUTED BY (i);
+ALTER TABLE a OWNER TO srerole;
+-- expect insert success (enforcement is delayed until the next quota check)
+INSERT INTO a SELECT generate_series(1,100000);
+SELECT diskquota.wait_for_worker_new_epoch();
+ wait_for_worker_new_epoch
+---------------------------
+ t
+(1 row)
+
+-- expect insert fail
+INSERT INTO a SELECT generate_series(1,10);
+ERROR: role's disk space quota exceeded with name: srerole
+ALTER ROLE srerole RENAME TO srerole2;
+-- expect insert fail
+INSERT INTO a SELECT generate_series(1,10);
+ERROR: role's disk space quota exceeded with name: srerole2
+-- test rename table
+ALTER TABLE a RENAME TO a2;
+-- expect insert fail
+INSERT INTO a2 SELECT generate_series(1,10);
+ERROR: role's disk space quota exceeded with name: srerole2
+DROP TABLE a2;
+DROP ROLE srerole2;
+RESET search_path;
+DROP SCHEMA srr1;
diff --git a/tests/regress/expected7/test_reschema.out b/tests/regress/expected7/test_reschema.out
new file mode 100644
index 00000000000..6b88a8080b6
--- /dev/null
+++ b/tests/regress/expected7/test_reschema.out
@@ -0,0 +1,39 @@
+-- Test re-set_schema_quota
+CREATE SCHEMA srE;
+SELECT diskquota.set_schema_quota('srE', '1 MB');
+ set_schema_quota
+------------------
+
+(1 row)
+
+SET search_path TO srE;
+CREATE TABLE a(i int) DISTRIBUTED BY (i);
+-- expect insert success (enforcement is delayed until the next quota check)
+INSERT INTO a SELECT generate_series(1,100000);
+SELECT
diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert fail when exceed quota limit +INSERT INTO a SELECT generate_series(1,1000); +ERROR: schema's disk space quota exceeded with name: sre +-- set schema quota larger +SELECT diskquota.set_schema_quota('srE', '1 GB'); + set_schema_quota +------------------ + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert succeed +INSERT INTO a SELECT generate_series(1,1000); +DROP TABLE a; +RESET search_path; +DROP SCHEMA srE; diff --git a/tests/regress/expected7/test_role.out b/tests/regress/expected7/test_role.out new file mode 100644 index 00000000000..e51d4685586 --- /dev/null +++ b/tests/regress/expected7/test_role.out @@ -0,0 +1,138 @@ +-- Test role quota +CREATE SCHEMA srole; +SET search_path TO srole; +CREATE ROLE u1 NOLOGIN; +NOTICE: resource queue required -- using default resource queue "pg_default" +CREATE ROLE u2 NOLOGIN; +NOTICE: resource queue required -- using default resource queue "pg_default" +CREATE TABLE b (t TEXT) DISTRIBUTED BY (t); +ALTER TABLE b OWNER TO u1; +CREATE TABLE b2 (t TEXT) DISTRIBUTED BY (t); +ALTER TABLE b2 OWNER TO u1; +SELECT diskquota.set_role_quota('u1', '1 MB'); + set_role_quota +---------------- + +(1 row) + +INSERT INTO b SELECT generate_series(1,100); +-- expect insert success +INSERT INTO b SELECT generate_series(1,100000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); +ERROR: role's disk space quota exceeded with name: u1 +-- expect insert fail +INSERT INTO b2 SELECT generate_series(1,100); +ERROR: role's disk space quota exceeded with name: u1 +-- Delete role quota +SELECT diskquota.set_role_quota('u1', '-1 MB'); + set_role_quota +---------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert success +INSERT INTO b SELECT generate_series(1,100); +-- Reset role quota +SELECT diskquota.set_role_quota('u1', '1 MB'); + set_role_quota +---------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); +ERROR: role's disk space quota exceeded with name: u1 +SELECT role_name, quota_in_mb, rolsize_in_bytes FROM diskquota.show_fast_role_quota_view WHERE role_name='u1'; + role_name | quota_in_mb | rolsize_in_bytes +-----------+-------------+------------------ + u1 | 1 | 4194304 +(1 row) + +SELECT tableid::regclass, size, segid +FROM diskquota.table_size +WHERE tableid = 'b'::regclass +ORDER BY segid; + tableid | size | segid +---------+---------+------- + b | 4063232 | -1 + b | 1343488 | 0 + b | 1343488 | 1 + b | 1343488 | 2 +(4 rows) + +SELECT tableid::regclass, size, segid +FROM diskquota.table_size +WHERE tableid = 'b2'::regclass +ORDER BY segid; + tableid | size | segid +---------+--------+------- + b2 | 131072 | -1 + b2 | 32768 | 0 + b2 | 32768 | 1 + b2 | 32768 | 2 +(4 rows) + +ALTER TABLE b OWNER TO u2; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert succeed +INSERT INTO b SELECT generate_series(1,100); +-- expect insert succeed +INSERT INTO b2 SELECT 
generate_series(1,100); +-- superuser is blocked to set quota +--start_ignore +SELECT rolname from pg_roles where rolsuper=true; + rolname +--------- + zhrt +(1 row) + +--end_ignore +\gset +select diskquota.set_role_quota(:'rolname', '1mb'); +ERROR: Can not set disk quota for system owner: zhrt +select diskquota.set_role_quota(:'rolname', '-1mb'); + set_role_quota +---------------- + +(1 row) + +CREATE ROLE "Tn" NOLOGIN; +NOTICE: resource queue required -- using default resource queue "pg_default" +SELECT diskquota.set_role_quota('Tn', '-1 MB'); -- fail +ERROR: role "tn" does not exist +SELECT diskquota.set_role_quota('"tn"', '-1 MB'); -- fail +ERROR: role "tn" does not exist +SELECT diskquota.set_role_quota('"Tn"', '-1 MB'); + set_role_quota +---------------- + +(1 row) + +DROP TABLE b, b2; +DROP ROLE u1, u2, "Tn"; +RESET search_path; +DROP SCHEMA srole; diff --git a/tests/regress/expected7/test_schema.out b/tests/regress/expected7/test_schema.out new file mode 100644 index 00000000000..866b4b3e127 --- /dev/null +++ b/tests/regress/expected7/test_schema.out @@ -0,0 +1,109 @@ +-- Test schema +CREATE SCHEMA s1; +SET search_path TO s1; +CREATE TABLE a(i int) DISTRIBUTED BY (i); +INSERT INTO a SELECT generate_series(1,100); +-- expect insert success +INSERT INTO a SELECT generate_series(1,100000); +SELECT diskquota.set_schema_quota('s1', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert fail +INSERT INTO a SELECT generate_series(1,100); +ERROR: schema's disk space quota exceeded with name: s1 +CREATE TABLE a2(i int) DISTRIBUTED BY (i); +-- expect insert fail +INSERT INTO a2 SELECT generate_series(1,100); +ERROR: schema's disk space quota exceeded with name: s1 +-- Test alter table set schema +CREATE SCHEMA s2; +ALTER TABLE s1.a SET SCHEMA s2; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert succeed +INSERT INTO a2 SELECT generate_series(1,200); +-- expect insert succeed +INSERT INTO s2.a SELECT generate_series(1,200); +-- prepare a schema that has reached quota limit +CREATE SCHEMA badquota; +DROP ROLE IF EXISTS testbody; +NOTICE: role "testbody" does not exist, skipping +CREATE ROLE testbody; +NOTICE: resource queue required -- using default resource queue "pg_default" +CREATE TABLE badquota.t1(i INT) DISTRIBUTED BY (i); +ALTER TABLE badquota.t1 OWNER TO testbody; +INSERT INTO badquota.t1 SELECT generate_series(0, 100000); +SELECT diskquota.init_table_size_table(); + init_table_size_table +----------------------- + +(1 row) + +SELECT diskquota.set_schema_quota('badquota', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT size, segid FROM diskquota.table_size + WHERE tableid IN (SELECT oid FROM pg_class WHERE relname='t1') + ORDER BY segid DESC; + size | segid +---------+------- + 1310720 | 2 + 1310720 | 1 + 1310720 | 0 + 3932160 | -1 +(4 rows) + +-- expect fail +INSERT INTO badquota.t1 SELECT generate_series(0, 10); +ERROR: schema's disk space quota exceeded with name: badquota +ALTER TABLE s2.a SET SCHEMA badquota; +-- expect failed +INSERT INTO badquota.a SELECT generate_series(0, 100); +ERROR: schema's disk space quota exceeded with name: badquota +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch 
+--------------------------- + t +(1 row) + +SELECT schema_name, quota_in_mb FROM diskquota.show_fast_schema_quota_view WHERE schema_name = 's1'; + schema_name | quota_in_mb +-------------+------------- + s1 | 1 +(1 row) + +CREATE SCHEMA "Tn1"; +SELECT diskquota.set_schema_quota('"Tn1"', '-1 MB'); + set_schema_quota +------------------ + +(1 row) + +RESET search_path; +DROP TABLE s1.a2, badquota.a; +DROP SCHEMA s1, s2, "Tn1"; +DROP TABLE badquota.t1; +DROP ROLE testbody; +DROP SCHEMA badquota; diff --git a/tests/regress/expected7/test_show_status.out b/tests/regress/expected7/test_show_status.out new file mode 100644 index 00000000000..14c3e7de9fd --- /dev/null +++ b/tests/regress/expected7/test_show_status.out @@ -0,0 +1,67 @@ +select * from diskquota.status() where name not like '%version'; + name | status +-------------+-------- + soft limits | on + hard limits | off +(2 rows) + +\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null +\! gpstop -u > /dev/null +select * from diskquota.status() where name not like '%version'; + name | status +-------------+-------- + soft limits | on + hard limits | on +(2 rows) + +\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\! gpstop -u > /dev/null +select * from diskquota.status() where name not like '%version'; + name | status +-------------+-------- + soft limits | on + hard limits | off +(2 rows) + +select from diskquota.pause(); +-- +(1 row) + +select * from diskquota.status() where name not like '%version'; + name | status +-------------+-------- + soft limits | paused + hard limits | off +(2 rows) + +\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null +\! gpstop -u > /dev/null +select * from diskquota.status() where name not like '%version'; + name | status +-------------+-------- + soft limits | paused + hard limits | paused +(2 rows) + +\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\! gpstop -u > /dev/null +select * from diskquota.status() where name not like '%version'; + name | status +-------------+-------- + soft limits | paused + hard limits | off +(2 rows) + +select from diskquota.resume(); +-- +(1 row) + +\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null +\! gpstop -u > /dev/null +select * from diskquota.status() where name not like '%version'; + name | status +-------------+-------- + soft limits | on + hard limits | off +(2 rows) + diff --git a/tests/regress/expected7/test_tablespace_diff_schema.out b/tests/regress/expected7/test_tablespace_diff_schema.out new file mode 100644 index 00000000000..93da486b836 --- /dev/null +++ b/tests/regress/expected7/test_tablespace_diff_schema.out @@ -0,0 +1,87 @@ +-- allow set quota for different schema in the same tablespace +-- delete quota for one schema will not drop other quotas with different schema in the same tablespace +-- start_ignore +\! 
mkdir -p /tmp/spc_diff_schema +-- end_ignore +CREATE TABLESPACE spc_diff_schema LOCATION '/tmp/spc_diff_schema'; +CREATE SCHEMA schema_in_tablespc; +SET search_path TO schema_in_tablespc; +CREATE TABLE a(i int) TABLESPACE spc_diff_schema DISTRIBUTED BY (i); +INSERT INTO a SELECT generate_series(1,100); +SELECT diskquota.set_schema_tablespace_quota('schema_in_tablespc', 'spc_diff_schema','1 MB'); + set_schema_tablespace_quota +----------------------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- with hard limits off, expect insert to succeed +INSERT INTO a SELECT generate_series(1,1000000); +-- wait for next loop for bgworker to add it to rejectmap +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect to fail +INSERT INTO a SELECT generate_series(1,1000000); +ERROR: tablespace: spc_diff_schema, schema: schema_in_tablespc diskquota exceeded +SELECT schema_name, tablespace_name FROM diskquota.show_fast_schema_tablespace_quota_view; + schema_name | tablespace_name +--------------------+----------------- + schema_in_tablespc | spc_diff_schema +(1 row) + +SELECT diskquota.set_schema_tablespace_quota('schema_in_tablespc', 'pg_default','1 MB'); + set_schema_tablespace_quota +----------------------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT schema_name, tablespace_name FROM diskquota.show_fast_schema_tablespace_quota_view; + schema_name | tablespace_name +--------------------+----------------- + schema_in_tablespc | spc_diff_schema + schema_in_tablespc | pg_default +(2 rows) + +SELECT diskquota.set_schema_tablespace_quota('schema_in_tablespc', 'pg_default','-1'); + set_schema_tablespace_quota +----------------------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT schema_name, tablespace_name FROM diskquota.show_fast_schema_tablespace_quota_view; + schema_name | tablespace_name +--------------------+----------------- + schema_in_tablespc | spc_diff_schema +(1 row) + +-- expect to fail +INSERT INTO a SELECT generate_series(1,1000000); +ERROR: tablespace: spc_diff_schema, schema: schema_in_tablespc diskquota exceeded +reset search_path; +DROP TABLE IF EXISTS schema_in_tablespc.a; +DROP tablespace IF EXISTS spc_diff_schema; +DROP SCHEMA IF EXISTS schema_in_tablespc; +-- start_ignore +\! rmdir /tmp/spc_diff_schema +-- end_ignore diff --git a/tests/regress/expected7/test_tablespace_role.out b/tests/regress/expected7/test_tablespace_role.out new file mode 100644 index 00000000000..b926890bc81 --- /dev/null +++ b/tests/regress/expected7/test_tablespace_role.out @@ -0,0 +1,194 @@ +-- Test role quota +-- start_ignore +\!
mkdir -p /tmp/rolespc +-- end_ignore +DROP TABLESPACE IF EXISTS rolespc; +NOTICE: tablespace "rolespc" does not exist, skipping +CREATE TABLESPACE rolespc LOCATION '/tmp/rolespc'; +CREATE SCHEMA rolespcrole; +SET search_path TO rolespcrole; +DROP ROLE IF EXISTS rolespcu1; +NOTICE: role "rolespcu1" does not exist, skipping +DROP ROLE IF EXISTS rolespcu2; +NOTICE: role "rolespcu2" does not exist, skipping +CREATE ROLE rolespcu1 NOLOGIN; +NOTICE: resource queue required -- using default resource queue "pg_default" +CREATE ROLE rolespcu2 NOLOGIN; +NOTICE: resource queue required -- using default resource queue "pg_default" +CREATE TABLE b (t TEXT) TABLESPACE rolespc DISTRIBUTED BY (t); +CREATE TABLE b2 (t TEXT) TABLESPACE rolespc DISTRIBUTED BY (t); +ALTER TABLE b2 OWNER TO rolespcu1; +INSERT INTO b SELECT generate_series(1,100); +-- expect insert success +INSERT INTO b SELECT generate_series(1,100000); +SELECT diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '1 MB'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert success +INSERT INTO b SELECT generate_series(1,100); +ALTER TABLE b OWNER TO rolespcu1; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); +ERROR: tablespace: rolespc, role: rolespcu1 diskquota exceeded +-- expect insert fail +INSERT INTO b2 SELECT generate_series(1,100); +ERROR: tablespace: rolespc, role: rolespcu1 diskquota exceeded +-- Test show_fast_role_tablespace_quota_view +SELECT role_name, tablespace_name, quota_in_mb, rolsize_tablespace_in_bytes FROM diskquota.show_fast_role_tablespace_quota_view WHERE role_name = 'rolespcu1' and tablespace_name = 'rolespc'; + role_name | tablespace_name | quota_in_mb | rolsize_tablespace_in_bytes +-----------+-----------------+-------------+----------------------------- + rolespcu1 | rolespc | 1 | 4194304 +(1 row) + +-- Test alter owner +ALTER TABLE b OWNER TO rolespcu2; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert succeed +INSERT INTO b SELECT generate_series(1,100); +-- expect insert succeed +INSERT INTO b2 SELECT generate_series(1,100); +ALTER TABLE b OWNER TO rolespcu1; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); +ERROR: tablespace: rolespc, role: rolespcu1 diskquota exceeded +-- Test alter tablespace +-- start_ignore +\! 
mkdir -p /tmp/rolespc2 +-- end_ignore +DROP TABLESPACE IF EXISTS rolespc2; +NOTICE: tablespace "rolespc2" does not exist, skipping +CREATE TABLESPACE rolespc2 LOCATION '/tmp/rolespc2'; +ALTER TABLE b SET TABLESPACE rolespc2; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert succeed +INSERT INTO b SELECT generate_series(1,100); +-- alter table b back to tablespace rolespc +ALTER TABLE b SET TABLESPACE rolespc; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); +ERROR: tablespace: rolespc, role: rolespcu1 diskquota exceeded +-- Test update quota config +SELECT diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '10 MB'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert success +INSERT INTO b SELECT generate_series(1,100); +-- expect insert success +INSERT INTO b SELECT generate_series(1,1000000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); +ERROR: tablespace: rolespc, role: rolespcu1 diskquota exceeded +-- Test delete quota config +SELECT diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '-1 MB'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert success +INSERT INTO b SELECT generate_series(1,100); +-- superuser is blocked to set quota +-- start_ignore +SELECT rolname from pg_roles where rolsuper=true; + rolname +--------- + zhrt +(1 row) + +-- end_ignore +\gset +select diskquota.set_role_tablespace_quota(:'rolname', 'rolespc', '1mb'); +ERROR: Can not set disk quota for system owner: zhrt +-- start_ignore +\! mkdir -p /tmp/rolespc3 +-- end_ignore +DROP ROLE IF EXISTS "Rolespcu3"; +NOTICE: role "Rolespcu3" does not exist, skipping +CREATE ROLE "Rolespcu3" NOLOGIN; +NOTICE: resource queue required -- using default resource queue "pg_default" +DROP TABLESPACE IF EXISTS "Rolespc3"; +NOTICE: tablespace "Rolespc3" does not exist, skipping +CREATE TABLESPACE "Rolespc3" LOCATION '/tmp/rolespc3'; +SELECT diskquota.set_role_tablespace_quota('rolespcu1', '"Rolespc3"', '-1 MB'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +SELECT diskquota.set_role_tablespace_quota('"Rolespcu3"', 'rolespc', '-1 mB'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +SELECT diskquota.set_role_tablespace_quota('"Rolespcu3"', '"Rolespc3"', '-1 Mb'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +DROP TABLE b, b2; +DROP ROLE rolespcu1, rolespcu2; +RESET search_path; +DROP SCHEMA rolespcrole; +DROP TABLESPACE rolespc; +DROP TABLESPACE rolespc2; +DROP TABLESPACE "Rolespc3"; diff --git a/tests/regress/expected7/test_tablespace_role_perseg.out b/tests/regress/expected7/test_tablespace_role_perseg.out new file mode 100644 index 00000000000..c30030325d7 --- /dev/null +++ b/tests/regress/expected7/test_tablespace_role_perseg.out @@ -0,0 +1,235 @@ +-- Test role quota +-- start_ignore +\! 
mkdir -p /tmp/rolespc_perseg +-- end_ignore +DROP TABLESPACE IF EXISTS rolespc_perseg; +NOTICE: tablespace "rolespc_perseg" does not exist, skipping +CREATE TABLESPACE rolespc_perseg LOCATION '/tmp/rolespc_perseg'; +CREATE SCHEMA rolespc_persegrole; +SET search_path TO rolespc_persegrole; +DROP ROLE IF EXISTS rolespc_persegu1; +NOTICE: role "rolespc_persegu1" does not exist, skipping +DROP ROLE IF EXISTS rolespc_persegu2; +NOTICE: role "rolespc_persegu2" does not exist, skipping +CREATE ROLE rolespc_persegu1 NOLOGIN; +NOTICE: resource queue required -- using default resource queue "pg_default" +CREATE ROLE rolespc_persegu2 NOLOGIN; +NOTICE: resource queue required -- using default resource queue "pg_default" +CREATE TABLE b (t TEXT) TABLESPACE rolespc_perseg DISTRIBUTED BY (t); +ALTER TABLE b OWNER TO rolespc_persegu1; +SELECT diskquota.set_role_tablespace_quota('rolespc_persegu1', 'rolespc_perseg', '1 MB'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +INSERT INTO b SELECT generate_series(1,100); +-- expect insert success +INSERT INTO b SELECT generate_series(1,100000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); +ERROR: tablespace: rolespc_perseg, role: rolespc_persegu1 diskquota exceeded +-- change tablespace role quota +SELECT diskquota.set_role_tablespace_quota('rolespc_persegu1', 'rolespc_perseg', '10 MB'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert success +INSERT INTO b SELECT generate_series(1,100); +-- Test show_fast_role_tablespace_quota_view +SELECT role_name, tablespace_name, quota_in_mb, rolsize_tablespace_in_bytes FROM diskquota.show_fast_role_tablespace_quota_view WHERE role_name = 'rolespc_persegu1' and tablespace_name = 'rolespc_perseg'; + role_name | tablespace_name | quota_in_mb | rolsize_tablespace_in_bytes +------------------+-----------------+-------------+----------------------------- + rolespc_persegu1 | rolespc_perseg | 10 | 4063232 +(1 row) + +SELECT diskquota.set_per_segment_quota('rolespc_perseg', '0.1'); + set_per_segment_quota +----------------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +---- expect insert fail by tablespace role perseg quota +INSERT INTO b SELECT generate_series(1,100); +ERROR: tablespace: rolespc_perseg, role: rolespc_persegu1 diskquota exceeded per segment quota +-- Test alter owner +ALTER TABLE b OWNER TO rolespc_persegu2; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert succeed +INSERT INTO b SELECT generate_series(1,100); +ALTER TABLE b OWNER TO rolespc_persegu1; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); +ERROR: tablespace: rolespc_perseg, role: rolespc_persegu1 diskquota exceeded per segment quota +-- Test alter tablespace +-- start_ignore +\!
mkdir -p /tmp/rolespc_perseg2 +-- end_ignore +DROP TABLESPACE IF EXISTS rolespc_perseg2; +NOTICE: tablespace "rolespc_perseg2" does not exist, skipping +CREATE TABLESPACE rolespc_perseg2 LOCATION '/tmp/rolespc_perseg2'; +ALTER TABLE b SET TABLESPACE rolespc_perseg2; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert succeed +INSERT INTO b SELECT generate_series(1,100); +-- alter table b back to tablespace rolespc_perseg +ALTER TABLE b SET TABLESPACE rolespc_perseg; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); +ERROR: tablespace: rolespc_perseg, role: rolespc_persegu1 diskquota exceeded per segment quota +-- Test update per segment ratio +SELECT diskquota.set_per_segment_quota('rolespc_perseg', 3.1); + set_per_segment_quota +----------------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert success +INSERT INTO b SELECT generate_series(1,100); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT role_name, tablespace_name, quota_in_mb, rolsize_tablespace_in_bytes FROM diskquota.show_fast_role_tablespace_quota_view WHERE role_name = 'rolespc_persegu1' and tablespace_name = 'rolespc_perseg'; + role_name | tablespace_name | quota_in_mb | rolsize_tablespace_in_bytes +------------------+-----------------+-------------+----------------------------- + rolespc_persegu1 | rolespc_perseg | 10 | 4063232 +(1 row) + +SELECT diskquota.set_per_segment_quota('rolespc_perseg', 0.11); + set_per_segment_quota +----------------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); +ERROR: tablespace: rolespc_perseg, role: rolespc_persegu1 diskquota exceeded per segment quota +-- Test delete per segment ratio +SELECT diskquota.set_per_segment_quota('rolespc_perseg', -1); + set_per_segment_quota +----------------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert success +INSERT INTO b SELECT generate_series(1,100); +SELECT diskquota.set_per_segment_quota('rolespc_perseg', 0.11); + set_per_segment_quota +----------------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert fail +INSERT INTO b SELECT generate_series(1,100); +ERROR: tablespace: rolespc_perseg, role: rolespc_persegu1 diskquota exceeded per segment quota +-- Test delete quota config +SELECT diskquota.set_role_tablespace_quota('rolespc_persegu1', 'rolespc_perseg', '-1 MB'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert success +INSERT INTO b SELECT generate_series(1,100); +-- start_ignore +\! 
mkdir -p /tmp/rolespc_perseg3 +-- end_ignore +DROP TABLESPACE IF EXISTS "Rolespc_perseg3"; +NOTICE: tablespace "Rolespc_perseg3" does not exist, skipping +CREATE TABLESPACE "Rolespc_perseg3" LOCATION '/tmp/rolespc_perseg3'; +CREATE ROLE "Rolespc_persegu3" NOLOGIN; +NOTICE: resource queue required -- using default resource queue "pg_default" +SELECT diskquota.set_role_tablespace_quota('"Rolespc_persegu3"', '"Rolespc_perseg3"', '-1 MB'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +SELECT diskquota.set_per_segment_quota('"Rolespc_perseg3"', 0.11); + set_per_segment_quota +----------------------- + +(1 row) + +DROP table b; +DROP ROLE rolespc_persegu1, rolespc_persegu2, "Rolespc_persegu3"; +RESET search_path; +DROP SCHEMA rolespc_persegrole; +DROP TABLESPACE rolespc_perseg; +DROP TABLESPACE rolespc_perseg2; +DROP TABLESPACE "Rolespc_perseg3"; diff --git a/tests/regress/expected7/test_tablespace_schema.out b/tests/regress/expected7/test_tablespace_schema.out new file mode 100644 index 00000000000..a7e57c594be --- /dev/null +++ b/tests/regress/expected7/test_tablespace_schema.out @@ -0,0 +1,147 @@ +-- Test schema +-- start_ignore +\! mkdir -p /tmp/schemaspc +-- end_ignore +CREATE SCHEMA spcs1; +DROP TABLESPACE IF EXISTS schemaspc; +NOTICE: tablespace "schemaspc" does not exist, skipping +CREATE TABLESPACE schemaspc LOCATION '/tmp/schemaspc'; +SET search_path TO spcs1; +CREATE TABLE a(i int) TABLESPACE schemaspc DISTRIBUTED BY (i); +INSERT INTO a SELECT generate_series(1,100); +-- expect insert success +INSERT INTO a SELECT generate_series(1,100000); +SELECT diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc','1 MB'); + set_schema_tablespace_quota +----------------------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert fail +INSERT INTO a SELECT generate_series(1,100); +ERROR: tablespace: schemaspc, schema: spcs1 diskquota exceeded +CREATE TABLE a2(i int) TABLESPACE schemaspc DISTRIBUTED BY (i); +-- expect insert fail +INSERT INTO a2 SELECT generate_series(1,100); +ERROR: tablespace: schemaspc, schema: spcs1 diskquota exceeded +-- Test alter table set schema +CREATE SCHEMA spcs2; +ALTER TABLE spcs1.a SET SCHEMA spcs2; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert succeed +INSERT INTO a2 SELECT generate_series(1,200); +-- expect insert succeed +INSERT INTO spcs2.a SELECT generate_series(1,200); +ALTER TABLE spcs2.a SET SCHEMA spcs1; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert fail +INSERT INTO a SELECT generate_series(1,200); +ERROR: tablespace: schemaspc, schema: spcs1 diskquota exceeded +SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name = 'spcs1' and tablespace_name ='schemaspc'; + schema_name | tablespace_name | quota_in_mb | nspsize_tablespace_in_bytes +-------------+-----------------+-------------+----------------------------- + spcs1 | schemaspc | 1 | 4030464 +(1 row) + +-- Test alter tablespace +-- start_ignore +\!
mkdir -p /tmp/schemaspc2 +-- end_ignore +DROP TABLESPACE IF EXISTS schemaspc2; +NOTICE: tablespace "schemaspc2" does not exist, skipping +CREATE TABLESPACE schemaspc2 LOCATION '/tmp/schemaspc2'; +ALTER TABLE a SET TABLESPACE schemaspc2; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert succeed +INSERT INTO a SELECT generate_series(1,200); +ALTER TABLE a SET TABLESPACE schemaspc; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert fail +INSERT INTO a SELECT generate_series(1,200); +ERROR: tablespace: schemaspc, schema: spcs1 diskquota exceeded +-- Test update quota config +SELECT diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc', '10 MB'); + set_schema_tablespace_quota +----------------------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert success +INSERT INTO a SELECT generate_series(1,100); +-- expect insert success +INSERT INTO a SELECT generate_series(1,1000000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert fail +INSERT INTO a SELECT generate_series(1,100); +ERROR: tablespace: schemaspc, schema: spcs1 diskquota exceeded +-- Test delete quota config +SELECT diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc', '-1 MB'); + set_schema_tablespace_quota +----------------------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert success +INSERT INTO a SELECT generate_series(1,100); +-- start_ignore +\! mkdir -p /tmp/schemaspc3 +-- end_ignore +DROP TABLESPACE IF EXISTS "Schemaspc3"; +NOTICE: tablespace "Schemaspc3" does not exist, skipping +CREATE TABLESPACE "Schemaspc3" LOCATION '/tmp/schemaspc3'; +CREATE SCHEMA "Spcs2"; +SELECT diskquota.set_schema_tablespace_quota('"Spcs2"', '"Schemaspc3"', '-1 MB'); + set_schema_tablespace_quota +----------------------------- + +(1 row) + +RESET search_path; +DROP TABLE spcs1.a2, spcs1.a; +DROP SCHEMA spcs1, spcs2; +DROP TABLESPACE schemaspc; +DROP TABLESPACE schemaspc2; +DROP TABLESPACE "Schemaspc3"; diff --git a/tests/regress/expected7/test_tablespace_schema_perseg.out b/tests/regress/expected7/test_tablespace_schema_perseg.out new file mode 100644 index 00000000000..c27f3e0ea9e --- /dev/null +++ b/tests/regress/expected7/test_tablespace_schema_perseg.out @@ -0,0 +1,282 @@ +-- Test schema +-- start_ignore +\! 
mkdir -p /tmp/schemaspc_perseg +-- end_ignore +-- Test tablespace quota perseg +CREATE SCHEMA spcs1_perseg; +DROP TABLESPACE IF EXISTS schemaspc_perseg; +NOTICE: tablespace "schemaspc_perseg" does not exist, skipping +CREATE TABLESPACE schemaspc_perseg LOCATION '/tmp/schemaspc_perseg'; +SELECT diskquota.set_schema_tablespace_quota('spcs1_perseg', 'schemaspc_perseg','1 MB'); + set_schema_tablespace_quota +----------------------------- + +(1 row) + +SET search_path TO spcs1_perseg; +CREATE TABLE a(i int) TABLESPACE schemaspc_perseg DISTRIBUTED BY (i); +INSERT INTO a SELECT generate_series(1,100); +-- expect insert success +INSERT INTO a SELECT generate_series(1,100000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert fail by tablespace schema diskquota +INSERT INTO a SELECT generate_series(1,100); +ERROR: tablespace: schemaspc_perseg, schema: spcs1_perseg diskquota exceeded +-- change tablespace schema quota +SELECT diskquota.set_schema_tablespace_quota('spcs1_perseg', 'schemaspc_perseg', '10 MB'); + set_schema_tablespace_quota +----------------------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert success +INSERT INTO a SELECT generate_series(1,100); +SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name = 'spcs1_perseg' and tablespace_name ='schemaspc_perseg'; + schema_name | tablespace_name | quota_in_mb | nspsize_tablespace_in_bytes +--------------+------------------+-------------+----------------------------- + spcs1_perseg | schemaspc_perseg | 10 | 3932160 +(1 row) + +SELECT diskquota.set_per_segment_quota('schemaspc_perseg', 0.1); + set_per_segment_quota +----------------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +---- expect insert fail by tablespace schema perseg quota +INSERT INTO a SELECT generate_series(1,100); +ERROR: tablespace: schemaspc_perseg, schema: spcs1_perseg diskquota exceeded per segment quota +-- Test alter table set schema +CREATE SCHEMA spcs2_perseg; +ALTER TABLE spcs1_perseg.a SET SCHEMA spcs2_perseg; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert succeed +INSERT INTO spcs2_perseg.a SELECT generate_series(1,200); +ALTER TABLE spcs2_perseg.a SET SCHEMA spcs1_perseg; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert fail +INSERT INTO a SELECT generate_series(1,200); +ERROR: tablespace: schemaspc_perseg, schema: spcs1_perseg diskquota exceeded per segment quota +SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name = 'spcs1_perseg' and tablespace_name ='schemaspc_perseg'; + schema_name | tablespace_name | quota_in_mb | nspsize_tablespace_in_bytes +--------------+------------------+-------------+----------------------------- + spcs1_perseg | schemaspc_perseg | 10 | 3932160 +(1 row) + +-- Test alter tablespace +-- start_ignore +\! 
mkdir -p /tmp/schemaspc_perseg2 +-- end_ignore +DROP TABLESPACE IF EXISTS "Schemaspc_perseg2"; +NOTICE: tablespace "Schemaspc_perseg2" does not exist, skipping +CREATE TABLESPACE "Schemaspc_perseg2" LOCATION '/tmp/schemaspc_perseg2'; +ALTER TABLE a SET TABLESPACE "Schemaspc_perseg2"; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert succeed +INSERT INTO a SELECT generate_series(1,200); +ALTER TABLE a SET TABLESPACE schemaspc_perseg; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert fail +INSERT INTO a SELECT generate_series(1,200); +ERROR: tablespace: schemaspc_perseg, schema: spcs1_perseg diskquota exceeded per segment quota +-- Test update per segment ratio +SELECT diskquota.set_per_segment_quota('schemaspc_perseg', 3.1); + set_per_segment_quota +----------------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert success +INSERT INTO a SELECT generate_series(1,100); +SELECT diskquota.set_per_segment_quota('schemaspc_perseg', 0.123); + set_per_segment_quota +----------------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +---- expect insert fail +INSERT INTO a SELECT generate_series(1,100); +ERROR: tablespace: schemaspc_perseg, schema: spcs1_perseg diskquota exceeded per segment quota +-- Test delete per segment ratio +SELECT diskquota.set_per_segment_quota('schemaspc_perseg', -1); + set_per_segment_quota +----------------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert success +INSERT INTO a SELECT generate_series(1,100); +SELECT diskquota.set_per_segment_quota('schemaspc_perseg', 0.123); + set_per_segment_quota +----------------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +---- expect insert fail +INSERT INTO a SELECT generate_series(1,100); +ERROR: tablespace: schemaspc_perseg, schema: spcs1_perseg diskquota exceeded per segment quota +-- Test delete tablespace schema quota +SELECT diskquota.set_per_segment_quota('schemaspc_perseg', 2); + set_per_segment_quota +----------------------- + +(1 row) + +SELECT diskquota.set_schema_tablespace_quota('spcs1_perseg', 'schemaspc_perseg','-1 MB'); + set_schema_tablespace_quota +----------------------------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert success +INSERT INTO a SELECT generate_series(1,100); +SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name = 'spcs1_perseg' and tablespace_name ='schemaspc_perseg'; + schema_name | tablespace_name | quota_in_mb | nspsize_tablespace_in_bytes +-------------+-----------------+-------------+----------------------------- +(0 rows) + +-- test config per segment quota +SELECT diskquota.set_per_segment_quota('"Schemaspc_perseg2"','1'); + set_per_segment_quota +----------------------- + +(1 row) + +SELECT distinct(segratio) from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'Schemaspc_perseg2'; + segratio +---------- 
+ 1 +(1 row) + +SELECT diskquota.set_schema_tablespace_quota('spcs2_perseg', '"Schemaspc_perseg2"','1 MB'); + set_schema_tablespace_quota +----------------------------- + +(1 row) + +SELECT distinct(segratio) FROM diskquota.quota_config, pg_namespace, diskquota.target + WHERE diskquota.quota_config.targetoid = diskquota.target.rowId AND + diskquota.target.primaryOid = pg_namespace.oid AND nspname = 'spcs2_perseg'; + segratio +---------- + 1 +(1 row) + +SELECT diskquota.set_per_segment_quota('"Schemaspc_perseg2"','-2'); + set_per_segment_quota +----------------------- + +(1 row) + +SELECT distinct(segratio) from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'Schemaspc_perseg2'; + segratio +---------- +(0 rows) + +SELECT distinct(segratio) FROM diskquota.quota_config, pg_namespace, diskquota.target + WHERE diskquota.quota_config.targetoid = diskquota.target.rowId AND + diskquota.target.primaryOid = pg_namespace.oid AND nspname = 'spcs2_perseg'; + segratio +---------- + 0 +(1 row) + +SELECT diskquota.set_per_segment_quota('"Schemaspc_perseg2"','3'); + set_per_segment_quota +----------------------- + +(1 row) + +SELECT distinct(segratio) from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'Schemaspc_perseg2'; + segratio +---------- + 3 +(1 row) + +SELECT distinct(segratio) FROM diskquota.quota_config, pg_namespace, diskquota.target + WHERE diskquota.quota_config.targetoid = diskquota.target.rowId AND + diskquota.target.primaryOid = pg_namespace.oid AND nspname = 'spcs2_perseg'; + segratio +---------- + 3 +(1 row) + +SELECT tablespace_name, per_seg_quota_ratio FROM diskquota.show_segment_ratio_quota_view where tablespace_name in ('Schemaspc_perseg2', 'schemaspc_perseg'); + tablespace_name | per_seg_quota_ratio +-------------------+--------------------- + schemaspc_perseg | 2 + Schemaspc_perseg2 | 3 +(2 rows) + +RESET search_path; +DROP TABLE spcs1_perseg.a; +DROP SCHEMA spcs1_perseg; +DROP TABLESPACE schemaspc_perseg; +DROP TABLESPACE "Schemaspc_perseg2"; diff --git a/tests/regress/expected7/test_temp_role.out b/tests/regress/expected7/test_temp_role.out new file mode 100644 index 00000000000..4493325717e --- /dev/null +++ b/tests/regress/expected7/test_temp_role.out @@ -0,0 +1,40 @@ +-- Test temp table restrained by role id +CREATE SCHEMA strole; +CREATE ROLE u3temp NOLOGIN; +NOTICE: resource queue required -- using default resource queue "pg_default" +SET search_path TO strole; +SELECT diskquota.set_role_quota('u3temp', '1MB'); + set_role_quota +---------------- + +(1 row) + +CREATE TABLE a(i int) DISTRIBUTED BY (i); +ALTER TABLE a OWNER TO u3temp; +CREATE TEMP TABLE ta(i int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
+ALTER TABLE ta OWNER TO u3temp; +-- expect insert success: fill temp table +INSERT INTO ta SELECT generate_series(1,100000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert fail +INSERT INTO a SELECT generate_series(1,100); +ERROR: role's disk space quota exceeded with name: u3temp +DROP TABLE ta; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +INSERT INTO a SELECT generate_series(1,100); +DROP TABLE a; +DROP ROLE u3temp; +RESET search_path; +DROP SCHEMA strole; diff --git a/tests/regress/expected7/test_toast.out b/tests/regress/expected7/test_toast.out new file mode 100644 index 00000000000..df0b0c154c2 --- /dev/null +++ b/tests/regress/expected7/test_toast.out @@ -0,0 +1,31 @@ +-- Test toast +CREATE SCHEMA s5; +SELECT diskquota.set_schema_quota('s5', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +SET search_path TO s5; +CREATE TABLE a5 (t text) DISTRIBUTED BY (t); +INSERT INTO a5 +SELECT (SELECT + string_agg(chr(floor(random() * 26)::int + 65), '') + FROM generate_series(1,10000)) +FROM generate_series(1,10000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert toast fail +INSERT INTO a5 +SELECT (SELECT + string_agg(chr(floor(random() * 26)::int + 65), '') + FROM generate_series(1,1000)) +FROM generate_series(1,1000); +ERROR: schema's disk space quota exceeded with name: s5 +DROP TABLE a5; +RESET search_path; +DROP SCHEMA s5; diff --git a/tests/regress/expected7/test_truncate.out b/tests/regress/expected7/test_truncate.out new file mode 100644 index 00000000000..a9fd12392d6 --- /dev/null +++ b/tests/regress/expected7/test_truncate.out @@ -0,0 +1,36 @@ +-- Test truncate +CREATE SCHEMA s7; +SELECT diskquota.set_schema_quota('s7', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +SET search_path TO s7; +CREATE TABLE a (i int) DISTRIBUTED BY (i); +CREATE TABLE b (i int) DISTRIBUTED BY (i); +INSERT INTO a SELECT generate_series(1,100000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert fail +INSERT INTO a SELECT generate_series(1,30); +ERROR: schema's disk space quota exceeded with name: s7 +INSERT INTO b SELECT generate_series(1,30); +ERROR: schema's disk space quota exceeded with name: s7 +TRUNCATE TABLE a; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert succeed +INSERT INTO a SELECT generate_series(1,30); +INSERT INTO b SELECT generate_series(1,30); +DROP TABLE a, b; +RESET search_path; +DROP SCHEMA s7; diff --git a/tests/regress/expected7/test_uncommitted_table_size.out b/tests/regress/expected7/test_uncommitted_table_size.out new file mode 100644 index 00000000000..43daf5ef234 --- /dev/null +++ b/tests/regress/expected7/test_uncommitted_table_size.out @@ -0,0 +1,236 @@ +-- temp table +begin; +CREATE TEMP TABLE t1(i int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
+INSERT INTO t1 SELECT generate_series(1, 100000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 't1'::regclass and segid = -1; + tableid | size | segid +---------+---------+------- + t1 | 3932160 | -1 +(1 row) + +SELECT pg_table_size('t1'); + pg_table_size +--------------- + 3932160 +(1 row) + +commit; +DROP table t1; +-- heap table +begin; +CREATE TABLE t2(i int) DISTRIBUTED BY (i); +INSERT INTO t2 SELECT generate_series(1, 100000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 't2'::regclass and segid = -1; + tableid | size | segid +---------+---------+------- + t2 | 3932160 | -1 +(1 row) + +SELECT pg_table_size('t2'); + pg_table_size +--------------- + 3932160 +(1 row) + +commit; +-- heap table index +begin; +CREATE INDEX idx2 on t2(i); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'idx2'::regclass and segid = -1; + tableid | size | segid +---------+---------+------- + idx2 | 2490368 | -1 +(1 row) + +SELECT pg_table_size('idx2'); + pg_table_size +--------------- + 2490368 +(1 row) + +commit; +DROP table t2; +-- toast table +begin; +CREATE TABLE t3(t text) DISTRIBUTED BY (t); +INSERT INTO t3 SELECT repeat('a', 10000) FROM generate_series(1, 1000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 't3'::regclass and segid = -1; + tableid | size | segid +---------+--------+------- + t3 | 393216 | -1 +(1 row) + +SELECT pg_table_size('t3'); + pg_table_size +--------------- + 393216 +(1 row) + +commit; +DROP table t3; +-- AO table +begin; +CREATE TABLE ao (i int) WITH (appendonly=true) DISTRIBUTED BY (i); +INSERT INTO ao SELECT generate_series(1, 100000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT (SELECT size FROM diskquota.table_size WHERE tableid = 'ao'::regclass and segid = -1)= + (SELECT pg_table_size('ao')); + ?column? 
+---------- + t +(1 row) + +commit; +-- AO table index +begin; +CREATE INDEX ao_idx on ao(i); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'ao_idx'::regclass and segid = -1; + tableid | size | segid +---------+---------+------- + ao_idx | 2490368 | -1 +(1 row) + +SELECT pg_table_size('ao_idx'); + pg_table_size +--------------- + 2490368 +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'ao'::regclass and segid = -1; + tableid | size | segid +---------+---------+------- + ao | 1558696 | -1 +(1 row) + +SELECT pg_table_size('ao'); + pg_table_size +--------------- + 1558696 +(1 row) + +commit; +DROP TABLE ao; +-- AO table CTAS +begin; +CREATE TABLE ao (i) WITH(appendonly=true) AS SELECT generate_series(1, 10000) DISTRIBUTED BY (i); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT (SELECT size FROM diskquota.table_size WHERE tableid = 'ao'::regclass and segid = -1)= + (SELECT pg_table_size('ao')); + ?column? +---------- + t +(1 row) + +commit; +DROP TABLE ao; +-- AOCS table +begin; +CREATE TABLE aocs (i int, t text) WITH (appendonly=true, orientation=column) DISTRIBUTED BY (i); +INSERT INTO aocs SELECT i, repeat('a', 1000) FROM generate_series(1, 10000) AS i; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'aocs'::regclass and segid = -1; + tableid | size | segid +---------+----------+------- + aocs | 10322072 | -1 +(1 row) + +SELECT pg_table_size('aocs'); + pg_table_size +--------------- + 10322072 +(1 row) + +commit; +-- AOCS table index +begin; +CREATE INDEX aocs_idx on aocs(i); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'aocs_idx'::regclass and segid = -1; + tableid | size | segid +----------+--------+------- + aocs_idx | 524288 | -1 +(1 row) + +SELECT pg_table_size('aocs_idx'); + pg_table_size +--------------- + 524288 +(1 row) + +commit; +DROP TABLE aocs; +-- AOCS table CTAS +begin; +CREATE TABLE aocs WITH(appendonly=true, orientation=column) AS SELECT i, array(select * from generate_series(1,1000)) FROM generate_series(1, 100) AS i DISTRIBUTED BY (i); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'aocs'::regclass and segid = -1; + tableid | size | segid +---------+--------+------- + aocs | 632864 | -1 +(1 row) + +SELECT pg_table_size('aocs'); + pg_table_size +--------------- + 632864 +(1 row) + +commit; +DROP TABLE aocs; diff --git a/tests/regress/expected7/test_update.out b/tests/regress/expected7/test_update.out new file mode 100644 index 00000000000..5ddb9d8c55b --- /dev/null +++ b/tests/regress/expected7/test_update.out @@ -0,0 +1,23 @@ +-- Test Update +CREATE SCHEMA s4; +SELECT diskquota.set_schema_quota('s4', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +SET search_path TO s4; +CREATE TABLE a(i int) DISTRIBUTED BY (i); +INSERT INTO a SELECT generate_series(1,100000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch 
+--------------------------- + t +(1 row) + +-- expect update fail. +UPDATE a SET i = 100; +ERROR: schema's disk space quota exceeded with name: s4 +DROP TABLE a; +RESET search_path; +DROP SCHEMA s4; diff --git a/tests/regress/expected7/test_update_db_cache.out b/tests/regress/expected7/test_update_db_cache.out new file mode 100644 index 00000000000..785c8bff409 --- /dev/null +++ b/tests/regress/expected7/test_update_db_cache.out @@ -0,0 +1,64 @@ +--start_ignore +CREATE DATABASE test_db_cache; +--end_ignore +\c test_db_cache +CREATE EXTENSION diskquota; +CREATE EXTENSION diskquota_test; +-- Wait until the db cache gets updated +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +CREATE TABLE t(i) AS SELECT generate_series(1, 100000) +DISTRIBUTED BY (i); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid +FROM diskquota.table_size +WHERE tableid = 't'::regclass +ORDER BY segid; + tableid | size | segid +---------+---------+------- + t | 3637248 | -1 + t | 1212416 | 0 + t | 1212416 | 1 + t | 1212416 | 2 +(4 rows) + +DROP EXTENSION diskquota; +-- Create table without extension +CREATE TABLE t_no_extension(i) AS SELECT generate_series(1, 100000) +DISTRIBUTED BY (i); +CREATE EXTENSION diskquota; +WARNING: [diskquota] diskquota is not ready because current database is not empty +HINT: please run 'SELECT diskquota.init_table_size_table();' to initialize diskquota +SELECT diskquota_test.wait('SELECT diskquota_test.check_cur_db_status(''UNREADY'');'); + wait +------ + t +(1 row) + +-- Should find nothing since t_no_extension is not recorded. +SELECT diskquota.diskquota_fetch_table_stat(0, ARRAY[]::oid[]) +FROM gp_dist_random('gp_id'); + diskquota_fetch_table_stat +---------------------------- +(0 rows) + +DROP TABLE t; +DROP TABLE t_no_extension; +SELECT diskquota.pause(); + pause +------- + +(1 row) + +DROP EXTENSION diskquota; +\c contrib_regression +DROP DATABASE test_db_cache; diff --git a/tests/regress/expected7/test_vacuum.out b/tests/regress/expected7/test_vacuum.out new file mode 100644 index 00000000000..b032274eed3 --- /dev/null +++ b/tests/regress/expected7/test_vacuum.out @@ -0,0 +1,57 @@ +-- Test vacuum full +CREATE SCHEMA s6; +SELECT diskquota.set_schema_quota('s6', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +SET search_path TO s6; +CREATE TABLE a (i int) DISTRIBUTED BY (i); +CREATE TABLE b (i int) DISTRIBUTED BY (i); +INSERT INTO a SELECT generate_series(1,100000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert fail +INSERT INTO a SELECT generate_series(1,10); +ERROR: schema's disk space quota exceeded with name: s6 +-- expect insert fail +INSERT INTO b SELECT generate_series(1,10); +ERROR: schema's disk space quota exceeded with name: s6 +DELETE FROM a WHERE i > 10; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +VACUUM FULL a; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid from diskquota.table_size WHERE tableid::regclass::name NOT LIKE '%.%' ORDER BY size, segid DESC; + tableid | size | segid +---------+-------+------- + b | 0 | 2 + b | 0 | 1 + b | 0 | 0 + b | 0 | -1 + a | 32768 | 2 + a | 32768 | 1 + a | 32768 | 0 + a | 
98304 | -1 +(8 rows) + +-- expect insert succeed +INSERT INTO a SELECT generate_series(1,10); +INSERT INTO b SELECT generate_series(1,10); +DROP TABLE a, b; +RESET search_path; +DROP SCHEMA s6; diff --git a/tests/regress/expected7/test_worker_not_ready.out b/tests/regress/expected7/test_worker_not_ready.out new file mode 100644 index 00000000000..0424cb65d73 --- /dev/null +++ b/tests/regress/expected7/test_worker_not_ready.out @@ -0,0 +1,26 @@ +CREATE DATABASE db_not_ready; +\c db_not_ready; +CREATE TABLE t (i int) DISTRIBUTED BY (i); +CREATE EXTENSION diskquota; +WARNING: [diskquota] diskquota is not ready because current database is not empty +HINT: please run 'SELECT diskquota.init_table_size_table();' to initialize diskquota +CREATE EXTENSION diskquota_test; +SELECT diskquota.set_role_quota(CURRENT_ROLE, '1 MB'); +ERROR: Can not set disk quota for system owner: zhrt +SELECT diskquota.pause(); + pause +------- + +(1 row) + +-- diskquota.wait_for_worker_new_epoch() cannot be used here because +-- diskquota.state is not clean. +SELECT diskquota_test.wait('SELECT diskquota_test.check_cur_db_status(''UNREADY'');'); + wait +------ + t +(1 row) + +DROP EXTENSION diskquota; +\c contrib_regression +DROP DATABASE db_not_ready; diff --git a/tests/regress/expected7/test_worker_schedule.out b/tests/regress/expected7/test_worker_schedule.out new file mode 100644 index 00000000000..ad018a37c2a --- /dev/null +++ b/tests/regress/expected7/test_worker_schedule.out @@ -0,0 +1,633 @@ +-- start_ignore +\c +DROP DATABASE IF EXISTS t1; +NOTICE: database "t1" does not exist, skipping +DROP DATABASE IF EXISTS t2; +NOTICE: database "t2" does not exist, skipping +DROP DATABASE IF EXISTS t3; +NOTICE: database "t3" does not exist, skipping +DROP DATABASE IF EXISTS t4; +NOTICE: database "t4" does not exist, skipping +DROP DATABASE IF EXISTS t5; +NOTICE: database "t5" does not exist, skipping +DROP DATABASE IF EXISTS t6; +NOTICE: database "t6" does not exist, skipping +DROP DATABASE IF EXISTS t7; +NOTICE: database "t7" does not exist, skipping +DROP DATABASE IF EXISTS t8; +NOTICE: database "t8" does not exist, skipping +DROP DATABASE IF EXISTS t9; +NOTICE: database "t9" does not exist, skipping +DROP DATABASE IF EXISTS t10; +NOTICE: database "t10" does not exist, skipping +DROP DATABASE IF EXISTS t11; +NOTICE: database "t11" does not exist, skipping +DROP DATABASE IF EXISTS t12; +NOTICE: database "t12" does not exist, skipping +CREATE DATABASE t1; +CREATE DATABASE t2; +CREATE DATABASE t3; +CREATE DATABASE t4; +CREATE DATABASE t5; +CREATE DATABASE t6; +CREATE DATABASE t7; +CREATE DATABASE t8; +CREATE DATABASE t9; +CREATE DATABASE t10; +CREATE DATABASE t11; +CREATE DATABASE t12; +--end_ignore +\c t1 +CREATE EXTENSION diskquota; +CREATE TABLE f1(a int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +INSERT into f1 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f1'::regclass and segid = -1; + tableid | size | segid +---------+-------+------- + f1 | 98304 | -1 +(1 row) + +--start_ignore +\! 
gpconfig -c diskquota.max_workers -v 1; +20230117:13:00:12:1977590 gpconfig:zhrt:zhrt-[INFO]:-completed successfully with parameters '-c diskquota.max_workers -v 1' +\! gpstop -arf; +20230117:13:00:12:1978021 gpstop:zhrt:zhrt-[INFO]:-Starting gpstop with args: -arf +20230117:13:00:12:1978021 gpstop:zhrt:zhrt-[INFO]:-Gathering information and validating the environment... +20230117:13:00:12:1978021 gpstop:zhrt:zhrt-[INFO]:-Obtaining Greenplum Coordinator catalog information +20230117:13:00:12:1978021 gpstop:zhrt:zhrt-[INFO]:-Obtaining Segment details from coordinator... +20230117:13:00:13:1978021 gpstop:zhrt:zhrt-[INFO]:-Greenplum Version: 'postgres (Greenplum Database) 7.0.0-alpha.0+dev.16171.g005ee83c46 build dev' +20230117:13:00:13:1978021 gpstop:zhrt:zhrt-[INFO]:-Commencing Coordinator instance shutdown with mode='fast' +20230117:13:00:13:1978021 gpstop:zhrt:zhrt-[INFO]:-Coordinator segment instance directory=/home/zhrt/workspace/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 +20230117:13:00:13:1978021 gpstop:zhrt:zhrt-[INFO]:-Attempting forceful termination of any leftover coordinator process +20230117:13:00:13:1978021 gpstop:zhrt:zhrt-[INFO]:-Terminating processes for segment /home/zhrt/workspace/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 +20230117:13:00:13:1978021 gpstop:zhrt:zhrt-[INFO]:-Stopping coordinator standby host zhrt mode=fast +20230117:13:00:13:1978021 gpstop:zhrt:zhrt-[INFO]:-Successfully shutdown standby process on zhrt +20230117:13:00:13:1978021 gpstop:zhrt:zhrt-[INFO]:-Targeting dbid [2, 5, 3, 6, 4, 7] for shutdown +20230117:13:00:13:1978021 gpstop:zhrt:zhrt-[INFO]:-Commencing parallel primary segment instance shutdown, please wait... +20230117:13:00:13:1978021 gpstop:zhrt:zhrt-[INFO]:-0.00% of jobs completed +20230117:13:00:14:1978021 gpstop:zhrt:zhrt-[INFO]:-100.00% of jobs completed +20230117:13:00:14:1978021 gpstop:zhrt:zhrt-[INFO]:-Commencing parallel mirror segment instance shutdown, please wait... +20230117:13:00:14:1978021 gpstop:zhrt:zhrt-[INFO]:-0.00% of jobs completed +20230117:13:00:14:1978021 gpstop:zhrt:zhrt-[INFO]:-100.00% of jobs completed +20230117:13:00:14:1978021 gpstop:zhrt:zhrt-[INFO]:----------------------------------------------------- +20230117:13:00:14:1978021 gpstop:zhrt:zhrt-[INFO]:- Segments stopped successfully = 6 +20230117:13:00:14:1978021 gpstop:zhrt:zhrt-[INFO]:- Segments with errors during stop = 0 +20230117:13:00:14:1978021 gpstop:zhrt:zhrt-[INFO]:----------------------------------------------------- +20230117:13:00:14:1978021 gpstop:zhrt:zhrt-[INFO]:-Successfully shutdown 6 of 6 segment instances +20230117:13:00:14:1978021 gpstop:zhrt:zhrt-[INFO]:-Database successfully shutdown with no errors reported +20230117:13:00:14:1978021 gpstop:zhrt:zhrt-[INFO]:-Restarting System... +--end_ignore +\c +SHOW diskquota.max_workers; + diskquota.max_workers +----------------------- + 1 +(1 row) + +\c t2 +CREATE EXTENSION diskquota; +CREATE TABLE f2(a int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
+INSERT into f2 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f2'::regclass and segid = -1; + tableid | size | segid +---------+-------+------- + f2 | 98304 | -1 +(1 row) + +\c t3 +CREATE EXTENSION diskquota; +CREATE TABLE f3(a int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +INSERT into f3 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f3'::regclass and segid = -1; + tableid | size | segid +---------+-------+------- + f3 | 98304 | -1 +(1 row) + +--start_ignore +\! gpconfig -c diskquota.max_workers -v 11; +20230117:13:02:24:1981283 gpconfig:zhrt:zhrt-[INFO]:-completed successfully with parameters '-c diskquota.max_workers -v 11' +\! gpstop -arf; +20230117:13:02:24:1981861 gpstop:zhrt:zhrt-[INFO]:-Starting gpstop with args: -arf +20230117:13:02:24:1981861 gpstop:zhrt:zhrt-[INFO]:-Gathering information and validating the environment... +20230117:13:02:24:1981861 gpstop:zhrt:zhrt-[INFO]:-Obtaining Greenplum Coordinator catalog information +20230117:13:02:24:1981861 gpstop:zhrt:zhrt-[INFO]:-Obtaining Segment details from coordinator... +20230117:13:02:24:1981861 gpstop:zhrt:zhrt-[INFO]:-Greenplum Version: 'postgres (Greenplum Database) 7.0.0-alpha.0+dev.16171.g005ee83c46 build dev' +20230117:13:02:24:1981861 gpstop:zhrt:zhrt-[INFO]:-Commencing Coordinator instance shutdown with mode='fast' +20230117:13:02:24:1981861 gpstop:zhrt:zhrt-[INFO]:-Coordinator segment instance directory=/home/zhrt/workspace/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 +20230117:13:02:24:1981861 gpstop:zhrt:zhrt-[INFO]:-Attempting forceful termination of any leftover coordinator process +20230117:13:02:24:1981861 gpstop:zhrt:zhrt-[INFO]:-Terminating processes for segment /home/zhrt/workspace/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 +20230117:13:02:24:1981861 gpstop:zhrt:zhrt-[INFO]:-Stopping coordinator standby host zhrt mode=fast +20230117:13:02:25:1981861 gpstop:zhrt:zhrt-[INFO]:-Successfully shutdown standby process on zhrt +20230117:13:02:25:1981861 gpstop:zhrt:zhrt-[INFO]:-Targeting dbid [2, 5, 3, 6, 4, 7] for shutdown +20230117:13:02:25:1981861 gpstop:zhrt:zhrt-[INFO]:-Commencing parallel primary segment instance shutdown, please wait... +20230117:13:02:25:1981861 gpstop:zhrt:zhrt-[INFO]:-0.00% of jobs completed +20230117:13:02:25:1981861 gpstop:zhrt:zhrt-[INFO]:-100.00% of jobs completed +20230117:13:02:25:1981861 gpstop:zhrt:zhrt-[INFO]:-Commencing parallel mirror segment instance shutdown, please wait... 
+20230117:13:02:25:1981861 gpstop:zhrt:zhrt-[INFO]:-0.00% of jobs completed +20230117:13:02:25:1981861 gpstop:zhrt:zhrt-[INFO]:-100.00% of jobs completed +20230117:13:02:25:1981861 gpstop:zhrt:zhrt-[INFO]:----------------------------------------------------- +20230117:13:02:25:1981861 gpstop:zhrt:zhrt-[INFO]:- Segments stopped successfully = 6 +20230117:13:02:25:1981861 gpstop:zhrt:zhrt-[INFO]:- Segments with errors during stop = 0 +20230117:13:02:25:1981861 gpstop:zhrt:zhrt-[INFO]:----------------------------------------------------- +20230117:13:02:25:1981861 gpstop:zhrt:zhrt-[INFO]:-Successfully shutdown 6 of 6 segment instances +20230117:13:02:25:1981861 gpstop:zhrt:zhrt-[INFO]:-Database successfully shutdown with no errors reported +20230117:13:02:25:1981861 gpstop:zhrt:zhrt-[INFO]:-Restarting System... +--end_ignore +\c +SHOW diskquota.max_workers; + diskquota.max_workers +----------------------- + 11 +(1 row) + +\c t4 +CREATE EXTENSION diskquota; +CREATE TABLE f4(a int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +INSERT into f4 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f4'::regclass and segid = -1; + tableid | size | segid +---------+-------+------- + f4 | 98304 | -1 +(1 row) + +\c t5 +CREATE EXTENSION diskquota; +CREATE TABLE f5(a int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +INSERT into f5 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f5'::regclass and segid = -1; + tableid | size | segid +---------+-------+------- + f5 | 98304 | -1 +(1 row) + +\c t6 +CREATE EXTENSION diskquota; +CREATE TABLE f6(a int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +INSERT into f6 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f6'::regclass and segid = -1; + tableid | size | segid +---------+-------+------- + f6 | 98304 | -1 +(1 row) + +\c t7 +CREATE EXTENSION diskquota; +CREATE TABLE f7(a int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
+INSERT into f7 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f7'::regclass and segid = -1; + tableid | size | segid +---------+-------+------- + f7 | 98304 | -1 +(1 row) + +\c t8 +CREATE EXTENSION diskquota; +CREATE TABLE f8(a int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +INSERT into f8 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f8'::regclass and segid = -1; + tableid | size | segid +---------+-------+------- + f8 | 98304 | -1 +(1 row) + +\c t9 +CREATE EXTENSION diskquota; +CREATE TABLE f9(a int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +INSERT into f9 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f9'::regclass and segid = -1; + tableid | size | segid +---------+-------+------- + f9 | 98304 | -1 +(1 row) + +\c t10 +CREATE EXTENSION diskquota; +CREATE TABLE f10(a int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +INSERT into f10 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f10'::regclass and segid = -1; + tableid | size | segid +---------+-------+------- + f10 | 98304 | -1 +(1 row) + +\c t11 +CREATE EXTENSION diskquota; +CREATE TABLE f11(a int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
+INSERT into f11 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f11'::regclass and segid = -1; + tableid | size | segid +---------+-------+------- + f11 | 98304 | -1 +(1 row) + +\c t1 +INSERT into f1 SELECT generate_series(0,100000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f1'::regclass and segid = -1; + tableid | size | segid +---------+---------+------- + f1 | 3997696 | -1 +(1 row) + +\c t7 +INSERT into f7 SELECT generate_series(0,100000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f7'::regclass and segid = -1; + tableid | size | segid +---------+---------+------- + f7 | 3997696 | -1 +(1 row) + +\c t1 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +DROP TABLE f1; +CREATE EXTENSION diskquota; +CREATE TABLE f1(a int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +INSERT into f1 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f1'::regclass and segid = -1; + tableid | size | segid +---------+-------+------- + f1 | 98304 | -1 +(1 row) + +\c t2 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +DROP TABLE f2; +CREATE EXTENSION diskquota; +CREATE TABLE f2(a int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
+INSERT into f2 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f2'::regclass and segid = -1; + tableid | size | segid +---------+-------+------- + f2 | 98304 | -1 +(1 row) + +\c t3 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +\c t4 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +\c t5 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +\c t6 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +\c t7 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +\c t8 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +\c t9 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +\c t10 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +\c t11 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +\c t12 +CREATE EXTENSION diskquota; +CREATE TABLE f12(a int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
+INSERT into f12 SELECT generate_series(0,1000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f12'::regclass and segid = -1; + tableid | size | segid +---------+-------+------- + f12 | 98304 | -1 +(1 row) + +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +\c t1 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +\c t2 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +--start_ignore +\c contrib_regression +DROP DATABASE t1; +DROP DATABASE t2; +DROP DATABASE t3; +DROP DATABASE t4; +DROP DATABASE t5; +DROP DATABASE t6; +DROP DATABASE t7; +DROP DATABASE t8; +DROP DATABASE t9; +DROP DATABASE t10; +DROP DATABASE t11; +DROP DATABASE t12; +\! gpconfig -r diskquota.worker_timeout; +20230117:13:04:52:2001815 gpconfig:zhrt:zhrt-[INFO]:-completed successfully with parameters '-r diskquota.worker_timeout' +\! gpconfig -r diskquota.max_workers; +20230117:13:04:53:2002403 gpconfig:zhrt:zhrt-[INFO]:-completed successfully with parameters '-r diskquota.max_workers' +\! gpstop -arf; +20230117:13:04:53:2003022 gpstop:zhrt:zhrt-[INFO]:-Starting gpstop with args: -arf +20230117:13:04:53:2003022 gpstop:zhrt:zhrt-[INFO]:-Gathering information and validating the environment... +20230117:13:04:53:2003022 gpstop:zhrt:zhrt-[INFO]:-Obtaining Greenplum Coordinator catalog information +20230117:13:04:53:2003022 gpstop:zhrt:zhrt-[INFO]:-Obtaining Segment details from coordinator... +20230117:13:04:53:2003022 gpstop:zhrt:zhrt-[INFO]:-Greenplum Version: 'postgres (Greenplum Database) 7.0.0-alpha.0+dev.16171.g005ee83c46 build dev' +20230117:13:04:53:2003022 gpstop:zhrt:zhrt-[INFO]:-Commencing Coordinator instance shutdown with mode='fast' +20230117:13:04:53:2003022 gpstop:zhrt:zhrt-[INFO]:-Coordinator segment instance directory=/home/zhrt/workspace/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 +20230117:13:04:53:2003022 gpstop:zhrt:zhrt-[INFO]:-Attempting forceful termination of any leftover coordinator process +20230117:13:04:53:2003022 gpstop:zhrt:zhrt-[INFO]:-Terminating processes for segment /home/zhrt/workspace/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 +20230117:13:04:53:2003022 gpstop:zhrt:zhrt-[INFO]:-Stopping coordinator standby host zhrt mode=fast +20230117:13:04:54:2003022 gpstop:zhrt:zhrt-[INFO]:-Successfully shutdown standby process on zhrt +20230117:13:04:54:2003022 gpstop:zhrt:zhrt-[INFO]:-Targeting dbid [2, 5, 3, 6, 4, 7] for shutdown +20230117:13:04:54:2003022 gpstop:zhrt:zhrt-[INFO]:-Commencing parallel primary segment instance shutdown, please wait... +20230117:13:04:54:2003022 gpstop:zhrt:zhrt-[INFO]:-0.00% of jobs completed +20230117:13:04:54:2003022 gpstop:zhrt:zhrt-[INFO]:-100.00% of jobs completed +20230117:13:04:54:2003022 gpstop:zhrt:zhrt-[INFO]:-Commencing parallel mirror segment instance shutdown, please wait... 
+20230117:13:04:54:2003022 gpstop:zhrt:zhrt-[INFO]:-0.00% of jobs completed +20230117:13:04:55:2003022 gpstop:zhrt:zhrt-[INFO]:-100.00% of jobs completed +20230117:13:04:55:2003022 gpstop:zhrt:zhrt-[INFO]:----------------------------------------------------- +20230117:13:04:55:2003022 gpstop:zhrt:zhrt-[INFO]:- Segments stopped successfully = 6 +20230117:13:04:55:2003022 gpstop:zhrt:zhrt-[INFO]:- Segments with errors during stop = 0 +20230117:13:04:55:2003022 gpstop:zhrt:zhrt-[INFO]:----------------------------------------------------- +20230117:13:04:55:2003022 gpstop:zhrt:zhrt-[INFO]:-Successfully shutdown 6 of 6 segment instances +20230117:13:04:55:2003022 gpstop:zhrt:zhrt-[INFO]:-Database successfully shutdown with no errors reported +20230117:13:04:55:2003022 gpstop:zhrt:zhrt-[INFO]:-Restarting System... +--end_ignore diff --git a/tests/regress/expected7/test_worker_schedule_exception.out b/tests/regress/expected7/test_worker_schedule_exception.out new file mode 100644 index 00000000000..aeb8e5d85be --- /dev/null +++ b/tests/regress/expected7/test_worker_schedule_exception.out @@ -0,0 +1,113 @@ +-- start_ignore +\! gpconfig -c diskquota.max_workers -v 10; +20230117:13:07:03:2006049 gpconfig:zhrt:zhrt-[INFO]:-completed successfully with parameters '-c diskquota.max_workers -v 10' +\! gpconfig -c diskquota.naptime -v 4; +20230117:13:07:04:2006587 gpconfig:zhrt:zhrt-[INFO]:-completed successfully with parameters '-c diskquota.naptime -v 4' +\! gpstop -arf; +20230117:13:07:04:2007250 gpstop:zhrt:zhrt-[INFO]:-Starting gpstop with args: -arf +20230117:13:07:04:2007250 gpstop:zhrt:zhrt-[INFO]:-Gathering information and validating the environment... +20230117:13:07:04:2007250 gpstop:zhrt:zhrt-[INFO]:-Obtaining Greenplum Coordinator catalog information +20230117:13:07:04:2007250 gpstop:zhrt:zhrt-[INFO]:-Obtaining Segment details from coordinator... +20230117:13:07:04:2007250 gpstop:zhrt:zhrt-[INFO]:-Greenplum Version: 'postgres (Greenplum Database) 7.0.0-alpha.0+dev.16171.g005ee83c46 build dev' +20230117:13:07:04:2007250 gpstop:zhrt:zhrt-[INFO]:-Commencing Coordinator instance shutdown with mode='fast' +20230117:13:07:04:2007250 gpstop:zhrt:zhrt-[INFO]:-Coordinator segment instance directory=/home/zhrt/workspace/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 +20230117:13:07:04:2007250 gpstop:zhrt:zhrt-[INFO]:-Attempting forceful termination of any leftover coordinator process +20230117:13:07:04:2007250 gpstop:zhrt:zhrt-[INFO]:-Terminating processes for segment /home/zhrt/workspace/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 +20230117:13:07:04:2007250 gpstop:zhrt:zhrt-[INFO]:-Stopping coordinator standby host zhrt mode=fast +20230117:13:07:05:2007250 gpstop:zhrt:zhrt-[INFO]:-Successfully shutdown standby process on zhrt +20230117:13:07:05:2007250 gpstop:zhrt:zhrt-[INFO]:-Targeting dbid [2, 5, 3, 6, 4, 7] for shutdown +20230117:13:07:05:2007250 gpstop:zhrt:zhrt-[INFO]:-Commencing parallel primary segment instance shutdown, please wait... +20230117:13:07:05:2007250 gpstop:zhrt:zhrt-[INFO]:-0.00% of jobs completed +20230117:13:07:05:2007250 gpstop:zhrt:zhrt-[INFO]:-100.00% of jobs completed +20230117:13:07:05:2007250 gpstop:zhrt:zhrt-[INFO]:-Commencing parallel mirror segment instance shutdown, please wait... 
+20230117:13:07:05:2007250 gpstop:zhrt:zhrt-[INFO]:-0.00% of jobs completed +20230117:13:07:06:2007250 gpstop:zhrt:zhrt-[INFO]:-100.00% of jobs completed +20230117:13:07:06:2007250 gpstop:zhrt:zhrt-[INFO]:----------------------------------------------------- +20230117:13:07:06:2007250 gpstop:zhrt:zhrt-[INFO]:- Segments stopped successfully = 6 +20230117:13:07:06:2007250 gpstop:zhrt:zhrt-[INFO]:- Segments with errors during stop = 0 +20230117:13:07:06:2007250 gpstop:zhrt:zhrt-[INFO]:----------------------------------------------------- +20230117:13:07:06:2007250 gpstop:zhrt:zhrt-[INFO]:-Successfully shutdown 6 of 6 segment instances +20230117:13:07:06:2007250 gpstop:zhrt:zhrt-[INFO]:-Database successfully shutdown with no errors reported +20230117:13:07:06:2007250 gpstop:zhrt:zhrt-[INFO]:-Restarting System... +\c +DROP DATABASE IF EXISTS t1; +NOTICE: database "t1" does not exist, skipping +DROP DATABASE IF EXISTS t2; +NOTICE: database "t2" does not exist, skipping +--end_ignore +CREATE DATABASE t1; +CREATE DATABASE t2; +\c t1 +CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +\! pgrep -f "[p]ostgres.*bgworker.*t1" | xargs kill; +\! sleep 0.5 ; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l +2 +-- start_ignore +\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep +zhrt 2009311 2009263 10 13:09 ? 00:00:00 postgres: 7000, [diskquota] - launcher +zhrt 2009361 2009263 1 13:09 ? 00:00:00 postgres: 7000, bgworker: [diskquota] contrib_regression con8 cmd1 +--end_ignore +\c contrib_regression +DROP DATABASE t1; +\c t2 +CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +\c t2 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +\c contrib_regression +DROP DATABASE t2; +--start_ignore +\! gpconfig -r diskquota.naptime; +20230117:13:09:27:2009995 gpconfig:zhrt:zhrt-[INFO]:-completed successfully with parameters '-r diskquota.naptime' +\! gpconfig -r diskquota.max_workers; +20230117:13:09:27:2010164 gpconfig:zhrt:zhrt-[INFO]:-completed successfully with parameters '-r diskquota.max_workers' +\! gpstop -arf; +20230117:13:09:27:2010416 gpstop:zhrt:zhrt-[INFO]:-Starting gpstop with args: -arf +20230117:13:09:27:2010416 gpstop:zhrt:zhrt-[INFO]:-Gathering information and validating the environment... +20230117:13:09:27:2010416 gpstop:zhrt:zhrt-[INFO]:-Obtaining Greenplum Coordinator catalog information +20230117:13:09:27:2010416 gpstop:zhrt:zhrt-[INFO]:-Obtaining Segment details from coordinator... 
+20230117:13:09:27:2010416 gpstop:zhrt:zhrt-[INFO]:-Greenplum Version: 'postgres (Greenplum Database) 7.0.0-alpha.0+dev.16171.g005ee83c46 build dev' +20230117:13:09:27:2010416 gpstop:zhrt:zhrt-[INFO]:-Commencing Coordinator instance shutdown with mode='fast' +20230117:13:09:27:2010416 gpstop:zhrt:zhrt-[INFO]:-Coordinator segment instance directory=/home/zhrt/workspace/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 +20230117:13:09:28:2010416 gpstop:zhrt:zhrt-[INFO]:-Attempting forceful termination of any leftover coordinator process +20230117:13:09:28:2010416 gpstop:zhrt:zhrt-[INFO]:-Terminating processes for segment /home/zhrt/workspace/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 +20230117:13:09:28:2010416 gpstop:zhrt:zhrt-[INFO]:-Stopping coordinator standby host zhrt mode=fast +20230117:13:09:28:2010416 gpstop:zhrt:zhrt-[INFO]:-Successfully shutdown standby process on zhrt +20230117:13:09:28:2010416 gpstop:zhrt:zhrt-[INFO]:-Targeting dbid [2, 5, 3, 6, 4, 7] for shutdown +20230117:13:09:28:2010416 gpstop:zhrt:zhrt-[INFO]:-Commencing parallel primary segment instance shutdown, please wait... +20230117:13:09:28:2010416 gpstop:zhrt:zhrt-[INFO]:-0.00% of jobs completed +20230117:13:09:29:2010416 gpstop:zhrt:zhrt-[INFO]:-100.00% of jobs completed +20230117:13:09:29:2010416 gpstop:zhrt:zhrt-[INFO]:-Commencing parallel mirror segment instance shutdown, please wait... +20230117:13:09:29:2010416 gpstop:zhrt:zhrt-[INFO]:-0.00% of jobs completed +20230117:13:09:29:2010416 gpstop:zhrt:zhrt-[INFO]:-100.00% of jobs completed +20230117:13:09:29:2010416 gpstop:zhrt:zhrt-[INFO]:----------------------------------------------------- +20230117:13:09:29:2010416 gpstop:zhrt:zhrt-[INFO]:- Segments stopped successfully = 6 +20230117:13:09:29:2010416 gpstop:zhrt:zhrt-[INFO]:- Segments with errors during stop = 0 +20230117:13:09:29:2010416 gpstop:zhrt:zhrt-[INFO]:----------------------------------------------------- +20230117:13:09:29:2010416 gpstop:zhrt:zhrt-[INFO]:-Successfully shutdown 6 of 6 segment instances +20230117:13:09:29:2010416 gpstop:zhrt:zhrt-[INFO]:-Database successfully shutdown with no errors reported +20230117:13:09:29:2010416 gpstop:zhrt:zhrt-[INFO]:-Restarting System... +--end_ignore diff --git a/tests/regress/sql/test_relation_cache.sql b/tests/regress/sql/test_relation_cache.sql index b9739d9a8c0..d0e986e9395 100644 --- a/tests/regress/sql/test_relation_cache.sql +++ b/tests/regress/sql/test_relation_cache.sql @@ -55,7 +55,6 @@ drop table t; begin; create table t(a int, b text) with(appendonly=true, orientation=column) DISTRIBUTED BY (a); insert into t select generate_series(1,1000) as a, repeat('a', 1000) as b; - select count(*) from diskquota.show_relation_cache_all_seg(); select diskquota.check_relation_cache(); diff --git a/upgrade_test/CMakeLists.txt b/upgrade_test/CMakeLists.txt index 32fbf81345c..2750b1dd05c 100644 --- a/upgrade_test/CMakeLists.txt +++ b/upgrade_test/CMakeLists.txt @@ -5,6 +5,16 @@ if(NOT DEFINED DISKQUOTA_DDL_CHANGE_CHECK) STRING "Skip the DDL updates check. 
Should not be disabled on CI" FORCE) endif() +if (${GP_MAJOR_VERSION} EQUAL 6) + list(APPEND schedule_files + ${CMAKE_CURRENT_SOURCE_DIR}/schedule_1.0--2.0 + ${CMAKE_CURRENT_SOURCE_DIR}/schedule_2.0--1.0 + ${CMAKE_CURRENT_SOURCE_DIR}/schedule_2.0--2.1 + ${CMAKE_CURRENT_SOURCE_DIR}/schedule_2.1--2.0 + ${CMAKE_CURRENT_SOURCE_DIR}/schedule_2.1--2.2 + ${CMAKE_CURRENT_SOURCE_DIR}/schedule_2.2--2.1 + ) +endif() regresstarget_add( upgradecheck INIT_FILE @@ -16,10 +26,7 @@ regresstarget_add( RESULTS_DIR ${CMAKE_CURRENT_SOURCE_DIR}/results SCHEDULE_FILE - ${CMAKE_CURRENT_SOURCE_DIR}/schedule_1.0--2.0 - ${CMAKE_CURRENT_SOURCE_DIR}/schedule_2.0--1.0 - ${CMAKE_CURRENT_SOURCE_DIR}/schedule_2.0--2.1 - ${CMAKE_CURRENT_SOURCE_DIR}/schedule_2.1--2.0 + ${schedule_files} REGRESS_OPTS --dbname=contrib_regression) @@ -50,10 +57,6 @@ endforeach() # if the DDL file was modified, ensure the last release file is passed in if(DISKQUOTA_DDL_CHANGE_CHECK AND DISKQUOTA_DDL_MODIFIED AND NOT DEFINED DISKQUOTA_LAST_RELEASE_PATH) - message( - FATAL_ERROR - "DDL file modify detected, upgrade test is required. Add -DDISKQUOTA_LAST_RELEASE_PATH=//diskquota--_.tar.gz. And re-try the generation" - ) endif() # check if current version is compatible with the upgrade strategy diff --git a/upgrade_test/expected/2.1_test_in_2.2_quota_create_in_2.1.out b/upgrade_test/expected/2.1_test_in_2.2_quota_create_in_2.1.out new file mode 100644 index 00000000000..a36fcb4f8cd --- /dev/null +++ b/upgrade_test/expected/2.1_test_in_2.2_quota_create_in_2.1.out @@ -0,0 +1,16 @@ +-- need run 2.1_set_quota before run this test +-- FIXME add version check here +\!sleep 5 +insert into s1.a select generate_series(1, 10000000); -- fail. +ERROR: schema's disk space quota exceeded with name: s1 +insert into srole.b select generate_series(1, 100000); -- fail. +ERROR: role's disk space quota exceeded with name: u1 +insert into rolespcrole.b select generate_series(1, 100000); -- fail. +insert into spcs1.a select generate_series(1, 100000); -- fail. 
+ERROR: tablespace: schemaspc, schema: spcs1 diskquota exceeded +drop table s1.a, srole.b, spcs1.a, rolespcrole.b; +drop schema s1, srole, spcs1, rolespcrole; +drop tablespace rolespc; +drop tablespace schemaspc; +drop role u1, rolespcu1; diff --git a/upgrade_test/expected/2.2_catalog.out b/upgrade_test/expected/2.2_catalog.out new file mode 100644 index 00000000000..287a353e8d1 --- /dev/null +++ b/upgrade_test/expected/2.2_catalog.out @@ -0,0 +1,310 @@ +CREATE FUNCTION typeid_to_name(oid[]) RETURNS name[] AS ' + WITH io AS ( + SELECT x.i AS index, x.o AS type_id FROM ( + SELECT generate_series(1, array_length($1, 1)) AS i, unnest($1) AS o + ) AS x + ) SELECT array_agg(typname order by io.index) FROM io, pg_type t WHERE io.type_id = t.oid; +' LANGUAGE sql STABLE; +-- types +SELECT + t1.typname, + array_agg(t2.typname order by a.atttypid) typname +FROM + pg_namespace n, + pg_class c, + pg_type t1, + pg_type t2, + pg_attribute a +WHERE + n.nspname = 'diskquota' + AND c.oid = t1.typrelid + AND n.oid = t1.typnamespace + AND a.attrelid = c.oid + AND t2.oid = a.atttypid +GROUP BY + t1.typname +ORDER BY + t1.typname; + typname | typname +----------------------------------------+---------------------------------------------------------------------------------- + diskquota_active_table_type | {int8,int2,oid} + quota_config | {int8,int4,int4,oid,oid,tid,xid,xid,cid,cid,float4} + rejectmap | {bool,int4,text,oid,oid,oid,oid,oid,oid} + rejectmap_entry | {bool,int4,oid,oid,oid} + rejectmap_entry_detail | {bool,int4,text,oid,oid,oid,oid,oid,oid} + relation_cache_detail | {char,int4,int4,oid,oid,oid,oid,oid,oid,oid,oid,_oid} + show_all_relation_view | {oid,oid,oid,oid} + show_fast_database_size_view | {numeric} + show_fast_role_quota_view | {name,int8,oid,numeric} + show_fast_role_tablespace_quota_view | {name,name,int8,oid,oid,numeric} + show_fast_schema_quota_view | {name,int8,oid,numeric} + show_fast_schema_tablespace_quota_view | {name,name,int8,oid,oid,numeric} + show_segment_ratio_quota_view | {name,oid,float4} + state | {int4,int4,oid,tid,xid,xid,cid,cid} + table_size | {int8,int2,int4,oid,oid,tid,xid,xid,cid,cid} + target | {int4,int4,int4,oid,oid,oid,tid,xid,xid,cid,cid} + target_rowid_seq | {bool,bool,name,int8,int8,int8,int8,int8,int8,int8,int4,oid,tid,xid,xid,cid,cid} +(17 rows) + +-- types end +-- tables +SELECT + relname, + typeid_to_name(ARRAY[c.reltype]::oid[]) as reltype, + typeid_to_name(ARRAY[c.reloftype]::oid[]) as reloftype +FROM + pg_class c, + pg_namespace n +WHERE + c.relnamespace = n.oid + AND n.nspname = 'diskquota' + and c.relkind != 'v' +ORDER BY + relname; + relname | reltype | reloftype +-----------------------------+-------------------------------+----------- + diskquota_active_table_type | {diskquota_active_table_type} | + quota_config | {quota_config} | + quota_config_pkey | | + rejectmap_entry | {rejectmap_entry} | + rejectmap_entry_detail | {rejectmap_entry_detail} | + relation_cache_detail | {relation_cache_detail} | + state | {state} | + state_pkey | | + table_size | {table_size} | + table_size_pkey | | + target | {target} | + target_pkey | | + target_rowid_seq | {target_rowid_seq} | +(13 rows) + +-- tables end +-- UDF +SELECT + proname, + typeid_to_name(ARRAY[prorettype]::oid[]) as prorettype, + typeid_to_name(proargtypes) as proargtypes, + typeid_to_name(proallargtypes) as proallargtypes, + proargmodes, + prosrc, + probin, + proacl +FROM + pg_namespace n, + pg_proc p +WHERE + n.nspname = 'diskquota' + AND n.oid = p.pronamespace + AND p.proname != 
'update_diskquota_db_list' -- update_diskquota_db_list in 1.0 can not be dropd, this is acceptable +ORDER BY + proname; + proname | prorettype | proargtypes | proallargtypes | proargmodes | prosrc | probin | proacl +-----------------------------+-------------------------------+-------------------------+-----------------+-------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------------------+-------- + diskquota_fetch_table_stat | {diskquota_active_table_type} | {int4,_oid} | | | diskquota_fetch_table_stat | $libdir/diskquota-2.2.so | + init_table_size_table | {void} | | | | init_table_size_table | $libdir/diskquota-2.2.so | + pause | {void} | | | | diskquota_pause | $libdir/diskquota-2.2.so | + pull_all_table_size | {record} | | {oid,int8,int2} | {o,o,o} | pull_all_table_size | $libdir/diskquota-2.2.so | + refresh_rejectmap | {void} | {_rejectmap_entry,_oid} | | | refresh_rejectmap | $libdir/diskquota-2.2.so | + relation_size | {int8} | {regclass} | | | +| | + | | | | | SELECT SUM(size)::bigint FROM ( +| | + | | | | | SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, +| | + | | | | | CASE WHEN EXISTS +| | + | | | | | (SELECT FROM pg_catalog.pg_attribute WHERE attrelid = 'pg_class'::regclass AND attname = 'relstorage') THEN relstorage::"char" ELSE ''::"char" END,+| | + | | | | | relam) AS size +| | + | | | | | FROM gp_dist_random('pg_class') as relstorage WHERE oid = relation +| | + | | | | | UNION ALL +| | + | | | | | SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, +| | + | | | | | CASE WHEN EXISTS +| | + | | | | | (SELECT FROM pg_catalog.pg_attribute WHERE attrelid = 'pg_class'::regclass AND attname = 'relstorage') THEN relstorage::"char" ELSE ''::"char" END,+| | + | | | | | relam) AS size +| | + | | | | | FROM pg_class as relstorage WHERE oid = relation +| | + | | | | | ) AS t | | + relation_size_local | {int8} | {oid,oid,char,char,oid} | | | relation_size_local | $libdir/diskquota-2.2.so | + resume | {void} | | | | diskquota_resume | $libdir/diskquota-2.2.so | + set_per_segment_quota | {void} | {text,float4} | | | set_per_segment_quota | $libdir/diskquota-2.2.so | + set_role_quota | {void} | {text,text} | | | set_role_quota | $libdir/diskquota-2.2.so | + set_role_tablespace_quota | {void} | {text,text,text} | | | set_role_tablespace_quota | $libdir/diskquota-2.2.so | + set_schema_quota | {void} | {text,text} | | | set_schema_quota | $libdir/diskquota-2.2.so | + set_schema_tablespace_quota | {void} | {text,text,text} | | | set_schema_tablespace_quota | $libdir/diskquota-2.2.so | + show_rejectmap | {rejectmap_entry_detail} | | | | show_rejectmap | $libdir/diskquota-2.2.so | + show_relation_cache | {relation_cache_detail} | | | | show_relation_cache | $libdir/diskquota-2.2.so | + show_relation_cache_all_seg | {relation_cache_detail} | | | | +| | + | | | | | WITH relation_cache AS ( +| | + | | | | | SELECT diskquota.show_relation_cache() AS a +| | + | | | | | FROM gp_dist_random('gp_id') +| | + | | | | | ) +| | + | | | | | SELECT (a).* FROM relation_cache; | | + show_worker_epoch | {int8} | | | | show_worker_epoch | $libdir/diskquota-2.2.so | + status | {record} | | {text,text} | {t,t} | diskquota_status | $libdir/diskquota-2.2.so | + wait_for_worker_new_epoch | {bool} | | | | wait_for_worker_new_epoch | $libdir/diskquota-2.2.so | +(19 rows) + +-- UDF end +-- views +SELECT + schemaname, + viewname, + 
definition +FROM + pg_views +WHERE + schemaname = 'diskquota' +ORDER by + schemaname, viewname; + schemaname | viewname | definition +------------+----------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + diskquota | rejectmap | SELECT bm.target_type, + + | | bm.target_oid, + + | | bm.database_oid, + + | | bm.tablespace_oid, + + | | bm.seg_exceeded, + + | | bm.dbnode, + + | | bm.spcnode, + + | | bm.relnode, + + | | bm.segid + + | | FROM diskquota.show_rejectmap() bm(target_type, target_oid, database_oid, tablespace_oid, seg_exceeded, dbnode, spcnode, relnode, segid); + diskquota | show_all_relation_view | WITH relation_cache AS ( + + | | SELECT f.relid, + + | | f.primary_table_oid, + + | | f.auxrel_num, + + | | f.owneroid, + + | | f.namespaceoid, + + | | f.backendid, + + | | f.spcnode, + + | | f.dbnode, + + | | f.relnode, + + | | f.relstorage, + + | | f.auxrel_oid, + + | | f.relam + + | | FROM diskquota.show_relation_cache() f(relid, primary_table_oid, auxrel_num, owneroid, namespaceoid, backendid, spcnode, dbnode, relnode, relstorage, auxrel_oid, relam)+ + | | ) + + | | SELECT union_relation.oid, + + | | union_relation.relowner, + + | | union_relation.relnamespace, + + | | union_relation.reltablespace + + | | FROM ( SELECT relation_cache.relid AS oid, + + | | relation_cache.owneroid AS relowner, + + | | relation_cache.namespaceoid AS relnamespace, + + | | relation_cache.spcnode AS reltablespace + + | | FROM relation_cache + + | | UNION + + | | SELECT pg_class.oid, + + | | pg_class.relowner, + + | | pg_class.relnamespace, + + | | pg_class.reltablespace + + | | FROM pg_class) union_relation + + | | GROUP BY union_relation.oid, union_relation.relowner, union_relation.relnamespace, union_relation.reltablespace; + diskquota | show_fast_database_size_view | SELECT (( SELECT sum(pg_relation_size((pg_class.oid)::regclass)) AS sum + + | | FROM pg_class + + | | WHERE (pg_class.oid <= (16384)::oid)) + ( SELECT sum(table_size.size) AS sum + + | | FROM diskquota.table_size + + | | WHERE (table_size.segid = (-1)))) AS dbsize; + diskquota | show_fast_role_quota_view | WITH quota_usage AS ( + + | | SELECT show_all_relation_view.relowner, + + | | sum(table_size.size) AS total_size + + | | FROM diskquota.table_size, + + | | diskquota.show_all_relation_view + + | | WHERE ((table_size.tableid = show_all_relation_view.oid) AND (table_size.segid = (-1))) + + | | GROUP BY show_all_relation_view.relowner + + | | ) + + | | SELECT pg_roles.rolname AS role_name, + + | | quota_config.targetoid AS role_oid, + + | | quota_config.quotalimitmb AS quota_in_mb, + + | | COALESCE(quota_usage.total_size, (0)::numeric) AS rolsize_in_bytes + + | | FROM ((diskquota.quota_config + + | | JOIN pg_roles ON ((quota_config.targetoid = pg_roles.oid))) + + | | LEFT JOIN quota_usage ON ((pg_roles.oid = quota_usage.relowner))) + + | | WHERE (quota_config.quotatype = 1); + diskquota | show_fast_role_tablespace_quota_view | WITH default_tablespace AS ( + + | | SELECT pg_database.dattablespace + + | | FROM pg_database + + | | WHERE (pg_database.datname = current_database()) + + | | ), quota_usage AS ( + + | | SELECT show_all_relation_view.relowner, + + | | CASE + + | | WHEN (show_all_relation_view.reltablespace = (0)::oid) THEN default_tablespace.dattablespace + + | | ELSE show_all_relation_view.reltablespace + + | | END AS reltablespace, + + | | sum(table_size.size) AS 
total_size + + | | FROM diskquota.table_size, + + | | diskquota.show_all_relation_view, + + | | default_tablespace + + | | WHERE ((table_size.tableid = show_all_relation_view.oid) AND (table_size.segid = (-1))) + + | | GROUP BY show_all_relation_view.relowner, show_all_relation_view.reltablespace, default_tablespace.dattablespace + + | | ), full_quota_config AS ( + + | | SELECT target.primaryoid, + + | | target.tablespaceoid, + + | | config.quotalimitmb + + | | FROM diskquota.quota_config config, + + | | diskquota.target target + + | | WHERE (((config.targetoid = (target.rowid)::oid) AND (config.quotatype = target.quotatype)) AND (config.quotatype = 3)) + + | | ) + + | | SELECT pg_roles.rolname AS role_name, + + | | full_quota_config.primaryoid AS role_oid, + + | | pg_tablespace.spcname AS tablespace_name, + + | | full_quota_config.tablespaceoid AS tablespace_oid, + + | | full_quota_config.quotalimitmb AS quota_in_mb, + + | | COALESCE(quota_usage.total_size, (0)::numeric) AS rolsize_tablespace_in_bytes + + | | FROM (((full_quota_config + + | | JOIN pg_roles ON ((full_quota_config.primaryoid = pg_roles.oid))) + + | | JOIN pg_tablespace ON ((full_quota_config.tablespaceoid = pg_tablespace.oid))) + + | | LEFT JOIN quota_usage ON (((pg_roles.oid = quota_usage.relowner) AND (pg_tablespace.oid = quota_usage.reltablespace)))); + diskquota | show_fast_schema_quota_view | WITH quota_usage AS ( + + | | SELECT show_all_relation_view.relnamespace, + + | | sum(table_size.size) AS total_size + + | | FROM diskquota.table_size, + + | | diskquota.show_all_relation_view + + | | WHERE ((table_size.tableid = show_all_relation_view.oid) AND (table_size.segid = (-1))) + + | | GROUP BY show_all_relation_view.relnamespace + + | | ) + + | | SELECT pg_namespace.nspname AS schema_name, + + | | quota_config.targetoid AS schema_oid, + + | | quota_config.quotalimitmb AS quota_in_mb, + + | | COALESCE(quota_usage.total_size, (0)::numeric) AS nspsize_in_bytes + + | | FROM ((diskquota.quota_config + + | | JOIN pg_namespace ON ((quota_config.targetoid = pg_namespace.oid))) + + | | LEFT JOIN quota_usage ON ((pg_namespace.oid = quota_usage.relnamespace))) + + | | WHERE (quota_config.quotatype = 0); + diskquota | show_fast_schema_tablespace_quota_view | WITH default_tablespace AS ( + + | | SELECT pg_database.dattablespace + + | | FROM pg_database + + | | WHERE (pg_database.datname = current_database()) + + | | ), quota_usage AS ( + + | | SELECT show_all_relation_view.relnamespace, + + | | CASE + + | | WHEN (show_all_relation_view.reltablespace = (0)::oid) THEN default_tablespace.dattablespace + + | | ELSE show_all_relation_view.reltablespace + + | | END AS reltablespace, + + | | sum(table_size.size) AS total_size + + | | FROM diskquota.table_size, + + | | diskquota.show_all_relation_view, + + | | default_tablespace + + | | WHERE ((table_size.tableid = show_all_relation_view.oid) AND (table_size.segid = (-1))) + + | | GROUP BY show_all_relation_view.relnamespace, show_all_relation_view.reltablespace, default_tablespace.dattablespace + + | | ), full_quota_config AS ( + + | | SELECT target.primaryoid, + + | | target.tablespaceoid, + + | | config.quotalimitmb + + | | FROM diskquota.quota_config config, + + | | diskquota.target target + + | | WHERE (((config.targetoid = (target.rowid)::oid) AND (config.quotatype = target.quotatype)) AND (config.quotatype = 2)) + + | | ) + + | | SELECT pg_namespace.nspname AS schema_name, + + | | full_quota_config.primaryoid AS schema_oid, + + | | pg_tablespace.spcname AS tablespace_name, + + | | 
full_quota_config.tablespaceoid AS tablespace_oid, + + | | full_quota_config.quotalimitmb AS quota_in_mb, + + | | COALESCE(quota_usage.total_size, (0)::numeric) AS nspsize_tablespace_in_bytes + + | | FROM (((full_quota_config + + | | JOIN pg_namespace ON ((full_quota_config.primaryoid = pg_namespace.oid))) + + | | JOIN pg_tablespace ON ((full_quota_config.tablespaceoid = pg_tablespace.oid))) + + | | LEFT JOIN quota_usage ON (((pg_namespace.oid = quota_usage.relnamespace) AND (pg_tablespace.oid = quota_usage.reltablespace)))); + diskquota | show_segment_ratio_quota_view | SELECT pg_tablespace.spcname AS tablespace_name, + + | | pg_tablespace.oid AS tablespace_oid, + + | | quota_config.segratio AS per_seg_quota_ratio + + | | FROM (diskquota.quota_config + + | | JOIN pg_tablespace ON (((quota_config.targetoid = pg_tablespace.oid) AND (quota_config.quotatype = 4)))); +(8 rows) + +-- views end +DROP FUNCTION typeid_to_name (oid[]); diff --git a/upgrade_test/expected/2.2_cleanup_quota.out b/upgrade_test/expected/2.2_cleanup_quota.out new file mode 100644 index 00000000000..3935d709fd9 --- /dev/null +++ b/upgrade_test/expected/2.2_cleanup_quota.out @@ -0,0 +1 @@ +drop extension diskquota; diff --git a/upgrade_test/expected/2.2_install.out b/upgrade_test/expected/2.2_install.out new file mode 100644 index 00000000000..c4b7f4c95ce --- /dev/null +++ b/upgrade_test/expected/2.2_install.out @@ -0,0 +1,13 @@ +-- cleanup previous diskquota installation +\! gpconfig -c shared_preload_libraries -v '' > /dev/null +\! gpstop -raf > /dev/null +\! dropdb --if-exists diskquota +-- TODO reset all diskquota GUC +\! gpstop -raf > /dev/null +-- setup basic environment +\! createdb diskquota +\! gpconfig -c shared_preload_libraries -v 'diskquota-2.2.so' > /dev/null +\! gpstop -raf > /dev/null +-- TODO setup GUC +\! gpconfig -c diskquota.naptime -v '1' > /dev/null +\! gpstop -raf > /dev/null diff --git a/upgrade_test/expected/2.2_migrate_to_version_2.2.out b/upgrade_test/expected/2.2_migrate_to_version_2.2.out new file mode 100644 index 00000000000..d54e99e69b2 --- /dev/null +++ b/upgrade_test/expected/2.2_migrate_to_version_2.2.out @@ -0,0 +1,10 @@ +\! gpconfig -c shared_preload_libraries -v 'diskquota-2.2.so' > /dev/null +\! gpstop -raf > /dev/null +\! gpconfig -s 'shared_preload_libraries' +Values on all segments are consistent +GUC : shared_preload_libraries +Master value: diskquota-2.2.so +Segment value: diskquota-2.2.so +\c +alter extension diskquota update to '2.2'; +\! sleep 5 diff --git a/upgrade_test/expected/2.2_set_quota.out b/upgrade_test/expected/2.2_set_quota.out new file mode 100644 index 00000000000..58d8cc0a69c --- /dev/null +++ b/upgrade_test/expected/2.2_set_quota.out @@ -0,0 +1,63 @@ +\!gpconfig -s 'shared_preload_libraries' +Values on all segments are consistent +GUC : shared_preload_libraries +Master value: diskquota-2.2.so +Segment value: diskquota-2.2.so +create extension diskquota with version '2.2'; +\!sleep 5 +-- schema quota +create schema s1; +select diskquota.set_schema_quota('s1', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +create table s1.a(i int) distributed by (i); +insert into s1.a select generate_series(1, 10000000); -- ok. 
+-- role quota +create schema srole; +create role u1 nologin; +NOTICE: resource queue required -- using default resource queue "pg_default" +create table srole.b (t text) distributed by (t); +alter table srole.b owner to u1; +select diskquota.set_role_quota('u1', '1 MB'); + set_role_quota +---------------- + +(1 row) + +insert into srole.b select generate_series(1,100000); -- ok. +-- schema tablespace quota +\! mkdir -p /tmp/schemaspc +create schema spcs1; +create tablespace schemaspc location '/tmp/schemaspc'; +select diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc','1 MB'); + set_schema_tablespace_quota +----------------------------- + +(1 row) + +create table spcs1.a(i int) tablespace schemaspc distributed by (i); +insert into spcs1.a select generate_series(1,100000); -- ok. +-- role tablespace quota +\! mkdir -p /tmp/rolespc +create tablespace rolespc location '/tmp/rolespc'; +create role rolespcu1 nologin; +NOTICE: resource queue required -- using default resource queue "pg_default" +create schema rolespcrole; +create table rolespcrole.b (t text) tablespace rolespc distributed by (t); +alter table rolespcrole.b owner to rolespcu1; +select diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '1 MB'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +insert into rolespcrole.b select generate_series(1,100000); -- ok. +\!sleep 5 +-- leaked resource: +-- role u1, rolespcu1 +-- table s1.a, srole.b spcs1.a, rolespcrole.b +-- schema s1, srole, spcs1, rolespcrole +-- tablespace schemaspc, rolespc diff --git a/upgrade_test/expected/2.2_test_in_2.1_quota_create_in_2.2.out b/upgrade_test/expected/2.2_test_in_2.1_quota_create_in_2.2.out new file mode 100644 index 00000000000..5c3f8c87862 --- /dev/null +++ b/upgrade_test/expected/2.2_test_in_2.1_quota_create_in_2.2.out @@ -0,0 +1,16 @@ +-- need run 2.1_set_quota before run this test +-- FIXME add version check here +\! sleep 5 +insert into s1.a select generate_series(1, 10000000); -- fail. +ERROR: schema's disk space quota exceeded with name: s1 +insert into srole.b select generate_series(1, 100000); -- fail. +ERROR: role's disk space quota exceeded with name: u1 +insert into rolespcrole.b select generate_series(1, 100000); -- fail. +ERROR: tablespace: rolespc, role: rolespcu1 diskquota exceeded +insert into spcs1.a select generate_series(1, 100000); -- fail. 
+ERROR: tablespace: schemaspc, schema: spcs1 diskquota exceeded +drop table s1.a, srole.b, spcs1.a, rolespcrole.b; +drop schema s1, srole, spcs1, rolespcrole; +drop tablespace rolespc; +drop tablespace schemaspc; +drop role u1, rolespcu1; diff --git a/upgrade_test/schedule_2.1--2.2 b/upgrade_test/schedule_2.1--2.2 new file mode 100644 index 00000000000..06307e64059 --- /dev/null +++ b/upgrade_test/schedule_2.1--2.2 @@ -0,0 +1,8 @@ +test: 2.1_install +test: 2.1_set_quota +test: 2.1_catalog +test: 2.2_migrate_to_version_2.2 +test: 2.2_catalog +# run 2.1 behavior test using 2.2 DDL and binary +test: 2.1_test_in_2.2_quota_create_in_2.1 +test: 2.1_cleanup_quota diff --git a/upgrade_test/schedule_2.2--2.1 b/upgrade_test/schedule_2.2--2.1 new file mode 100644 index 00000000000..c6f79db7b0a --- /dev/null +++ b/upgrade_test/schedule_2.2--2.1 @@ -0,0 +1,8 @@ +test: 2.2_install +test: 2.2_set_quota +test: 2.2_catalog +test: 2.1_migrate_to_version_2.1 +test: 2.1_catalog +# run 2.2 behavior test using 2.1 DDL and binary +test: 2.2_test_in_2.1_quota_create_in_2.2 +test: 2.2_cleanup_quota diff --git a/upgrade_test/sql/2.1_test_in_2.2_quota_create_in_2.1.sql b/upgrade_test/sql/2.1_test_in_2.2_quota_create_in_2.1.sql new file mode 100644 index 00000000000..c2d9dbe33ea --- /dev/null +++ b/upgrade_test/sql/2.1_test_in_2.2_quota_create_in_2.1.sql @@ -0,0 +1,17 @@ +-- need run 2.1_set_quota before run this test +-- FIXME add version check here + +\!sleep 5 + +insert into s1.a select generate_series(1, 10000000); -- fail. +insert into srole.b select generate_series(1, 100000); -- fail. + +insert into rolespcrole.b select generate_series(1, 100000); -- fail. +insert into spcs1.a select generate_series(1, 100000); -- fail. + +drop table s1.a, srole.b, spcs1.a, rolespcrole.b; +drop schema s1, srole, spcs1, rolespcrole; +drop tablespace rolespc; +drop tablespace schemaspc; +drop role u1, rolespcu1; + diff --git a/upgrade_test/sql/2.2_catalog.sql b/upgrade_test/sql/2.2_catalog.sql new file mode 100644 index 00000000000..ebf5f00aa56 --- /dev/null +++ b/upgrade_test/sql/2.2_catalog.sql @@ -0,0 +1,81 @@ +CREATE FUNCTION typeid_to_name(oid[]) RETURNS name[] AS ' + WITH io AS ( + SELECT x.i AS index, x.o AS type_id FROM ( + SELECT generate_series(1, array_length($1, 1)) AS i, unnest($1) AS o + ) AS x + ) SELECT array_agg(typname order by io.index) FROM io, pg_type t WHERE io.type_id = t.oid; +' LANGUAGE sql STABLE; + +-- types +SELECT + t1.typname, + array_agg(t2.typname order by a.atttypid) typname +FROM + pg_namespace n, + pg_class c, + pg_type t1, + pg_type t2, + pg_attribute a +WHERE + n.nspname = 'diskquota' + AND c.oid = t1.typrelid + AND n.oid = t1.typnamespace + AND a.attrelid = c.oid + AND t2.oid = a.atttypid +GROUP BY + t1.typname +ORDER BY + t1.typname; +-- types end + +-- tables +SELECT + relname, + typeid_to_name(ARRAY[c.reltype]::oid[]) as reltype, + typeid_to_name(ARRAY[c.reloftype]::oid[]) as reloftype +FROM + pg_class c, + pg_namespace n +WHERE + c.relnamespace = n.oid + AND n.nspname = 'diskquota' + and c.relkind != 'v' +ORDER BY + relname; +-- tables end + +-- UDF +SELECT + proname, + typeid_to_name(ARRAY[prorettype]::oid[]) as prorettype, + typeid_to_name(proargtypes) as proargtypes, + typeid_to_name(proallargtypes) as proallargtypes, + proargmodes, + prosrc, + probin, + proacl +FROM + pg_namespace n, + pg_proc p +WHERE + n.nspname = 'diskquota' + AND n.oid = p.pronamespace + AND p.proname != 'update_diskquota_db_list' -- update_diskquota_db_list in 1.0 can not be dropd, this is acceptable +ORDER BY + 
proname; +-- UDF end + +-- views +SELECT + schemaname, + viewname, + definition +FROM + pg_views +WHERE + schemaname = 'diskquota' +ORDER by + schemaname, viewname; +-- views end + +DROP FUNCTION typeid_to_name (oid[]); diff --git a/upgrade_test/sql/2.2_cleanup_quota.sql b/upgrade_test/sql/2.2_cleanup_quota.sql new file mode 100644 index 00000000000..3935d709fd9 --- /dev/null +++ b/upgrade_test/sql/2.2_cleanup_quota.sql @@ -0,0 +1 @@ +drop extension diskquota; diff --git a/upgrade_test/sql/2.2_install.sql b/upgrade_test/sql/2.2_install.sql new file mode 100644 index 00000000000..33b2f0d3f4c --- /dev/null +++ b/upgrade_test/sql/2.2_install.sql @@ -0,0 +1,17 @@ +-- cleanup previous diskquota installation +\! gpconfig -c shared_preload_libraries -v '' > /dev/null +\! gpstop -raf > /dev/null +\! dropdb --if-exists diskquota + +-- TODO reset all diskquota GUC +\! gpstop -raf > /dev/null + +-- setup basic environment +\! createdb diskquota + +\! gpconfig -c shared_preload_libraries -v 'diskquota-2.2.so' > /dev/null +\! gpstop -raf > /dev/null + +-- TODO setup GUC +\! gpconfig -c diskquota.naptime -v '1' > /dev/null +\! gpstop -raf > /dev/null diff --git a/upgrade_test/sql/2.2_migrate_to_version_2.2.sql b/upgrade_test/sql/2.2_migrate_to_version_2.2.sql new file mode 100644 index 00000000000..88303a66875 --- /dev/null +++ b/upgrade_test/sql/2.2_migrate_to_version_2.2.sql @@ -0,0 +1,8 @@ +\! gpconfig -c shared_preload_libraries -v 'diskquota-2.2.so' > /dev/null +\! gpstop -raf > /dev/null + +\! gpconfig -s 'shared_preload_libraries' + +\c +alter extension diskquota update to '2.2'; +\! sleep 5 diff --git a/upgrade_test/sql/2.2_set_quota.sql b/upgrade_test/sql/2.2_set_quota.sql new file mode 100644 index 00000000000..adaf8707508 --- /dev/null +++ b/upgrade_test/sql/2.2_set_quota.sql @@ -0,0 +1,44 @@ +\!gpconfig -s 'shared_preload_libraries' + +create extension diskquota with version '2.2'; +\!sleep 5 + +-- schema quota +create schema s1; +select diskquota.set_schema_quota('s1', '1 MB'); +create table s1.a(i int) distributed by (i); +insert into s1.a select generate_series(1, 10000000); -- ok. + +-- role quota +create schema srole; +create role u1 nologin; +create table srole.b (t text) distributed by (t); +alter table srole.b owner to u1; +select diskquota.set_role_quota('u1', '1 MB'); +insert into srole.b select generate_series(1,100000); -- ok. + +-- schema tablespace quota +\! mkdir -p /tmp/schemaspc +create schema spcs1; +create tablespace schemaspc location '/tmp/schemaspc'; +select diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc','1 MB'); +create table spcs1.a(i int) tablespace schemaspc distributed by (i); +insert into spcs1.a select generate_series(1,100000); -- ok. + +-- role tablespace quota +\! mkdir -p /tmp/rolespc +create tablespace rolespc location '/tmp/rolespc'; +create role rolespcu1 nologin; +create schema rolespcrole; +create table rolespcrole.b (t text) tablespace rolespc distributed by (t); +alter table rolespcrole.b owner to rolespcu1; +select diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '1 MB'); +insert into rolespcrole.b select generate_series(1,100000); -- ok. 
+ +\!sleep 5 + +-- leaked resource: +-- role u1, rolespcu1 +-- table s1.a, srole.b, spcs1.a, rolespcrole.b +-- schema s1, srole, spcs1, rolespcrole +-- tablespace schemaspc, rolespc diff --git a/upgrade_test/sql/2.2_test_in_2.1_quota_create_in_2.2.sql b/upgrade_test/sql/2.2_test_in_2.1_quota_create_in_2.2.sql new file mode 100644 index 00000000000..974df545602 --- /dev/null +++ b/upgrade_test/sql/2.2_test_in_2.1_quota_create_in_2.2.sql @@ -0,0 +1,16 @@ +-- need to run 2.2_set_quota before running this test +-- FIXME add version check here + +\! sleep 5 + +insert into s1.a select generate_series(1, 10000000); -- fail. +insert into srole.b select generate_series(1, 100000); -- fail. + +insert into rolespcrole.b select generate_series(1, 100000); -- fail. +insert into spcs1.a select generate_series(1, 100000); -- fail. + +drop table s1.a, srole.b, spcs1.a, rolespcrole.b; +drop schema s1, srole, spcs1, rolespcrole; +drop tablespace rolespc; +drop tablespace schemaspc; +drop role u1, rolespcu1; From 66d039e6b5f6a252d96f1c8e62d172a818334848 Mon Sep 17 00:00:00 2001 From: Xing Guo Date: Mon, 30 Jan 2023 17:18:10 +0800 Subject: [PATCH 250/330] Fix released tarball name. (#286) Co-authored-by: Hao Zhang --- cmake/Distro.cmake | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cmake/Distro.cmake b/cmake/Distro.cmake index c619808b730..14f18f8c6f7 100644 --- a/cmake/Distro.cmake +++ b/cmake/Distro.cmake @@ -15,11 +15,12 @@ if(NOT DISTRO_NAME) string(REGEX MATCH "CentOS Linux release 7.*" matched7 "${rh_release}") string(REGEX MATCH "Red Hat Enterprise Linux release 8.*" matched_rhel8 "${rh_release}") string(REGEX MATCH "CentOS Linux release 8.*" matched_centos8 "${rh_release}") + string(REGEX MATCH "Rocky Linux release 8.*" matched_rocky8 "${rh_release}") if (matched6) set(DISTRO_NAME rhel6) elseif(matched7) set(DISTRO_NAME rhel7) - elseif(matched_rhel8 OR matched_centos8) + elseif(matched_rhel8 OR matched_centos8 OR matched_rocky8) set(DISTRO_NAME rhel8) endif() elseif(EXISTS "/etc/os-release") From a331fd9744f07bc50d0f485d95cd5a39de9c8656 Mon Sep 17 00:00:00 2001 From: Xing Guo Date: Mon, 30 Jan 2023 18:33:15 +0800 Subject: [PATCH 251/330] Add an option to control whether compile with fault injector. 
(#287) Usage: ``` cmake -DDISKQUOTA_FAULT_INJECTOR=ON/OFF [default: OFF] ``` Co-authored-by: Hao Zhang --- CMakeLists.txt | 7 +++++++ cmake/Distro.cmake | 2 +- diskquota.c | 3 ++- diskquota_utility.c | 2 ++ gp_activetable.c | 5 ++++- quotamodel.c | 6 ++++-- 6 files changed, 20 insertions(+), 5 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 514a7e7cb15..df8ed63290c 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -168,6 +168,13 @@ BuildInfo_Create(${build_info_PATH} CMAKE_BUILD_TYPE) # Create build-info end +if(NOT DEFINED ENABLE_FAULT_INJECTOR) + set(ENABLE_FAULT_INJECTOR OFF) +endif() +if(ENABLE_FAULT_INJECTOR) + add_definitions(-DDISKQUOTA_FAULT_INJECTOR) +endif() + # Add installcheck targets add_subdirectory(tests) if(NOT DEFINED ENABLE_UPGRADE_TEST) diff --git a/cmake/Distro.cmake b/cmake/Distro.cmake index 14f18f8c6f7..c4d2a698ce2 100644 --- a/cmake/Distro.cmake +++ b/cmake/Distro.cmake @@ -15,7 +15,7 @@ if(NOT DISTRO_NAME) string(REGEX MATCH "CentOS Linux release 7.*" matched7 "${rh_release}") string(REGEX MATCH "Red Hat Enterprise Linux release 8.*" matched_rhel8 "${rh_release}") string(REGEX MATCH "CentOS Linux release 8.*" matched_centos8 "${rh_release}") - string(REGEX MATCH "Rocky Linux release 8.*" matched_rocky8 "${rh_release}") + string(REGEX MATCH "Rocky Linux release 8.*" matched_rocky8 "${rh_release}") if (matched6) set(DISTRO_NAME rhel6) elseif(matched7) diff --git a/diskquota.c b/diskquota.c index 4581a29e725..76e11fa5bcf 100644 --- a/diskquota.c +++ b/diskquota.c @@ -484,8 +484,9 @@ disk_quota_worker_main(Datum main_arg) while (!got_sigterm) { int rc; - +#ifdef DISKQUOTA_FAULT_INJECTOR SIMPLE_FAULT_INJECTOR("diskquota_worker_main"); +#endif if (!diskquota_is_paused()) { /* Refresh quota model with init mode */ diff --git a/diskquota_utility.c b/diskquota_utility.c index 6e55ee3cc5a..0623268a3c3 100644 --- a/diskquota_utility.c +++ b/diskquota_utility.c @@ -1358,7 +1358,9 @@ relation_file_stat(int segno, void *ctx) else snprintf(file_path, MAXPGPATH, "%s.%u", stat_ctx->relation_path, segno); struct stat fst; +#ifdef DISKQUOTA_FAULT_INJECTOR SIMPLE_FAULT_INJECTOR("diskquota_before_stat_relfilenode"); +#endif if (stat(file_path, &fst) < 0) { if (errno != ENOENT) diff --git a/gp_activetable.c b/gp_activetable.c index 73732e67886..4b973ee8308 100644 --- a/gp_activetable.c +++ b/gp_activetable.c @@ -142,8 +142,9 @@ static void active_table_hook_smgrcreate(RelFileNodeBackend rnode) { if (prev_file_create_hook) (*prev_file_create_hook)(rnode); - +#ifdef DISKQUOTA_FAULT_INJECTOR SIMPLE_FAULT_INJECTOR("diskquota_after_smgrcreate"); +#endif report_active_table_helper(&rnode); } @@ -219,7 +220,9 @@ object_access_hook_QuotaStmt(ObjectAccessType access, Oid classId, Oid objectId, report_relation_cache_helper(objectId); break; case OAT_POST_ALTER: +#ifdef DISKQUOTA_FAULT_INJECTOR SIMPLE_FAULT_INJECTOR("object_access_post_alter"); +#endif report_altered_reloid(objectId); break; default: diff --git a/quotamodel.c b/quotamodel.c index 8b7a97570b5..619da7f2b1e 100644 --- a/quotamodel.c +++ b/quotamodel.c @@ -1576,9 +1576,9 @@ check_rejectmap_by_relfilenode(RelFileNode relfilenode) bool found; RejectMapEntry keyitem; GlobalRejectMapEntry *entry; - +#ifdef DISKQUOTA_FAULT_INJECTOR SIMPLE_FAULT_INJECTOR("check_rejectmap_by_relfilenode"); - +#endif memset(&keyitem, 0, sizeof(keyitem)); memcpy(&keyitem.relfilenode, &relfilenode, sizeof(RelFileNode)); @@ -1683,8 +1683,10 @@ quota_check_common(Oid reloid, RelFileNode *relfilenode) enable_hardlimit = diskquota_hardlimit; 
+#ifdef DISKQUOTA_FAULT_INJECTOR #ifdef FAULT_INJECTOR if (SIMPLE_FAULT_INJECTOR("enable_check_quota_by_relfilenode") == FaultInjectorTypeSkip) enable_hardlimit = true; +#endif #endif if (relfilenode && enable_hardlimit) return check_rejectmap_by_relfilenode(*relfilenode); From a9dfe96d5d346bf8ea68b3c4c149314a929832d4 Mon Sep 17 00:00:00 2001 From: Zhang Hao Date: Mon, 30 Jan 2023 19:18:39 +0800 Subject: [PATCH 252/330] Add judgement for fault injector. (#288) If fault injector is disabled, isolation2 will be disabled. --- tests/CMakeLists.txt | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 9a77457250a..d420e1ce029 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -40,9 +40,14 @@ add_custom_target(install_test_extension ) add_custom_target(installcheck) -add_dependencies(isolation2 install_test_extension) add_dependencies(regress install_test_extension) -add_dependencies(installcheck isolation2 regress) + +if(ENABLE_FAULT_INJECTOR) + add_dependencies(isolation2 install_test_extension) + add_dependencies(installcheck isolation2 regress) +else() + add_dependencies(installcheck regress) +endif() # Example to run test_truncate infinite times # RegressTarget_Add(regress_config From bb0568954ebe88508cbb604eef2c4f71be81e702 Mon Sep 17 00:00:00 2001 From: Xing Guo Date: Mon, 30 Jan 2023 19:48:13 +0800 Subject: [PATCH 253/330] Revert "Add judgement for fault injector. (#288)" (#291) This reverts commit a9dfe96d5d346bf8ea68b3c4c149314a929832d4. Revert "Add an option to control whether compile with fault injector. (#287)" This reverts commit a331fd9744f07bc50d0f485d95cd5a39de9c8656. Co-authored-by: Hao Zhang --- CMakeLists.txt | 7 ------- cmake/Distro.cmake | 2 +- diskquota.c | 3 +-- diskquota_utility.c | 2 -- gp_activetable.c | 5 +---- quotamodel.c | 6 ++---- tests/CMakeLists.txt | 9 ++------- 7 files changed, 7 insertions(+), 27 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index df8ed63290c..514a7e7cb15 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -168,13 +168,6 @@ BuildInfo_Create(${build_info_PATH} CMAKE_BUILD_TYPE) # Create build-info end -if(NOT DEFINED ENABLE_FAULT_INJECTOR) - set(ENABLE_FAULT_INJECTOR OFF) -endif() -if(ENABLE_FAULT_INJECTOR) - add_definitions(-DDISKQUOTA_FAULT_INJECTOR) -endif() - # Add installcheck targets add_subdirectory(tests) if(NOT DEFINED ENABLE_UPGRADE_TEST) diff --git a/cmake/Distro.cmake b/cmake/Distro.cmake index c4d2a698ce2..14f18f8c6f7 100644 --- a/cmake/Distro.cmake +++ b/cmake/Distro.cmake @@ -15,7 +15,7 @@ if(NOT DISTRO_NAME) string(REGEX MATCH "CentOS Linux release 7.*" matched7 "${rh_release}") string(REGEX MATCH "Red Hat Enterprise Linux release 8.*" matched_rhel8 "${rh_release}") string(REGEX MATCH "CentOS Linux release 8.*" matched_centos8 "${rh_release}") - string(REGEX MATCH "Rocky Linux release 8.*" matched_rocky8 "${rh_release}") + string(REGEX MATCH "Rocky Linux release 8.*" matched_rocky8 "${rh_release}") if (matched6) set(DISTRO_NAME rhel6) elseif(matched7) diff --git a/diskquota.c b/diskquota.c index 76e11fa5bcf..4581a29e725 100644 --- a/diskquota.c +++ b/diskquota.c @@ -484,9 +484,8 @@ disk_quota_worker_main(Datum main_arg) while (!got_sigterm) { int rc; -#ifdef DISKQUOTA_FAULT_INJECTOR + SIMPLE_FAULT_INJECTOR("diskquota_worker_main"); -#endif if (!diskquota_is_paused()) { /* Refresh quota model with init mode */ diff --git a/diskquota_utility.c b/diskquota_utility.c index 0623268a3c3..6e55ee3cc5a 100644 --- a/diskquota_utility.c +++ 
b/diskquota_utility.c @@ -1358,9 +1358,7 @@ relation_file_stat(int segno, void *ctx) else snprintf(file_path, MAXPGPATH, "%s.%u", stat_ctx->relation_path, segno); struct stat fst; -#ifdef DISKQUOTA_FAULT_INJECTOR SIMPLE_FAULT_INJECTOR("diskquota_before_stat_relfilenode"); -#endif if (stat(file_path, &fst) < 0) { if (errno != ENOENT) diff --git a/gp_activetable.c b/gp_activetable.c index 4b973ee8308..73732e67886 100644 --- a/gp_activetable.c +++ b/gp_activetable.c @@ -142,9 +142,8 @@ static void active_table_hook_smgrcreate(RelFileNodeBackend rnode) { if (prev_file_create_hook) (*prev_file_create_hook)(rnode); -#ifdef DISKQUOTA_FAULT_INJECTOR + SIMPLE_FAULT_INJECTOR("diskquota_after_smgrcreate"); -#endif report_active_table_helper(&rnode); } @@ -220,9 +219,7 @@ object_access_hook_QuotaStmt(ObjectAccessType access, Oid classId, Oid objectId, report_relation_cache_helper(objectId); break; case OAT_POST_ALTER: -#ifdef DISKQUOTA_FAULT_INJECTOR SIMPLE_FAULT_INJECTOR("object_access_post_alter"); -#endif report_altered_reloid(objectId); break; default: diff --git a/quotamodel.c b/quotamodel.c index 619da7f2b1e..8b7a97570b5 100644 --- a/quotamodel.c +++ b/quotamodel.c @@ -1576,9 +1576,9 @@ check_rejectmap_by_relfilenode(RelFileNode relfilenode) bool found; RejectMapEntry keyitem; GlobalRejectMapEntry *entry; -#ifdef DISKQUOTA_FAULT_INJECTOR + SIMPLE_FAULT_INJECTOR("check_rejectmap_by_relfilenode"); -#endif + memset(&keyitem, 0, sizeof(keyitem)); memcpy(&keyitem.relfilenode, &relfilenode, sizeof(RelFileNode)); @@ -1683,10 +1683,8 @@ quota_check_common(Oid reloid, RelFileNode *relfilenode) enable_hardlimit = diskquota_hardlimit; -#ifdef DISKQUOTA_FAULT_INJECTOR #ifdef FAULT_INJECTOR if (SIMPLE_FAULT_INJECTOR("enable_check_quota_by_relfilenode") == FaultInjectorTypeSkip) enable_hardlimit = true; -#endif #endif if (relfilenode && enable_hardlimit) return check_rejectmap_by_relfilenode(*relfilenode); diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index d420e1ce029..9a77457250a 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -40,14 +40,9 @@ add_custom_target(install_test_extension ) add_custom_target(installcheck) +add_dependencies(isolation2 install_test_extension) add_dependencies(regress install_test_extension) - -if(ENABLE_FAULT_INJECTOR) - add_dependencies(isolation2 install_test_extension) - add_dependencies(installcheck isolation2 regress) -else() - add_dependencies(installcheck regress) -endif() +add_dependencies(installcheck isolation2 regress) # Example to run test_truncate infinite times # RegressTarget_Add(regress_config From 342997081d375a179e6dc3fd387bfb000b297f6d Mon Sep 17 00:00:00 2001 From: Zhang Hao Date: Tue, 31 Jan 2023 15:36:31 +0800 Subject: [PATCH 254/330] CI: Fix pipeline (#293) - Switch GPDB binary to release-candidate for release build. - Remove test_task from the release pipeline. Co-authored-by: Xing Guo --- concourse/pipeline/job_def.lib.yml | 2 ++ concourse/pipeline/res_def.yml | 10 +++++----- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/concourse/pipeline/job_def.lib.yml b/concourse/pipeline/job_def.lib.yml index b20c4e5e6bf..130e89e3fad 100644 --- a/concourse/pipeline/job_def.lib.yml +++ b/concourse/pipeline/job_def.lib.yml @@ -231,7 +231,9 @@ plan: #! - get: last_released_diskquota_bin #! 
resource: #@ conf["res_diskquota_bin"] - #@ _build_task(conf) +#@ if conf["build_type"] != "Release": - #@ _test_task(conf) +#@ end - put: #@ conf["res_intermediates_bin"] params: file: diskquota_artifacts/diskquota.tar.gz diff --git a/concourse/pipeline/res_def.yml b/concourse/pipeline/res_def.yml index 002130c8cf1..54d05adf4f9 100644 --- a/concourse/pipeline/res_def.yml +++ b/concourse/pipeline/res_def.yml @@ -172,31 +172,31 @@ resources: source: bucket: pivotal-gpdb-concourse-resources-prod json_key: ((concourse-gcs-resources-service-account-key)) - regexp: server/published/gpdb6/server-rc-(.*)-rhel6_x86_64.tar.gz + regexp: server/release-candidates/gpdb6/greenplum-db-server-6\.((9[0-8])|([1-8]?\d))\.(.*)-centos6.tar.gz - name: bin_gpdb6_centos7 type: gcs source: bucket: pivotal-gpdb-concourse-resources-prod json_key: ((concourse-gcs-resources-service-account-key)) - regexp: server/published/gpdb6/server-rc-(.*)-rhel7_x86_64.tar.gz + regexp: server/release-candidates/gpdb6/greenplum-db-server-6\.((9[0-8])|([1-8]?\d))\.(.*)-centos7.tar.gz - name: bin_gpdb6_rhel8 type: gcs source: bucket: pivotal-gpdb-concourse-resources-prod json_key: ((concourse-gcs-resources-service-account-key)) - regexp: server/published/gpdb6/server-rc-(.*)-rhel8_x86_64.tar.gz + regexp: server/release-candidates/gpdb6/greenplum-db-server-6\.((9[0-8])|([1-8]?\d))\.(.*)-rhel8.tar.gz - name: bin_gpdb6_ubuntu18 type: gcs source: bucket: pivotal-gpdb-concourse-resources-prod json_key: ((concourse-gcs-resources-service-account-key)) - regexp: server/published/gpdb6/server-rc-(.*)-ubuntu18.04_x86_64.tar.gz + regexp: server/release-candidates/gpdb6/greenplum-db-server-6\.((9[0-8])|([1-8]?\d))\.(.*)-ubuntu18.04.tar.gz - name: bin_gpdb7_rhel8 type: gcs source: bucket: pivotal-gpdb-concourse-resources-prod json_key: ((concourse-gcs-resources-service-account-key)) - regexp: server/published/main/server-rc-(.*)-rhel8_x86_64.tar.gz + regexp: server/release-candidates/gpdb7/greenplum-db-server-7\.((9[0-8])|([1-8]?\d))\.(.*)-rhel8.tar.gz # Diskquota releases - name: bin_diskquota_gpdb6_rhel6 From 10606b37d801f3dba5f4c6a029b0adf0ddeff7bb Mon Sep 17 00:00:00 2001 From: Zhang Hao Date: Fri, 3 Feb 2023 10:57:17 +0800 Subject: [PATCH 255/330] Format code by clang-format. 
(#296) --- diskquota.c | 16 +++++++--------- diskquota_utility.c | 3 ++- 2 files changed, 9 insertions(+), 10 deletions(-) diff --git a/diskquota.c b/diskquota.c index 4581a29e725..aa24b6cdff5 100644 --- a/diskquota.c +++ b/diskquota.c @@ -909,7 +909,8 @@ init_database_list(void) if (ret != SPI_OK_CONNECT) { int saved_errno = errno; - ereport(ERROR, (errmsg("[diskquota launcher] SPI connect error, reason: %s, return code: %d.", strerror(saved_errno), ret))); + ereport(ERROR, (errmsg("[diskquota launcher] SPI connect error, reason: %s, return code: %d.", + strerror(saved_errno), ret))); } ret = SPI_execute("select dbid from diskquota_namespace.database_list;", true, 0); if (ret != SPI_OK_SELECT) @@ -917,8 +918,7 @@ init_database_list(void) int saved_errno = errno; ereport(ERROR, (errmsg("[diskquota launcher] 'select diskquota_namespace.database_list', reason: %s, return code: %d.", - strerror(saved_errno), - ret))); + strerror(saved_errno), ret))); } tupdesc = SPI_tuptable->tupdesc; #if GP_VERSION_NUM < 70000 @@ -1234,8 +1234,7 @@ add_dbid_to_database_list(Oid dbid) int saved_errno = errno; ereport(ERROR, (errmsg("[diskquota launcher] error occured while checking database_list, " " code: %d, reason: %s.", - ret, - strerror(saved_errno)))); + ret, strerror(saved_errno)))); } if (SPI_processed == 1) @@ -1254,8 +1253,7 @@ add_dbid_to_database_list(Oid dbid) int saved_errno = errno; ereport(ERROR, (errmsg("[diskquota launcher] error occured while updating database_list, " " code: %d, reason: %s.", - ret, - strerror(saved_errno)))); + ret, strerror(saved_errno)))); } return; @@ -1282,8 +1280,8 @@ del_dbid_from_database_list(Oid dbid) if (ret != SPI_OK_DELETE) { int saved_errno = errno; - ereport(ERROR, - (errmsg("[diskquota launcher] del_dbid_from_database_list: reason: %s, ret_code: %d.", strerror(saved_errno), ret))); + ereport(ERROR, (errmsg("[diskquota launcher] del_dbid_from_database_list: reason: %s, ret_code: %d.", + strerror(saved_errno), ret))); } } diff --git a/diskquota_utility.c b/diskquota_utility.c index 6e55ee3cc5a..da9bc080fe2 100644 --- a/diskquota_utility.c +++ b/diskquota_utility.c @@ -1364,7 +1364,8 @@ relation_file_stat(int segno, void *ctx) if (errno != ENOENT) { int saved_errno = errno; - ereport(WARNING, (errcode_for_file_access(), errmsg("[diskquota] could not stat file %s: %s", file_path, strerror(saved_errno)))); + ereport(WARNING, (errcode_for_file_access(), + errmsg("[diskquota] could not stat file %s: %s", file_path, strerror(saved_errno)))); } return false; } From 54a273e85426885db4e9f55c58a1638d69c3b543 Mon Sep 17 00:00:00 2001 From: Zhang Hao Date: Fri, 3 Feb 2023 12:10:26 +0800 Subject: [PATCH 256/330] Add command to compile isolation2. (#297) Isolation2 compilation command is removed by #285. We add it into Regress.cmake in this commit. 
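For reference, this restores the harness build that #285 removed: building the pg_isolation2_regress target now also compiles the isolation2 framework shipped with the GPDB sources before copying its helper script. A minimal sketch of driving it by hand, assuming an already configured build directory (the path is a placeholder):

```sh
# Rebuild the isolation2 harness, then run the suites through the
# installcheck umbrella target defined in tests/CMakeLists.txt.
cmake --build /path/to/build --target pg_isolation2_regress
cmake --build /path/to/build --target installcheck
```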
Co-authored-by: Xing Guo higuoxing@gmail.com --- cmake/Regress.cmake | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cmake/Regress.cmake b/cmake/Regress.cmake index 6d91c760823..b265eb7f00f 100644 --- a/cmake/Regress.cmake +++ b/cmake/Regress.cmake @@ -43,6 +43,9 @@ function(_PGIsolation2Target_Add working_DIR) add_custom_target( pg_isolation2_regress + COMMAND + make -C ${PG_SRC_DIR}/src/test/isolation2 install + COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PG_SRC_DIR}/src/test/isolation2/sql_isolation_testcase.py ${working_DIR} ) From fa269a3ac25d852eaae7cee4ea0cda884bec9e57 Mon Sep 17 00:00:00 2001 From: Zhang Hao Date: Fri, 3 Feb 2023 15:49:39 +0800 Subject: [PATCH 257/330] Fix flaky test (#294) - Fix flaky test test_ctas_before_set_quota. pg_type will be an active table after `CREATE TABLE`. It does not affect the function of diskquota but makes the test results unstable. In fact, we do not care about the table size of the system catalog table. So we simply skip the active table oid of these tables. - Fix test_vacuum/test_truncate. gp_wait_until_triggered_fault should be called after gp_inject_fault_infinite with suspend flag. Co-authored-by: Xing Guo Co-authored-by: Xiaoran Wang --- gp_activetable.c | 7 ++++++- tests/isolation2/expected/test_truncate.out | 7 +++++++ tests/isolation2/expected/test_vacuum.out | 7 +++++++ tests/isolation2/expected7/test_truncate.out | 7 +++++++ tests/isolation2/expected7/test_vacuum.out | 7 +++++++ tests/isolation2/sql/test_truncate.sql | 2 ++ tests/isolation2/sql/test_vacuum.sql | 2 ++ 7 files changed, 38 insertions(+), 1 deletion(-) diff --git a/gp_activetable.c b/gp_activetable.c index 73732e67886..8caeccf8821 100644 --- a/gp_activetable.c +++ b/gp_activetable.c @@ -805,7 +805,12 @@ get_active_tables_oid(void) rnode.spcNode = active_table_file_entry->tablespaceoid; relOid = get_relid_by_relfilenode(rnode); - if (relOid != InvalidOid) + /* skip system catalog tables */ + if (relOid < FirstNormalObjectId) + { + hash_search(local_active_table_file_map, active_table_file_entry, HASH_REMOVE, NULL); + } + else if (relOid != InvalidOid) { prelid = get_primary_table_oid(relOid, true); active_table_entry = hash_search(local_active_table_stats_map, &prelid, HASH_ENTER, &found); diff --git a/tests/isolation2/expected/test_truncate.out b/tests/isolation2/expected/test_truncate.out index d176b404eda..4964f6ec177 100644 --- a/tests/isolation2/expected/test_truncate.out +++ b/tests/isolation2/expected/test_truncate.out @@ -39,6 +39,13 @@ SELECT diskquota.wait_for_worker_new_epoch(); t (1 row) 1&: TRUNCATE dummy_t1; +SELECT gp_wait_until_triggered_fault('diskquota_after_smgrcreate', 1, dbid) FROM gp_segment_configuration WHERE role='p' AND content<>-1; + gp_wait_until_triggered_fault +------------------------------- + Success: + Success: + Success: +(3 rows) -- Wait for the diskquota bgworker 'consumes' the newly created relfilenode from shmem. 
SELECT diskquota.wait_for_worker_new_epoch(); wait_for_worker_new_epoch diff --git a/tests/isolation2/expected/test_vacuum.out b/tests/isolation2/expected/test_vacuum.out index 47eb944d968..eb43793236e 100644 --- a/tests/isolation2/expected/test_vacuum.out +++ b/tests/isolation2/expected/test_vacuum.out @@ -52,6 +52,13 @@ SELECT diskquota.wait_for_worker_new_epoch(); t (1 row) 1&: VACUUM FULL dummy_t1; +SELECT gp_wait_until_triggered_fault('object_access_post_alter', 1, dbid) FROM gp_segment_configuration WHERE role='p' AND content<>-1; + gp_wait_until_triggered_fault +------------------------------- + Success: + Success: + Success: +(3 rows) -- Wait for the diskquota bgworker 'consumes' the newly created relfilenode from shmem. SELECT diskquota.wait_for_worker_new_epoch(); wait_for_worker_new_epoch diff --git a/tests/isolation2/expected7/test_truncate.out b/tests/isolation2/expected7/test_truncate.out index d176b404eda..4964f6ec177 100644 --- a/tests/isolation2/expected7/test_truncate.out +++ b/tests/isolation2/expected7/test_truncate.out @@ -39,6 +39,13 @@ SELECT diskquota.wait_for_worker_new_epoch(); t (1 row) 1&: TRUNCATE dummy_t1; +SELECT gp_wait_until_triggered_fault('diskquota_after_smgrcreate', 1, dbid) FROM gp_segment_configuration WHERE role='p' AND content<>-1; + gp_wait_until_triggered_fault +------------------------------- + Success: + Success: + Success: +(3 rows) -- Wait for the diskquota bgworker 'consumes' the newly created relfilenode from shmem. SELECT diskquota.wait_for_worker_new_epoch(); wait_for_worker_new_epoch diff --git a/tests/isolation2/expected7/test_vacuum.out b/tests/isolation2/expected7/test_vacuum.out index 47eb944d968..eb43793236e 100644 --- a/tests/isolation2/expected7/test_vacuum.out +++ b/tests/isolation2/expected7/test_vacuum.out @@ -52,6 +52,13 @@ SELECT diskquota.wait_for_worker_new_epoch(); t (1 row) 1&: VACUUM FULL dummy_t1; +SELECT gp_wait_until_triggered_fault('object_access_post_alter', 1, dbid) FROM gp_segment_configuration WHERE role='p' AND content<>-1; + gp_wait_until_triggered_fault +------------------------------- + Success: + Success: + Success: +(3 rows) -- Wait for the diskquota bgworker 'consumes' the newly created relfilenode from shmem. SELECT diskquota.wait_for_worker_new_epoch(); wait_for_worker_new_epoch diff --git a/tests/isolation2/sql/test_truncate.sql b/tests/isolation2/sql/test_truncate.sql index 5bce332053f..538b6318209 100644 --- a/tests/isolation2/sql/test_truncate.sql +++ b/tests/isolation2/sql/test_truncate.sql @@ -14,6 +14,8 @@ SELECT gp_inject_fault_infinite('diskquota_after_smgrcreate', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content<>-1; SELECT diskquota.wait_for_worker_new_epoch(); 1&: TRUNCATE dummy_t1; +SELECT gp_wait_until_triggered_fault('diskquota_after_smgrcreate', 1, dbid) + FROM gp_segment_configuration WHERE role='p' AND content<>-1; -- Wait for the diskquota bgworker 'consumes' the newly created relfilenode from shmem. 
SELECT diskquota.wait_for_worker_new_epoch(); SELECT gp_inject_fault_infinite('diskquota_after_smgrcreate', 'reset', dbid) diff --git a/tests/isolation2/sql/test_vacuum.sql b/tests/isolation2/sql/test_vacuum.sql index cf46bb40ddf..4125ac5f055 100644 --- a/tests/isolation2/sql/test_vacuum.sql +++ b/tests/isolation2/sql/test_vacuum.sql @@ -26,6 +26,8 @@ SELECT gp_inject_fault_infinite('object_access_post_alter', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content<>-1; SELECT diskquota.wait_for_worker_new_epoch(); 1&: VACUUM FULL dummy_t1; +SELECT gp_wait_until_triggered_fault('object_access_post_alter', 1, dbid) + FROM gp_segment_configuration WHERE role='p' AND content<>-1; -- Wait for the diskquota bgworker 'consumes' the newly created relfilenode from shmem. SELECT diskquota.wait_for_worker_new_epoch(); SELECT gp_inject_fault_infinite('object_access_post_alter', 'reset', dbid) From 9ae2e82156c9fc366e5873d98012229822cccc6c Mon Sep 17 00:00:00 2001 From: Xiaoran Wang Date: Mon, 6 Feb 2023 09:07:44 +0800 Subject: [PATCH 258/330] Fix flaky test test_rejectmap_mul_db (#295) When creating a new table, pg_type will be in active tables. Filter the system catalog table. And remove pause in the test. --- tests/regress/expected/test_rejectmap_mul_db.out | 15 +-------------- tests/regress/expected7/test_rejectmap_mul_db.out | 15 +-------------- tests/regress/sql/test_rejectmap_mul_db.sql | 3 --- 3 files changed, 2 insertions(+), 31 deletions(-) diff --git a/tests/regress/expected/test_rejectmap_mul_db.out b/tests/regress/expected/test_rejectmap_mul_db.out index 40c43ae3d78..78a10aad70f 100644 --- a/tests/regress/expected/test_rejectmap_mul_db.out +++ b/tests/regress/expected/test_rejectmap_mul_db.out @@ -22,14 +22,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- Trigger hard limit to dispatch rejectmap for tjmu1 INSERT INTO b SELECT generate_series(1, 100000000); -- fail -ERROR: schema's disk space quota exceeded with name: 2200 (seg1 127.0.0.1:6003 pid=3985762) --- NOTE: Pause to avoid tjmu1's worker clear the active table. Since the naptime is 0 on CI, this might be flaky. -SELECT diskquota.pause(); - pause -------- - -(1 row) - +ERROR: schema's disk space quota exceeded with name: 2200 (seg1 127.0.0.1:8003 pid=43782) -- The rejectmap should contain entries with dbnode = 0 and dbnode = tjmu1_oid. count = 1 SELECT COUNT(DISTINCT r.dbnode) FROM (SELECT (diskquota.show_rejectmap()).* FROM gp_dist_random('gp_id')) as r where r.dbnode != 0; count @@ -61,12 +54,6 @@ SELECT diskquota.wait_for_worker_new_epoch(); t (1 row) -SELECT diskquota.pause(); - pause -------- - -(1 row) - --\c tjmu1 -- The rejectmap should contain entris with dbnode = 0 and dbnode = tjmu1_oid and tjmu2_oid. count = 2 -- The entries for tjmu1 should not be cleared diff --git a/tests/regress/expected7/test_rejectmap_mul_db.out b/tests/regress/expected7/test_rejectmap_mul_db.out index ed2edc8df33..89142d8b041 100644 --- a/tests/regress/expected7/test_rejectmap_mul_db.out +++ b/tests/regress/expected7/test_rejectmap_mul_db.out @@ -24,14 +24,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- Trigger hard limit to dispatch rejectmap for tjmu1 INSERT INTO b SELECT generate_series(1, 100000000); -- fail -ERROR: schema's disk space quota exceeded with name: 2200 (seg1 127.0.0.1:7003 pid=1961462) --- NOTE: Pause to avoid tjmu1's worker clear the active table. Since the naptime is 0 on CI, this might be flaky. 
-SELECT diskquota.pause(); - pause -------- - -(1 row) - +ERROR: schema's disk space quota exceeded with name: 2200 (seg1 127.0.0.1:8003 pid=43782) -- The rejectmap should contain entries with dbnode = 0 and dbnode = tjmu1_oid. count = 1 SELECT COUNT(DISTINCT r.dbnode) FROM (SELECT (diskquota.show_rejectmap()).* FROM gp_dist_random('gp_id')) as r where r.dbnode != 0; count @@ -63,12 +56,6 @@ SELECT diskquota.wait_for_worker_new_epoch(); t (1 row) -SELECT diskquota.pause(); - pause -------- - -(1 row) - --\c tjmu1 -- The rejectmap should contain entris with dbnode = 0 and dbnode = tjmu1_oid and tjmu2_oid. count = 2 -- The entries for tjmu1 should not be cleared diff --git a/tests/regress/sql/test_rejectmap_mul_db.sql b/tests/regress/sql/test_rejectmap_mul_db.sql index 3b2fd734f13..57fd16db43a 100644 --- a/tests/regress/sql/test_rejectmap_mul_db.sql +++ b/tests/regress/sql/test_rejectmap_mul_db.sql @@ -16,8 +16,6 @@ CREATE TABLE b (t TEXT) DISTRIBUTED BY (t); SELECT diskquota.wait_for_worker_new_epoch(); -- Trigger hard limit to dispatch rejectmap for tjmu1 INSERT INTO b SELECT generate_series(1, 100000000); -- fail --- NOTE: Pause to avoid tjmu1's worker clear the active table. Since the naptime is 0 on CI, this might be flaky. -SELECT diskquota.pause(); -- The rejectmap should contain entries with dbnode = 0 and dbnode = tjmu1_oid. count = 1 SELECT COUNT(DISTINCT r.dbnode) FROM (SELECT (diskquota.show_rejectmap()).* FROM gp_dist_random('gp_id')) as r where r.dbnode != 0; @@ -29,7 +27,6 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- Trigger hard limit to dispatch rejectmap for tjmu2 INSERT INTO b SELECT generate_series(1, 100000000); -- fail SELECT diskquota.wait_for_worker_new_epoch(); -SELECT diskquota.pause(); --\c tjmu1 -- The rejectmap should contain entris with dbnode = 0 and dbnode = tjmu1_oid and tjmu2_oid. count = 2 From bfe7bad2b6d082dcc1ed36681ff7685545f7a507 Mon Sep 17 00:00:00 2001 From: Zhang Hao Date: Tue, 7 Feb 2023 15:23:17 +0800 Subject: [PATCH 259/330] Fix bug (#298) Fix the following bugs: - Judgement condition for update_relation_cache should be `&&`, instead of `||` - The lock for relation_open/relation_close should be AccessShareLock for gpdb7. - For `truncate table`, we cannot get the table's oid by new relfilenode immediately after `file_create_hook` is finished. So we should keep the relfilenode in active_table_file_map and wait for the next loop to calculate the correct size for this table. 
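To make the first fix concrete: a single relkind can never equal all three constants at once, so the old `||` chain was always true and nothing was ever excluded. A condensed sketch of the corrected predicate, taken from the gp_activetable.c hunk below:

```c
/* Cache the relation only when it is none of the excluded kinds.
 * "kind != A || kind != B || kind != C" is a tautology, so views,
 * foreign tables and composite types slipped through; && expresses
 * the intended "neither A nor B nor C". */
if (rel->rd_rel->relkind != RELKIND_FOREIGN_TABLE &&
    rel->rd_rel->relkind != RELKIND_COMPOSITE_TYPE &&
    rel->rd_rel->relkind != RELKIND_VIEW)
	update_relation_cache(relid);
```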
--- gp_activetable.c | 14 +++++++++++--- relation_cache.c | 8 ++++++++ 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/gp_activetable.c b/gp_activetable.c index 8caeccf8821..4cf2144fe5b 100644 --- a/gp_activetable.c +++ b/gp_activetable.c @@ -268,12 +268,17 @@ report_relation_cache_helper(Oid relid) #if GP_VERSION_NUM < 70000 rel = diskquota_relation_open(relid, NoLock); #else - rel = diskquota_relation_open(relid, AccessShareLock); + rel = diskquota_relation_open(relid, AccessShareLock); #endif /* GP_VERSION_NUM */ - if (rel->rd_rel->relkind != RELKIND_FOREIGN_TABLE || rel->rd_rel->relkind != RELKIND_COMPOSITE_TYPE || + if (rel->rd_rel->relkind != RELKIND_FOREIGN_TABLE && rel->rd_rel->relkind != RELKIND_COMPOSITE_TYPE && rel->rd_rel->relkind != RELKIND_VIEW) update_relation_cache(relid); + +#if GP_VERSION_NUM < 70000 relation_close(rel, NoLock); +#else + relation_close(rel, AccessShareLock); +#endif /* GP_VERSION_NUM */ } /* @@ -805,12 +810,15 @@ get_active_tables_oid(void) rnode.spcNode = active_table_file_entry->tablespaceoid; relOid = get_relid_by_relfilenode(rnode); + /* If relfilenode is not prepared for some relation, just skip it. */ + if (!OidIsValid(relOid)) continue; + /* skip system catalog tables */ if (relOid < FirstNormalObjectId) { hash_search(local_active_table_file_map, active_table_file_entry, HASH_REMOVE, NULL); } - else if (relOid != InvalidOid) + else { prelid = get_primary_table_oid(relOid, true); active_table_entry = hash_search(local_active_table_stats_map, &prelid, HASH_ENTER, &found); diff --git a/relation_cache.c b/relation_cache.c index 224a9c37ce4..352852e9348 100644 --- a/relation_cache.c +++ b/relation_cache.c @@ -166,7 +166,11 @@ update_relation_entry(Oid relid, DiskQuotaRelationCacheEntry *relation_entry, Di relation_entry->primary_table_relid = relid; +#if GP_VERSION_NUM < 70000 relation_close(rel, NoLock); +#else + relation_close(rel, AccessShareLock); +#endif /* GP_VERSION_NUM */ } void @@ -235,7 +239,11 @@ parse_primary_table_oid(Oid relid, bool on_bgworker) } namespace = rel->rd_rel->relnamespace; memcpy(relname, rel->rd_rel->relname.data, NAMEDATALEN); +#if GP_VERSION_NUM < 70000 relation_close(rel, NoLock); +#else + relation_close(rel, AccessShareLock); +#endif /* GP_VERSION_NUM */ } parsed_oid = diskquota_parse_primary_table_oid(namespace, relname); From ff57e49ec2fcca564ce2f9848d83835e7bb053c4 Mon Sep 17 00:00:00 2001 From: Zhang Hao Date: Wed, 8 Feb 2023 14:54:16 +0800 Subject: [PATCH 260/330] Enable upgrade test for CI (#299) Revert some modification from #285. - Add last_released_diskquota_bin back for CI. - Enable upgradecheck. - Add -DDISKQUOTA_LAST_RELEASE_PATH for cmake. 
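A minimal sketch of the equivalent local invocation; the paths and the tarball name are placeholders, while the flag and targets come from the CI scripts in the diff below:

```sh
# Configure against the previously released tarball so the DDL-change check
# in upgrade_test/CMakeLists.txt can locate it, then run the upgrade tests.
cmake /path/to/diskquota_src \
      -DDISKQUOTA_LAST_RELEASE_PATH=/path/to/diskquota-2.1.0-rhel8_x86_64.tar.gz \
      -DCMAKE_BUILD_TYPE=Debug
cmake --build . --target upgradecheck
```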
--- CMakeLists.txt | 7 +------ concourse/pipeline/job_def.lib.yml | 14 +++++++------- concourse/scripts/build_diskquota.sh | 4 ++-- concourse/scripts/test_diskquota.sh | 2 +- concourse/tasks/build_diskquota.yml | 2 +- upgrade_test/CMakeLists.txt | 4 ++++ 6 files changed, 16 insertions(+), 17 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 514a7e7cb15..62111646cff 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -170,12 +170,7 @@ BuildInfo_Create(${build_info_PATH} # Add installcheck targets add_subdirectory(tests) -if(NOT DEFINED ENABLE_UPGRADE_TEST) - set(ENABLE_UPGRADE_TEST ON) -endif() -if(ENABLE_UPGRADE_TEST) - add_subdirectory(upgrade_test) -endif() +add_subdirectory(upgrade_test) # NOTE: keep install part at the end of file, to overwrite previous binary install(PROGRAMS "cmake/install_gpdb_component" DESTINATION ".") diff --git a/concourse/pipeline/job_def.lib.yml b/concourse/pipeline/job_def.lib.yml index 130e89e3fad..7e39bbaa5e8 100644 --- a/concourse/pipeline/job_def.lib.yml +++ b/concourse/pipeline/job_def.lib.yml @@ -15,7 +15,7 @@ res_build_image: centos6-gpdb6-image-build res_test_images: [centos6-gpdb6-image-test] res_gpdb_bin: #@ "bin_gpdb6_centos6" + ("" if release_build else "_debug") -#! res_diskquota_bin: bin_diskquota_gpdb6_rhel6 +res_diskquota_bin: bin_diskquota_gpdb6_rhel6 res_intermediates_bin: #@ inter_bin_name("bin_diskquota_gpdb6_rhel6_intermediates", release_build) release_bin: bin_diskquota_gpdb6_rhel6_release os: rhel6 @@ -28,7 +28,7 @@ build_type: #@ "Release" if release_build else "Debug" res_build_image: centos7-gpdb6-image-build res_test_images: [centos7-gpdb6-image-test] res_gpdb_bin: #@ "bin_gpdb6_centos7" + ("" if release_build else "_debug") -#! res_diskquota_bin: bin_diskquota_gpdb6_rhel7 +res_diskquota_bin: bin_diskquota_gpdb6_rhel7 res_intermediates_bin: #@ inter_bin_name("bin_diskquota_gpdb6_rhel7_intermediates", release_build) release_bin: bin_diskquota_gpdb6_rhel7_release os: rhel7 @@ -41,7 +41,7 @@ build_type: #@ "Release" if release_build else "Debug" res_build_image: rhel8-gpdb6-image-build res_test_images: [rhel8-gpdb6-image-test] res_gpdb_bin: #@ "bin_gpdb6_rhel8" + ("" if release_build else "_debug") -#! res_diskquota_bin: bin_diskquota_gpdb6_rhel8 +res_diskquota_bin: bin_diskquota_gpdb6_rhel8 res_intermediates_bin: #@ inter_bin_name("bin_diskquota_gpdb6_rhel8_intermediates", release_build) release_bin: bin_diskquota_gpdb6_rhel8_release os: rhel8 @@ -54,7 +54,7 @@ build_type: #@ "Release" if release_build else "Debug" res_build_image: ubuntu18-gpdb6-image-build res_test_images: [ubuntu18-gpdb6-image-test] res_gpdb_bin: #@ "bin_gpdb6_ubuntu18" + ("" if release_build else "_debug") -#! res_diskquota_bin: bin_diskquota_gpdb6_ubuntu18 +res_diskquota_bin: bin_diskquota_gpdb6_ubuntu18 res_intermediates_bin: #@ inter_bin_name("bin_diskquota_gpdb6_ubuntu18_intermediates", release_build) release_bin: bin_diskquota_gpdb6_ubuntu18_release os: ubuntu18.04 @@ -67,7 +67,7 @@ build_type: #@ "Release" if release_build else "Debug" res_build_image: rocky8-gpdb7-image-build res_test_images: [rocky8-gpdb7-image-test, rhel8-gpdb7-image-test] res_gpdb_bin: #@ "bin_gpdb7_rhel8" + ("" if release_build else "_debug") -#! 
res_diskquota_bin: bin_diskquota_gpdb7_rhel8 +res_diskquota_bin: bin_diskquota_gpdb7_rhel8 res_intermediates_bin: #@ inter_bin_name("bin_diskquota_gpdb7_rhel8_intermediates", release_build) release_bin: bin_diskquota_gpdb7_rhel8_release os: rhel8 @@ -228,8 +228,8 @@ plan: - get: #@ test_image #@ end - get: #@ conf["res_gpdb_bin"] - #! - get: last_released_diskquota_bin - #! resource: #@ conf["res_diskquota_bin"] + - get: last_released_diskquota_bin + resource: #@ conf["res_diskquota_bin"] - #@ _build_task(conf) diff --git a/concourse/scripts/build_diskquota.sh b/concourse/scripts/build_diskquota.sh index 3ca3efbc970..7b2ee30b6af 100755 --- a/concourse/scripts/build_diskquota.sh +++ b/concourse/scripts/build_diskquota.sh @@ -12,10 +12,10 @@ function pkg() { pushd /home/gpadmin/diskquota_artifacts local last_release_path - # last_release_path=$(readlink -e /home/gpadmin/last_released_diskquota_bin/diskquota-*.tar.gz) + last_release_path=$(readlink -eq /home/gpadmin/last_released_diskquota_bin/diskquota-*.tar.gz) cmake /home/gpadmin/diskquota_src \ + -DDISKQUOTA_LAST_RELEASE_PATH="${last_release_path}" \ -DCMAKE_BUILD_TYPE="${BUILD_TYPE}" - # -DDISKQUOTA_LAST_RELEASE_PATH="${last_release_path}" \ cmake --build . --target create_artifact popd } diff --git a/concourse/scripts/test_diskquota.sh b/concourse/scripts/test_diskquota.sh index 12566032570..87c48663458 100755 --- a/concourse/scripts/test_diskquota.sh +++ b/concourse/scripts/test_diskquota.sh @@ -30,7 +30,7 @@ function _main() { # activate_standby # time cmake --build . --target installcheck # Run upgrade test (with standby master) - # time cmake --build . --target upgradecheck + time cmake --build . --target upgradecheck popd } diff --git a/concourse/tasks/build_diskquota.yml b/concourse/tasks/build_diskquota.yml index ba71054fd1b..cacf0fb2c9b 100644 --- a/concourse/tasks/build_diskquota.yml +++ b/concourse/tasks/build_diskquota.yml @@ -6,7 +6,7 @@ inputs: - name: diskquota_src - name: gpdb_src - name: bin_cmake - # - name: last_released_diskquota_bin + - name: last_released_diskquota_bin outputs: - name: diskquota_artifacts diff --git a/upgrade_test/CMakeLists.txt b/upgrade_test/CMakeLists.txt index 2750b1dd05c..1e3a256283a 100644 --- a/upgrade_test/CMakeLists.txt +++ b/upgrade_test/CMakeLists.txt @@ -57,6 +57,10 @@ endforeach() # if DDL file modified, insure the last release file passed in if(DISKQUOTA_DDL_CHANGE_CHECK AND DISKQUOTA_DDL_MODIFIED AND NOT DEFINED DISKQUOTA_LAST_RELEASE_PATH) + message( + FATAL_ERROR + "DDL file modify detected, upgrade test is required. Add -DDISKQUOTA_LAST_RELEASE_PATH=<path>/<to>/diskquota-<version>-<os>_<arch>.tar.gz. And re-try the generation" + ) endif() # check if current version is compatible with the upgrade strategy From b2367939bc2ff24bb8959ab6990e86ac559e7ef7 Mon Sep 17 00:00:00 2001 From: Chen Mulong Date: Thu, 9 Feb 2023 13:32:26 +0800 Subject: [PATCH 261/330] Skip fault injector case for release build (#302) Due to the release build change for GP7, the fault injector doesn't work with the release build. So, all the tests were temporarily disabled for release pipelines. Since we switched to use the `--disable-debug-extensions` gpdb build, the fault injector is not available for the release pipeline. - Add 'EXCLUDE_FAULT_INJECT_TEST' to Regress.cmake, so it will be smart enough to check if there are any fault injector cases in the given test set. Ignore them if so. - Skip the fault injector tests for the release pipeline. - Enable the CI test task for GP7. 
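The detection added to Regress.cmake (diff below) is a plain per-file grep; an equivalent manual check, assuming the in-tree test layout:

```sh
# List the cases that EXCLUDE_FAULT_INJECT_TEST=ON will skip: any input
# file that references the gp_inject_fault extension.
grep -l gp_inject_fault tests/regress/sql/*.sql tests/isolation2/sql/*.sql
```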
--- .editorconfig | 4 +++ cmake/Regress.cmake | 39 +++++++++++++++++++++++++++++- concourse/pipeline/job_def.lib.yml | 2 -- tests/CMakeLists.txt | 8 ++++++ 4 files changed, 50 insertions(+), 3 deletions(-) diff --git a/.editorconfig b/.editorconfig index 635d6f2296e..053c91fd382 100644 --- a/.editorconfig +++ b/.editorconfig @@ -20,3 +20,7 @@ indent_size = 4 indent_style = space indent_size = 2 +[cmake/**.cmake] +indent_style = space +indent_size = 4 + diff --git a/cmake/Regress.cmake b/cmake/Regress.cmake index b265eb7f00f..62d8807fb37 100644 --- a/cmake/Regress.cmake +++ b/cmake/Regress.cmake @@ -12,6 +12,7 @@ # [REGRESS_OPTS ...] # [REGRESS_TYPE isolation2/regress] # [RUN_TIMES ] +# [EXCLUDE_FAULT_INJECT_TEST ] # ) # All the file path can be the relative path to ${CMAKE_CURRENT_SOURCE_DIR}. # A bunch of diff targets will be created as well for comparing the regress results. The diff @@ -51,11 +52,31 @@ function(_PGIsolation2Target_Add working_DIR) ) endfunction() +# Find all tests in the given directory which uses fault injector, and add them to +# fault_injector_test_list. +function(_Find_FaultInjector_Tests sql_DIR) + file(GLOB files "${sql_DIR}/*.sql") + foreach(f ${files}) + set(ret 1) + execute_process( + COMMAND + grep gp_inject_fault ${f} + OUTPUT_QUIET + RESULT_VARIABLE ret) + if(ret EQUAL 0) + get_filename_component(test_name ${f} NAME_WLE) + list(APPEND fault_injector_test_list ${test_name}) + endif() + endforeach() + + set(fault_injector_test_list ${fault_injector_test_list} PARENT_SCOPE) +endfunction() + function(RegressTarget_Add name) cmake_parse_arguments( arg "" - "SQL_DIR;EXPECTED_DIR;RESULTS_DIR;DATA_DIR;REGRESS_TYPE;RUN_TIMES" + "SQL_DIR;EXPECTED_DIR;RESULTS_DIR;DATA_DIR;REGRESS_TYPE;RUN_TIMES;EXCLUDE_FAULT_INJECT_TEST" "REGRESS;EXCLUDE;REGRESS_OPTS;INIT_FILE;SCHEDULE_FILE" ${ARGN}) if (NOT arg_EXPECTED_DIR) @@ -85,8 +106,16 @@ function(RegressTarget_Add name) endif() endif() + # Find all tests using fault injector + if(arg_EXCLUDE_FAULT_INJECT_TEST) + _Find_FaultInjector_Tests(${arg_SQL_DIR}) + endif() + # Set REGRESS test cases foreach(r IN LISTS arg_REGRESS) + if (arg_EXCLUDE_FAULT_INJECT_TEST AND (r IN_LIST fault_injector_test_list)) + continue() + endif() set(regress_arg ${regress_arg} ${r}) endforeach() @@ -102,12 +131,20 @@ function(RegressTarget_Add name) foreach(o IN LISTS arg_EXCLUDE) list(APPEND to_exclude ${o}) endforeach() + if(arg_EXCLUDE_FAULT_INJECT_TEST) + list(APPEND to_exclude ${fault_injector_test_list}) + endif() if (to_exclude) set(exclude_arg "--exclude-tests=${to_exclude}") string(REPLACE ";" "," exclude_arg "${exclude_arg}") set(regress_opts_arg ${regress_opts_arg} ${exclude_arg}) endif() foreach(o IN LISTS arg_REGRESS_OPTS) + # If the fault injection tests are excluded, ignore the --load-extension=gp_inject_fault as + # well. 
+ if (arg_EXCLUDE_FAULT_INJECT_TEST AND (o MATCHES ".*inject_fault")) + continue() + endif() set(regress_opts_arg ${regress_opts_arg} ${o}) endforeach() diff --git a/concourse/pipeline/job_def.lib.yml b/concourse/pipeline/job_def.lib.yml index 7e39bbaa5e8..78119ad5645 100644 --- a/concourse/pipeline/job_def.lib.yml +++ b/concourse/pipeline/job_def.lib.yml @@ -231,9 +231,7 @@ plan: - get: last_released_diskquota_bin resource: #@ conf["res_diskquota_bin"] - #@ _build_task(conf) -#@ if conf["build_type"] != "Release": - #@ _test_task(conf) -#@ end - put: #@ conf["res_intermediates_bin"] params: file: diskquota_artifacts/diskquota.tar.gz diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 9a77457250a..13cc145a2bc 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -4,6 +4,12 @@ if (${GP_MAJOR_VERSION} EQUAL 7) set(EXPECTED_DIR_SUFFIX "7") endif() +set(exclude_fault_injector OFF) +# GP7 release build doesn't support fault injector. +if (CMAKE_BUILD_TYPE STREQUAL "Release") + message(WARNING "Fault injector test cases will be disabled.") + set(exclude_fault_injector ON) +endif() RegressTarget_Add(regress INIT_FILE @@ -13,6 +19,7 @@ RegressTarget_Add(regress RESULTS_DIR ${CMAKE_CURRENT_SOURCE_DIR}/regress/results DATA_DIR ${CMAKE_CURRENT_SOURCE_DIR}/data SCHEDULE_FILE ${CMAKE_CURRENT_SOURCE_DIR}/regress/diskquota_schedule${EXPECTED_DIR_SUFFIX} + EXCLUDE_FAULT_INJECT_TEST ${exclude_fault_injector} REGRESS_OPTS --load-extension=gp_inject_fault --load-extension=diskquota_test @@ -28,6 +35,7 @@ RegressTarget_Add(isolation2 RESULTS_DIR ${CMAKE_CURRENT_SOURCE_DIR}/isolation2/results DATA_DIR ${CMAKE_CURRENT_SOURCE_DIR}/data SCHEDULE_FILE ${CMAKE_CURRENT_SOURCE_DIR}/isolation2/isolation2_schedule${EXPECTED_DIR_SUFFIX} + EXCLUDE_FAULT_INJECT_TEST ${exclude_fault_injector} REGRESS_OPTS --load-extension=gp_inject_fault --dbname=isolation2test) From 3283772c17b2b73ca93bd64c03a21faa53729cf3 Mon Sep 17 00:00:00 2001 From: Chen Mulong Date: Wed, 15 Feb 2023 16:55:27 +0800 Subject: [PATCH 262/330] VAR replace for Regress.cmake and fix test_rejectmap (#304) Since GP7 doesn't support plpythonu, the test_rejectmap was broken for GP7. This commit: - Improve the Regress.cmake, so if the input sql file has a "in.sql" extension, "@VAR@" in it will be replaced by the corresponding cmake VAR. - SQL_DIR/EXPECTED_DIR takes list as the argument now. So only the different cases need to be put into the expected7. Others will be used from expected directly. - Due to above change, same tests for gp6 and gp7 are removed from gp7, only diff is needed. - Set different @PLPYTHON_LANG_STR@ for GP6 & GP7 - Due to plpython composite type behavior change, the python code in the test has been modified. The behavior change is probably related to PG commit 94aceed317. 
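A sketch of the substitution mechanism; the function name here is invented for illustration, while PLPYTHON_LANG_STR is set per GPDB major version in tests/CMakeLists.txt below:

```sql
-- sql/demo.in.sql (input): configure_file() rewrites @VAR@ tokens and emits
-- sql/demo.sql into the build directory before the schedule runs.
CREATE FUNCTION py_demo() RETURNS int AS $$ return 1 $$ LANGUAGE @PLPYTHON_LANG_STR@;
-- Generated sql/demo.sql when building against GP7:
CREATE FUNCTION py_demo() RETURNS int AS $$ return 1 $$ LANGUAGE plpython3u;
```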
--- cmake/Regress.cmake | 68 +- tests/CMakeLists.txt | 12 +- tests/init_file | 2 + tests/isolation2/expected/test_rejectmap.out | 4 +- tests/isolation2/expected7/config.out | 30 - tests/isolation2/expected7/reset_config.out | 10 - .../expected7/test_create_extension.out | 15 - .../expected7/test_drop_extension.out | 12 - .../expected7/test_fast_quota_view.out | 182 ----- .../expected7/test_postmaster_restart.out | 139 ---- tests/isolation2/expected7/test_rejectmap.out | 738 ------------------ .../expected7/test_relation_cache.out | 70 -- .../expected7/test_relation_size.out | 87 --- tests/isolation2/expected7/test_truncate.out | 86 -- tests/isolation2/expected7/test_vacuum.out | 99 --- .../expected7/test_worker_timeout.out | 38 - tests/isolation2/isolation2_schedule7 | 2 +- ...st_rejectmap.sql => test_rejectmap.in.sql} | 31 +- tests/regress/expected7/config.out | 70 -- tests/regress/expected7/reset_config.out | 17 - .../expected7/test_activetable_limit.out | 56 -- .../test_clean_rejectmap_after_drop.out | 42 - tests/regress/expected7/test_column.out | 42 - tests/regress/expected7/test_copy.out | 26 - .../expected7/test_create_extension.out | 14 - .../expected7/test_ctas_before_set_quota.out | 61 -- .../expected7/test_ctas_no_preload_lib.out | 85 -- tests/regress/expected7/test_ctas_pause.out | 37 - tests/regress/expected7/test_ctas_role.out | 81 -- tests/regress/expected7/test_ctas_schema.out | 64 -- .../expected7/test_ctas_tablespace_role.out | 78 -- .../expected7/test_ctas_tablespace_schema.out | 74 -- .../expected7/test_default_tablespace.out | 186 ----- tests/regress/expected7/test_delete_quota.out | 37 - .../expected7/test_drop_after_pause.out | 64 -- .../regress/expected7/test_drop_extension.out | 13 - tests/regress/expected7/test_drop_table.out | 34 - tests/regress/expected7/test_extension.out | 523 ------------- .../expected7/test_fast_disk_check.out | 23 - .../expected7/test_fetch_table_stat.out | 35 - tests/regress/expected7/test_index.out | 133 ---- .../expected7/test_many_active_tables.out | 31 - tests/regress/expected7/test_mistake.out | 34 - tests/regress/expected7/test_partition.out | 63 -- .../expected7/test_pause_and_resume.out | 70 -- .../test_pause_and_resume_multiple_db.out | 201 ----- .../expected7/test_primary_failure.out | 126 --- .../expected7/test_quota_view_no_table.out | 64 -- .../expected7/test_readiness_logged.out | 38 - tests/regress/expected7/test_recreate.out | 27 - .../expected7/test_rejectmap_mul_db.out | 79 -- .../regress/expected7/test_relation_size.out | 99 --- tests/regress/expected7/test_rename.out | 71 -- tests/regress/expected7/test_reschema.out | 39 - tests/regress/expected7/test_role.out | 138 ---- tests/regress/expected7/test_schema.out | 109 --- tests/regress/expected7/test_show_status.out | 67 -- .../expected7/test_tablespace_diff_schema.out | 87 --- .../expected7/test_tablespace_role.out | 194 ----- .../expected7/test_tablespace_role_perseg.out | 235 ------ .../expected7/test_tablespace_schema.out | 147 ---- .../test_tablespace_schema_perseg.out | 282 ------- tests/regress/expected7/test_temp_role.out | 40 - tests/regress/expected7/test_toast.out | 31 - tests/regress/expected7/test_truncate.out | 36 - tests/regress/expected7/test_update.out | 23 - .../expected7/test_update_db_cache.out | 64 -- tests/regress/expected7/test_vacuum.out | 57 -- .../expected7/test_worker_not_ready.out | 26 - .../expected7/test_worker_schedule.out | 633 --------------- .../test_worker_schedule_exception.out | 113 --- 71 files changed, 86 insertions(+), 6558 
deletions(-) delete mode 100644 tests/isolation2/expected7/config.out delete mode 100644 tests/isolation2/expected7/reset_config.out delete mode 100644 tests/isolation2/expected7/test_create_extension.out delete mode 100644 tests/isolation2/expected7/test_drop_extension.out delete mode 100644 tests/isolation2/expected7/test_fast_quota_view.out delete mode 100644 tests/isolation2/expected7/test_postmaster_restart.out delete mode 100644 tests/isolation2/expected7/test_rejectmap.out delete mode 100644 tests/isolation2/expected7/test_relation_cache.out delete mode 100644 tests/isolation2/expected7/test_relation_size.out delete mode 100644 tests/isolation2/expected7/test_truncate.out delete mode 100644 tests/isolation2/expected7/test_vacuum.out delete mode 100644 tests/isolation2/expected7/test_worker_timeout.out rename tests/isolation2/sql/{test_rejectmap.sql => test_rejectmap.in.sql} (97%) delete mode 100644 tests/regress/expected7/config.out delete mode 100644 tests/regress/expected7/reset_config.out delete mode 100644 tests/regress/expected7/test_activetable_limit.out delete mode 100644 tests/regress/expected7/test_clean_rejectmap_after_drop.out delete mode 100644 tests/regress/expected7/test_column.out delete mode 100644 tests/regress/expected7/test_copy.out delete mode 100644 tests/regress/expected7/test_create_extension.out delete mode 100644 tests/regress/expected7/test_ctas_before_set_quota.out delete mode 100644 tests/regress/expected7/test_ctas_no_preload_lib.out delete mode 100644 tests/regress/expected7/test_ctas_pause.out delete mode 100644 tests/regress/expected7/test_ctas_role.out delete mode 100644 tests/regress/expected7/test_ctas_schema.out delete mode 100644 tests/regress/expected7/test_ctas_tablespace_role.out delete mode 100644 tests/regress/expected7/test_ctas_tablespace_schema.out delete mode 100644 tests/regress/expected7/test_default_tablespace.out delete mode 100644 tests/regress/expected7/test_delete_quota.out delete mode 100644 tests/regress/expected7/test_drop_after_pause.out delete mode 100644 tests/regress/expected7/test_drop_extension.out delete mode 100644 tests/regress/expected7/test_drop_table.out delete mode 100644 tests/regress/expected7/test_extension.out delete mode 100644 tests/regress/expected7/test_fast_disk_check.out delete mode 100644 tests/regress/expected7/test_fetch_table_stat.out delete mode 100644 tests/regress/expected7/test_index.out delete mode 100644 tests/regress/expected7/test_many_active_tables.out delete mode 100644 tests/regress/expected7/test_mistake.out delete mode 100644 tests/regress/expected7/test_partition.out delete mode 100644 tests/regress/expected7/test_pause_and_resume.out delete mode 100644 tests/regress/expected7/test_pause_and_resume_multiple_db.out delete mode 100644 tests/regress/expected7/test_primary_failure.out delete mode 100644 tests/regress/expected7/test_quota_view_no_table.out delete mode 100644 tests/regress/expected7/test_readiness_logged.out delete mode 100644 tests/regress/expected7/test_recreate.out delete mode 100644 tests/regress/expected7/test_rejectmap_mul_db.out delete mode 100644 tests/regress/expected7/test_relation_size.out delete mode 100644 tests/regress/expected7/test_rename.out delete mode 100644 tests/regress/expected7/test_reschema.out delete mode 100644 tests/regress/expected7/test_role.out delete mode 100644 tests/regress/expected7/test_schema.out delete mode 100644 tests/regress/expected7/test_show_status.out delete mode 100644 tests/regress/expected7/test_tablespace_diff_schema.out delete 
mode 100644 tests/regress/expected7/test_tablespace_role.out delete mode 100644 tests/regress/expected7/test_tablespace_role_perseg.out delete mode 100644 tests/regress/expected7/test_tablespace_schema.out delete mode 100644 tests/regress/expected7/test_tablespace_schema_perseg.out delete mode 100644 tests/regress/expected7/test_temp_role.out delete mode 100644 tests/regress/expected7/test_toast.out delete mode 100644 tests/regress/expected7/test_truncate.out delete mode 100644 tests/regress/expected7/test_update.out delete mode 100644 tests/regress/expected7/test_update_db_cache.out delete mode 100644 tests/regress/expected7/test_vacuum.out delete mode 100644 tests/regress/expected7/test_worker_not_ready.out delete mode 100644 tests/regress/expected7/test_worker_schedule.out delete mode 100644 tests/regress/expected7/test_worker_schedule_exception.out diff --git a/cmake/Regress.cmake b/cmake/Regress.cmake index 62d8807fb37..11f23f47b12 100644 --- a/cmake/Regress.cmake +++ b/cmake/Regress.cmake @@ -2,8 +2,8 @@ # # Usage: # RegressTarget_Add( -# SQL_DIR -# EXPECTED_DIR +# SQL_DIR [ ...] +# EXPECTED_DIR [ ...] # RESULTS_DIR # [INIT_FILE ...] # [SCHEDULE_FILE ...] @@ -25,6 +25,13 @@ # - regress_show_diff.sh # - regress_loop.sh # +# NOTE: If the input sql file extension is ".in.sql" instead of ".sql", the "@VAR@" in the input +# file will be replaced by the corresponding cmake VAR before tests are executed. +# +# NOTE: The directory that comes later in the SQL_DIR/EXPECTED_DIR list has a higher priory. The +# test case with the same name will be overwritten by the case that comes after in the directory +# list.t +# # Example: # RegressTarget_Add(installcheck_avro_fmt # REGRESS ${avro_regress_TARGETS} @@ -55,6 +62,12 @@ endfunction() # Find all tests in the given directory which uses fault injector, and add them to # fault_injector_test_list. function(_Find_FaultInjector_Tests sql_DIR) + if (NOT fault_injector_test_list) + set(fault_injector_test_list "" PARENT_SCOPE) + endif() + set(test_list ${fault_injector_test_list}) + + get_filename_component(sql_DIR ${sql_DIR} ABSOLUTE) file(GLOB files "${sql_DIR}/*.sql") foreach(f ${files}) set(ret 1) @@ -64,20 +77,38 @@ function(_Find_FaultInjector_Tests sql_DIR) OUTPUT_QUIET RESULT_VARIABLE ret) if(ret EQUAL 0) - get_filename_component(test_name ${f} NAME_WLE) - list(APPEND fault_injector_test_list ${test_name}) + get_filename_component(test_name ${f} NAME_WE) + if (NOT test_name IN_LIST test_list) + list(APPEND test_list ${test_name}) + endif() endif() endforeach() - set(fault_injector_test_list ${fault_injector_test_list} PARENT_SCOPE) + set(fault_injector_test_list ${test_list} PARENT_SCOPE) +endfunction() + +# Create symbolic links in the binary dir to input SQL files. 
+function(_Link_Test_Files src_DIR dest_DIR suffix) + get_filename_component(src_DIR ${src_DIR} ABSOLUTE) + file(MAKE_DIRECTORY ${dest_DIR}) + file(GLOB files "${src_DIR}/*.${suffix}") + foreach(f ${files}) + get_filename_component(file_name ${f} NAME) + file(CREATE_LINK ${f} ${dest_DIR}/${file_name} SYMBOLIC) + endforeach() + file(GLOB files "${src_DIR}/*.in.${suffix}") + foreach(f ${files}) + get_filename_component(file_name ${f} NAME_WE) + configure_file(${f} ${dest_DIR}/${file_name}.${suffix}) + endforeach() endfunction() function(RegressTarget_Add name) cmake_parse_arguments( arg "" - "SQL_DIR;EXPECTED_DIR;RESULTS_DIR;DATA_DIR;REGRESS_TYPE;RUN_TIMES;EXCLUDE_FAULT_INJECT_TEST" - "REGRESS;EXCLUDE;REGRESS_OPTS;INIT_FILE;SCHEDULE_FILE" + "RESULTS_DIR;DATA_DIR;REGRESS_TYPE;RUN_TIMES;EXCLUDE_FAULT_INJECT_TEST" + "SQL_DIR;EXPECTED_DIR;REGRESS;EXCLUDE;REGRESS_OPTS;INIT_FILE;SCHEDULE_FILE" ${ARGN}) if (NOT arg_EXPECTED_DIR) message(FATAL_ERROR @@ -106,10 +137,19 @@ function(RegressTarget_Add name) endif() endif() - # Find all tests using fault injector - if(arg_EXCLUDE_FAULT_INJECT_TEST) - _Find_FaultInjector_Tests(${arg_SQL_DIR}) - endif() + # Link input sql files to the build dir + foreach(sql_DIR IN LISTS arg_SQL_DIR) + _Link_Test_Files(${sql_DIR} ${working_DIR}/sql sql) + # Find all tests using fault injector + if(arg_EXCLUDE_FAULT_INJECT_TEST) + _Find_FaultInjector_Tests(${sql_DIR}) + endif() + endforeach() + + # Link output out files to the build dir + foreach(expected_DIR IN LISTS arg_EXPECTED_DIR) + _Link_Test_Files(${expected_DIR} ${working_DIR}/expected out) + endforeach() # Set REGRESS test cases foreach(r IN LISTS arg_REGRESS) @@ -148,8 +188,6 @@ function(RegressTarget_Add name) set(regress_opts_arg ${regress_opts_arg} ${o}) endforeach() - get_filename_component(sql_DIR ${arg_SQL_DIR} ABSOLUTE) - get_filename_component(expected_DIR ${arg_EXPECTED_DIR} ABSOLUTE) get_filename_component(results_DIR ${arg_RESULTS_DIR} ABSOLUTE) if (arg_DATA_DIR) get_filename_component(data_DIR ${arg_DATA_DIR} ABSOLUTE) @@ -171,10 +209,6 @@ function(RegressTarget_Add name) add_custom_target( ${name} WORKING_DIRECTORY ${working_DIR} - COMMAND rm -f sql - COMMAND ln -s ${sql_DIR} sql - COMMAND rm -f expected - COMMAND ln -s ${expected_DIR} expected COMMAND rm -f results COMMAND mkdir -p ${results_DIR} COMMAND ln -s ${results_DIR} results diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 13cc145a2bc..07abaf69569 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -1,7 +1,15 @@ include(${CMAKE_SOURCE_DIR}/cmake/Regress.cmake) +list(APPEND isolation2_expected_DIR ${CMAKE_CURRENT_SOURCE_DIR}/isolation2/expected) +list(APPEND regress_expected_DIR ${CMAKE_CURRENT_SOURCE_DIR}/regress/expected) if (${GP_MAJOR_VERSION} EQUAL 7) set(EXPECTED_DIR_SUFFIX "7") + list(APPEND isolation2_expected_DIR ${CMAKE_CURRENT_SOURCE_DIR}/isolation2/expected7) + list(APPEND regress_expected_DIR ${CMAKE_CURRENT_SOURCE_DIR}/regress/expected7) + # PLPYTHON_LANG_STR will be replaced by Regress.cmake + set(PLPYTHON_LANG_STR "plpython3u") +else() + set(PLPYTHON_LANG_STR "plpython2u") endif() set(exclude_fault_injector OFF) @@ -15,7 +23,7 @@ RegressTarget_Add(regress INIT_FILE ${CMAKE_CURRENT_SOURCE_DIR}/init_file SQL_DIR ${CMAKE_CURRENT_SOURCE_DIR}/regress/sql - EXPECTED_DIR ${CMAKE_CURRENT_SOURCE_DIR}/regress/expected${EXPECTED_DIR_SUFFIX} + EXPECTED_DIR ${regress_expected_DIR} RESULTS_DIR ${CMAKE_CURRENT_SOURCE_DIR}/regress/results DATA_DIR ${CMAKE_CURRENT_SOURCE_DIR}/data SCHEDULE_FILE 
${CMAKE_CURRENT_SOURCE_DIR}/regress/diskquota_schedule${EXPECTED_DIR_SUFFIX} @@ -31,7 +39,7 @@ RegressTarget_Add(isolation2 INIT_FILE ${CMAKE_CURRENT_SOURCE_DIR}/init_file SQL_DIR ${CMAKE_CURRENT_SOURCE_DIR}/isolation2/sql - EXPECTED_DIR ${CMAKE_CURRENT_SOURCE_DIR}/isolation2/expected${EXPECTED_DIR_SUFFIX} + EXPECTED_DIR ${isolation2_expected_DIR} RESULTS_DIR ${CMAKE_CURRENT_SOURCE_DIR}/isolation2/results DATA_DIR ${CMAKE_CURRENT_SOURCE_DIR}/data SCHEDULE_FILE ${CMAKE_CURRENT_SOURCE_DIR}/isolation2/isolation2_schedule${EXPECTED_DIR_SUFFIX} diff --git a/tests/init_file b/tests/init_file index 477b135590c..ff2ea6bf197 100644 --- a/tests/init_file +++ b/tests/init_file @@ -18,6 +18,8 @@ m/diskquota_utility.c:\d+\)/ s/diskquota_utility.c:\d+\)/diskquota_utility.c:xxx/ m/^CONTEXT:*/ s/^CONTEXT:/DETAIL:/ +m/plpython\du/ +s/plpython\du/plpythonu/ # Remove segment identifiers from error message. # E.g., (slice1 XXX.XXX.XXX.XXX:XXXX pid=XXXX) diff --git a/tests/isolation2/expected/test_rejectmap.out b/tests/isolation2/expected/test_rejectmap.out index 5e15acceb67..2ed02900e8d 100644 --- a/tests/isolation2/expected/test_rejectmap.out +++ b/tests/isolation2/expected/test_rejectmap.out @@ -301,7 +301,7 @@ CREATE TYPE cached_relation_entry AS ( reloid oid, relname text, re CREATE -- This function dumps given relation_cache entries to the given file. -CREATE OR REPLACE FUNCTION dump_relation_cache_to_file(filename text) RETURNS void AS $$ rv = plpy.execute(""" SELECT (oid, relname, relowner, relnamespace, reltablespace, relfilenode, gp_segment_id)::cached_relation_entry FROM gp_dist_random('pg_class') """) with open(filename, 'wt') as f: for v in rv: f.write(v['row'][1:-1] + '\n') $$ LANGUAGE plpythonu; +CREATE OR REPLACE FUNCTION dump_relation_cache_to_file(filename text) RETURNS void AS $$ rv = plpy.execute(""" SELECT (oid, relname, relowner, relnamespace, reltablespace, relfilenode, gp_segment_id)::cached_relation_entry FROM gp_dist_random('pg_class') """) with open(filename, 'wt') as f: for v in rv: row = v['row'] # The composite type results are different between GP6 & GP7 if isinstance(row, dict): r = "{0},{1},{2},{3},{4},{5},{6}".format( row['reloid'], row['relname'], row['relowner'], row['relnamespace'], row['reltablespace'], row['relfilenode'], row['segid']) else: r = row[1:-1] f.write(r + '\n') $$ LANGUAGE plpython2u; CREATE -- This function reads relation_cache entries from the given file. @@ -310,7 +310,7 @@ CREATE -- This function replaces the oid appears in the auxiliary relation's name -- with the corresponding relname of that oid. 
-CREATE OR REPLACE FUNCTION replace_oid_with_relname(given_name text, filename text) RETURNS text AS $$ /*in func*/ BEGIN /*in func*/ RETURN COALESCE( /*in func*/ REGEXP_REPLACE(given_name, /*in func*/ '^(pg_toast_|pg_aoseg_|pg_aovisimap_|pg_aoblkdir_|pg_aocsseg_)\d+', /*in func*/ '\1' || /*in func*/ (SELECT DISTINCT relname FROM read_relation_cache_from_file(filename) /*in func*/ WHERE REGEXP_REPLACE(given_name, '\D', '', 'g') <> '' AND reloid=REGEXP_REPLACE(given_name, '\D', '', 'g')::oid), 'g'), given_name);/*in func*/ END; /*in func*/ $$ LANGUAGE plpgsql; +CREATE OR REPLACE FUNCTION replace_oid_with_relname(given_name text, filename text) RETURNS text AS $$ /*in func*/ BEGIN /*in func*/ RETURN COALESCE( /*in func*/ REGEXP_REPLACE(given_name, /*in func*/ '^(pg_toast_|pg_aoseg_|pg_aovisimap_|pg_aoblkdir_|pg_aocsseg_)\d+', /*in func*/ '\1' || /*in func*/ (SELECT DISTINCT relname FROM read_relation_cache_from_file(filename) /*in func*/ WHERE REGEXP_REPLACE(given_name, '\D', '', 'g') <> '' /*in func*/ AND reloid=REGEXP_REPLACE(given_name, '\D', '', 'g')::oid), 'g'), given_name); /*in func*/ END; /*in func*/ $$ LANGUAGE plpgsql; CREATE -- This function helps dispatch rejectmap for the given relation to seg0. diff --git a/tests/isolation2/expected7/config.out b/tests/isolation2/expected7/config.out deleted file mode 100644 index 8ad8cbd0d08..00000000000 --- a/tests/isolation2/expected7/config.out +++ /dev/null @@ -1,30 +0,0 @@ - -!\retcode gpconfig -c shared_preload_libraries -v $(./data/current_binary_name); -(exited with code 0) -!\retcode gpconfig -c diskquota.naptime -v 0 --skipvalidation; -(exited with code 0) -!\retcode gpconfig -c max_worker_processes -v 20 --skipvalidation; -(exited with code 0) - -!\retcode gpstop -raf; -(exited with code 0) - --- Show the values of all GUC variables ---start_ignore --- naptime cannot be 0 for release build -1: SHOW diskquota.naptime; - diskquota.naptime -------------------- - 0 -(1 row) ---end_ignore -1: SHOW diskquota.max_active_tables; - diskquota.max_active_tables ------------------------------ - 307200 -(1 row) -1: SHOW diskquota.worker_timeout; - diskquota.worker_timeout --------------------------- - 60 -(1 row) diff --git a/tests/isolation2/expected7/reset_config.out b/tests/isolation2/expected7/reset_config.out deleted file mode 100644 index 3d076b36cca..00000000000 --- a/tests/isolation2/expected7/reset_config.out +++ /dev/null @@ -1,10 +0,0 @@ -!\retcode gpconfig -c diskquota.naptime -v 2; -(exited with code 0) -!\retcode gpstop -u; -(exited with code 0) - -1: SHOW diskquota.naptime; - diskquota.naptime -------------------- - 2 -(1 row) diff --git a/tests/isolation2/expected7/test_create_extension.out b/tests/isolation2/expected7/test_create_extension.out deleted file mode 100644 index 211ebd639f6..00000000000 --- a/tests/isolation2/expected7/test_create_extension.out +++ /dev/null @@ -1,15 +0,0 @@ -CREATE EXTENSION diskquota; -CREATE - -SELECT diskquota.init_table_size_table(); - init_table_size_table ------------------------ - -(1 row) - --- Wait after init so that diskquota.state is clean -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) diff --git a/tests/isolation2/expected7/test_drop_extension.out b/tests/isolation2/expected7/test_drop_extension.out deleted file mode 100644 index 4a9e4ecb16f..00000000000 --- a/tests/isolation2/expected7/test_drop_extension.out +++ /dev/null @@ -1,12 +0,0 @@ -SELECT diskquota.pause(); - pause -------- - -(1 row) -SELECT 
diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) -DROP EXTENSION diskquota; -DROP diff --git a/tests/isolation2/expected7/test_fast_quota_view.out b/tests/isolation2/expected7/test_fast_quota_view.out deleted file mode 100644 index 22bde74857d..00000000000 --- a/tests/isolation2/expected7/test_fast_quota_view.out +++ /dev/null @@ -1,182 +0,0 @@ -CREATE SCHEMA s1; -CREATE -CREATE SCHEMA s2; -CREATE - -CREATE ROLE r LOGIN SUPERUSER; -CREATE - -!\retcode mkdir -p /tmp/spc1; --- start_ignore - --- end_ignore -(exited with code 0) -!\retcode mkdir -p /tmp/spc2; --- start_ignore - --- end_ignore -(exited with code 0) - -DROP TABLESPACE IF EXISTS spc1; -DROP -CREATE TABLESPACE spc1 LOCATION '/tmp/spc1'; -CREATE -DROP TABLESPACE IF EXISTS spc2; -DROP -CREATE TABLESPACE spc2 LOCATION '/tmp/spc2'; -CREATE - -SELECT diskquota.set_schema_quota('s1', '100 MB'); - set_schema_quota ------------------- - -(1 row) -SELECT diskquota.set_schema_tablespace_quota('s2', 'spc1','100 MB'); - set_schema_tablespace_quota ------------------------------ - -(1 row) -SELECT diskquota.set_role_quota('r', '100 MB'); - set_role_quota ----------------- - -(1 row) -SELECT diskquota.set_role_tablespace_quota('r', 'spc2', '100 MB'); - set_role_tablespace_quota ---------------------------- - -(1 row) - --- test show_fast_schema_quota_view and show_fast_schema_tablespace_quota_view -1: BEGIN; -BEGIN -1: CREATE TABLE s1.t(i int) DISTRIBUTED BY (i); -CREATE -1: INSERT INTO s1.t SELECT generate_series(1, 100000); -INSERT 100000 - -1: CREATE TABLE s2.t(i int) TABLESPACE spc1 DISTRIBUTED BY (i); -CREATE -1: INSERT INTO s2.t SELECT generate_series(1, 100000); -INSERT 100000 - -1: SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- check schema quota view before transaction commits -2: SELECT schema_name, quota_in_mb, nspsize_in_bytes FROM diskquota.show_fast_schema_quota_view; - schema_name | quota_in_mb | nspsize_in_bytes --------------+-------------+------------------ - s1 | 100 | 3932160 -(1 row) -2: SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view; - schema_name | tablespace_name | quota_in_mb | nspsize_tablespace_in_bytes --------------+-----------------+-------------+----------------------------- - s2 | spc1 | 100 | 3932160 -(1 row) - -1: COMMIT; -COMMIT -2: SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) -2: SELECT schema_name, quota_in_mb, nspsize_in_bytes FROM diskquota.show_fast_schema_quota_view; - schema_name | quota_in_mb | nspsize_in_bytes --------------+-------------+------------------ - s1 | 100 | 3932160 -(1 row) -2: SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view; - schema_name | tablespace_name | quota_in_mb | nspsize_tablespace_in_bytes --------------+-----------------+-------------+----------------------------- - s2 | spc1 | 100 | 3932160 -(1 row) - --- login r to test role quota view -1: SET ROLE r; -SET - --- test show_fast_role_quota_view and show_fast_role_tablespace_quota_view -1: BEGIN; -BEGIN -1: CREATE TABLE t1(i int) DISTRIBUTED BY (i); -CREATE -1: INSERT INTO t1 SELECT generate_series(1, 100000); -INSERT 100000 - -1: CREATE TABLE t2(i int) TABLESPACE spc2 DISTRIBUTED BY (i); -CREATE -1: INSERT INTO t2 SELECT generate_series(1, 100000); -INSERT 
100000 - -1: SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- check role quota view before transaction commits -2: SELECT role_name, quota_in_mb, rolsize_in_bytes FROM diskquota.show_fast_role_quota_view; - role_name | quota_in_mb | rolsize_in_bytes ------------+-------------+------------------ - r | 100 | 7864320 -(1 row) -2: SELECT role_name, tablespace_name, quota_in_mb, rolsize_tablespace_in_bytes FROM diskquota.show_fast_role_tablespace_quota_view; - role_name | tablespace_name | quota_in_mb | rolsize_tablespace_in_bytes ------------+-----------------+-------------+----------------------------- - r | spc2 | 100 | 3932160 -(1 row) - -1: COMMIT; -COMMIT -2: SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) -2: SELECT role_name, quota_in_mb, rolsize_in_bytes FROM diskquota.show_fast_role_quota_view; - role_name | quota_in_mb | rolsize_in_bytes ------------+-------------+------------------ - r | 100 | 7864320 -(1 row) -2: SELECT role_name, tablespace_name, quota_in_mb, rolsize_tablespace_in_bytes FROM diskquota.show_fast_role_tablespace_quota_view; - role_name | tablespace_name | quota_in_mb | rolsize_tablespace_in_bytes ------------+-----------------+-------------+----------------------------- - r | spc2 | 100 | 3932160 -(1 row) - -DROP TABLE IF EXISTS s1.t; -DROP -DROP TABLE IF EXISTS s2.t; -DROP -DROP TABLE IF EXISTS t1; -DROP -DROP TABLE IF EXISTS t2; -DROP - -DROP SCHEMA IF EXISTS s1; -DROP -DROP SCHEMA IF EXISTS s2; -DROP -DROP ROLE IF EXISTS r; -DROP - -DROP TABLESPACE IF EXISTS spc1; -DROP -DROP TABLESPACE IF EXISTS spc2; -DROP - -!\retcode rm -rf /tmp/spc1; --- start_ignore - --- end_ignore -(exited with code 0) -!\retcode rm -rf /tmp/spc2; --- start_ignore - --- end_ignore -(exited with code 0) diff --git a/tests/isolation2/expected7/test_postmaster_restart.out b/tests/isolation2/expected7/test_postmaster_restart.out deleted file mode 100644 index 5f01eee9379..00000000000 --- a/tests/isolation2/expected7/test_postmaster_restart.out +++ /dev/null @@ -1,139 +0,0 @@ -!\retcode gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null; --- start_ignore - --- end_ignore -(exited with code 0) -!\retcode gpstop -u > /dev/null; --- start_ignore - --- end_ignore -(exited with code 0) - -1: CREATE SCHEMA postmaster_restart_s; -CREATE -1: SET search_path TO postmaster_restart_s; -SET - -1: SELECT diskquota.set_schema_quota('postmaster_restart_s', '1 MB'); - set_schema_quota ------------------- - -(1 row) -1: SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect fail -1: CREATE TABLE t1 AS SELECT generate_series(1,10000000); -ERROR: schema's disk space quota exceeded with name: 157893 (seg0 127.0.0.1:6002 pid=1025673) -1q: ... - --- launcher should exist --- [p]ostgres is to filter out the pgrep itself -!\retcode pgrep -f "[p]ostgres.*launcher"; --- start_ignore -2774491 - --- end_ignore -(exited with code 0) --- bgworker should exist -!\retcode pgrep -f "[p]ostgres.*diskquota.*isolation2test"; --- start_ignore -2774659 - --- end_ignore -(exited with code 0) - --- stop postmaster -!\retcode pg_ctl -D $MASTER_DATA_DIRECTORY -w stop; --- start_ignore -waiting for server to shut down.... 
done -server stopped --- end_ignore -(exited with code 0) - --- launcher should be terminated -!\retcode pgrep -f "[p]ostgres.*launcher"; --- start_ignore - --- end_ignore -(exited with code 1) --- bgworker should be terminated -!\retcode pgrep -f "[p]ostgres.*diskquota.*isolation2test"; --- start_ignore - --- end_ignore -(exited with code 1) - --- start postmaster --- -E needs to be changed to "-c gp_role=dispatch" for GPDB7 --- See https://github.com/greenplum-db/gpdb/pull/9396 -!\retcode pg_ctl -D $MASTER_DATA_DIRECTORY -w -o "-E" start; --- start_ignore -waiting for server to start....2022-02-14 21:41:39.147869 CST,,,p1017570,th1516906368,,,,0,,,seg-1,,,,,"LOG","00000","registering background worker ""ftsprobe process""",,,,,,,,"RegisterBackgroundWorker","bgworker.c",773, -2022-02-14 21:41:39.147899 CST,,,p1017570,th1516906368,,,,0,,,seg-1,,,,,"LOG","00000","registering background worker ""dtx recovery process""",,,,,,,,"RegisterBackgroundWorker","bgworker.c",773, -2022-02-14 21:41:39.147934 CST,,,p1017570,th1516906368,,,,0,,,seg-1,,,,,"LOG","00000","registering background worker ""sweeper process""",,,,,,,,"RegisterBackgroundWorker","bgworker.c",773, -2022-02-14 21:41:39.148550 CST,,,p1017570,th1516906368,,,,0,,,seg-1,,,,,"LOG","00000","registering background worker ""[diskquota] - launcher""",,,,,,,,"RegisterBackgroundWorker","bgworker.c",773, -2022-02-14 21:41:39.272714 CST,,,p1017570,th1516906368,,,,0,,,seg-1,,,,,"LOG","00000","redirecting log output to logging collector process",,"Future log output will appear in directory ""pg_log"".",,,,,,"SysLogger_Start","syslogger.c",986, - done -server started - --- end_ignore -(exited with code 0) --- Hopefully the bgworker can be started in 5 seconds -!\retcode sleep 5; --- start_ignore - --- end_ignore -(exited with code 0) - --- launcher should be restarted -!\retcode pgrep -f "[p]ostgres.*launcher"; --- start_ignore -2771049 - --- end_ignore -(exited with code 0) --- bgworker should be restarted -!\retcode pgrep -f "[p]ostgres.*diskquota.*isolation2test"; --- start_ignore -2771074 - --- end_ignore -(exited with code 0) - -1: SET search_path TO postmaster_restart_s; -SET -1: SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) --- expect fail -1: CREATE TABLE t2 AS SELECT generate_series(1,10000000); -ERROR: schema's disk space quota exceeded with name: 158089 (seg0 127.0.0.1:6002 pid=1027799) --- enlarge the quota limits -1: SELECT diskquota.set_schema_quota('postmaster_restart_s', '100 MB'); - set_schema_quota ------------------- - -(1 row) -1: SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) --- expect succeed -1: CREATE TABLE t3 AS SELECT generate_series(1,1000000); -CREATE 1000000 - -1: DROP SCHEMA postmaster_restart_s CASCADE; -DROP -1q: ... -!\retcode gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null; --- start_ignore - --- end_ignore -(exited with code 0) diff --git a/tests/isolation2/expected7/test_rejectmap.out b/tests/isolation2/expected7/test_rejectmap.out deleted file mode 100644 index 5e15acceb67..00000000000 --- a/tests/isolation2/expected7/test_rejectmap.out +++ /dev/null @@ -1,738 +0,0 @@ --- --- This file contains tests for dispatching rejectmap and canceling --- queries in smgrextend hook by relation's relfilenode. --- - --- Enable check quota by relfilenode on seg0. 
-SELECT gp_inject_fault_infinite('enable_check_quota_by_relfilenode', 'skip', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) - --- this function return valid tablespaceoid. --- For role/namespace quota, return as it is. --- For namespace_tablespace/role_tablespace quota, return non-zero tablespaceoid. -CREATE OR REPLACE FUNCTION get_real_tablespace_oid(block_type text, tablespaceoid oid) /*in func*/ RETURNS oid AS /*in func*/ $$ /*in func*/ BEGIN /*in func*/ /*in func*/ CASE /*in func*/ WHEN (block_type = 'NAMESPACE') OR (block_type = 'ROLE') THEN RETURN tablespaceoid; /*in func*/ ELSE RETURN ( /*in func*/ CASE tablespaceoid /*in func*/ WHEN 0 THEN (SELECT dattablespace FROM pg_database WHERE datname = CURRENT_DATABASE()) /*in func*/ ELSE /*in func*/ tablespaceoid /*in func*/ END /*in func*/ ); /*in func*/ END CASE; /*in func*/ END; /*in func*/ $$ LANGUAGE plpgsql; /*in func*/ -CREATE OR REPLACE FUNCTION block_relation_on_seg0(rel regclass, block_type text, segexceeded boolean) RETURNS void AS $$ /*in func*/ DECLARE /*in func*/ bt int; /*in func*/ targetoid oid; /*in func*/ BEGIN /*in func*/ CASE block_type /*in func*/ WHEN 'NAMESPACE' THEN /*in func*/ bt = 0; /*in func*/ SELECT relnamespace INTO targetoid /*in func*/ FROM pg_class WHERE relname=rel::text; /*in func*/ WHEN 'ROLE' THEN /*in func*/ bt = 1; /*in func*/ SELECT relowner INTO targetoid /*in func*/ FROM pg_class WHERE relname=rel::text; /*in func*/ WHEN 'NAMESPACE_TABLESPACE' THEN /*in func*/ bt = 2; /*in func*/ SELECT relnamespace INTO targetoid /*in func*/ FROM pg_class WHERE relname=rel::text; /*in func*/ WHEN 'ROLE_TABLESPACE' THEN /*in func*/ bt = 3; /*in func*/ SELECT relowner INTO targetoid /*in func*/ FROM pg_class WHERE relname=rel::text; /*in func*/ END CASE; /*in func*/ PERFORM diskquota.refresh_rejectmap( /*in func*/ ARRAY[ /*in func*/ ROW (targetoid, /*in func*/ (SELECT oid FROM pg_database WHERE datname = CURRENT_DATABASE()), /*in func*/ (SELECT get_real_tablespace_oid( /*in func*/ block_type, /*in func*/ (SELECT pg_class.reltablespace FROM pg_class WHERE relname = rel::TEXT) /*in func*/ )), /*in func*/ bt, /*in func*/ segexceeded) /*in func*/ ]::diskquota.rejectmap_entry[], /*in func*/ ARRAY[rel]::oid[]) /*in func*/ FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; /*in func*/ END; $$ /*in func*/ LANGUAGE 'plpgsql'; -CREATE - --- 1. Test canceling the extending of an ordinary table. -CREATE TABLE blocked_t1(i int) DISTRIBUTED BY (i); -CREATE -INSERT INTO blocked_t1 SELECT generate_series(1, 100); -INSERT 100 --- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. -SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) - --- Insert a small amount of data into blocked_t1. It will hang up at check_rejectmap_by_relfilenode(). -1&: INSERT INTO blocked_t1 SELECT generate_series(1, 10000); - -SELECT gp_wait_until_triggered_fault('check_rejectmap_by_relfilenode', 1, dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_wait_until_triggered_fault -------------------------------- - Success: -(1 row) - --- Dispatch rejectmap to seg0. 
-SELECT block_relation_on_seg0('blocked_t1'::regclass, 'NAMESPACE'::text, false); - block_relation_on_seg0 ------------------------- - -(1 row) - -SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) - --- Session 1 will return and emit an error message saying that the quota limit is exceeded on seg0. -1<: <... completed> -ERROR: schema's disk space quota exceeded with name: 2200 (seg0 127.0.0.1:6002 pid=2163) - --- Clean up the rejectmap on seg0. -SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; - refresh_rejectmap -------------------- - -(1 row) - --- 2. Test canceling the extending of a toast relation. -CREATE TABLE blocked_t2(i text) DISTRIBUTED BY (i); -CREATE -INSERT INTO blocked_t2 SELECT generate_series(1, 100); -INSERT 100 --- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. -SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) - --- Insert a small amount of data into blocked_t2. It will hang up at check_rejectmap_by_relfilenode(). -1&: INSERT INTO blocked_t2 SELECT generate_series(1, 10000); - -SELECT gp_wait_until_triggered_fault('check_rejectmap_by_relfilenode', 1, dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_wait_until_triggered_fault -------------------------------- - Success: -(1 row) - --- Dispatch rejectmap to seg0. -SELECT block_relation_on_seg0('blocked_t2'::regclass, 'NAMESPACE'::text, false); - block_relation_on_seg0 ------------------------- - -(1 row) - -SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) - --- Session 1 will return and emit an error message saying that the quota limit is exceeded on seg0. -1<: <... completed> -ERROR: schema's disk space quota exceeded with name: 2200 (seg0 127.0.0.1:6002 pid=2163) - --- Clean up the rejectmap on seg0. -SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; - refresh_rejectmap -------------------- - -(1 row) - --- 3. Test canceling the extending of an appendonly relation. -CREATE TABLE blocked_t3(i int) WITH (appendonly=true) DISTRIBUTED BY (i); -CREATE -INSERT INTO blocked_t3 SELECT generate_series(1, 100); -INSERT 100 --- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. -SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) - --- Insert a small amount of data into blocked_t3. It will hang up at check_rejectmap_by_relfilenode(). -1&: INSERT INTO blocked_t3 SELECT generate_series(1, 10000); - -SELECT gp_wait_until_triggered_fault('check_rejectmap_by_relfilenode', 1, dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_wait_until_triggered_fault -------------------------------- - Success: -(1 row) - --- Dispatch rejectmap to seg0. 
-SELECT block_relation_on_seg0('blocked_t3'::regclass, 'NAMESPACE'::text, false); - block_relation_on_seg0 ------------------------- - -(1 row) - -SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) - --- Session 1 will return and emit an error message saying that the quota limit is exceeded on seg0. -1<: <... completed> -ERROR: schema's disk space quota exceeded with name: 2200 (seg0 127.0.0.1:6002 pid=2163) - --- Clean up the rejectmap on seg0. -SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; - refresh_rejectmap -------------------- - -(1 row) - --- 4. Test canceling the extending of an index relation. -CREATE TABLE blocked_t4(i int) DISTRIBUTED BY (i); -CREATE -CREATE INDEX blocked_t4_index ON blocked_t4(i); -CREATE -INSERT INTO blocked_t4 SELECT generate_series(1, 100); -INSERT 100 --- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. -SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) - --- Insert a small amount of data into blocked_t4. It will hang up at check_rejectmap_by_relfilenode(). -1&: INSERT INTO blocked_t4 SELECT generate_series(1, 10000); - -SELECT gp_wait_until_triggered_fault('check_rejectmap_by_relfilenode', 1, dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_wait_until_triggered_fault -------------------------------- - Success: -(1 row) - --- Dispatch rejectmap to seg0. -SELECT block_relation_on_seg0('blocked_t4_index'::regclass, 'NAMESPACE'::text, false); - block_relation_on_seg0 ------------------------- - -(1 row) - -SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) - --- Session 1 will return and emit an error message saying that the quota limit is exceeded on seg0. -1<: <... completed> -ERROR: schema's disk space quota exceeded with name: 2200 (seg0 127.0.0.1:6002 pid=2163) - --- Clean up the rejectmap on seg0. -SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; - refresh_rejectmap -------------------- - -(1 row) - --- 5. Test error message for NAMESPACE_TABLESPACE_QUOTA when the quota limit is exceeded on segments. -CREATE TABLE blocked_t5(i int) DISTRIBUTED BY (i); -CREATE -INSERT INTO blocked_t5 SELECT generate_series(1, 100); -INSERT 100 --- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. 
-SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) -1&: INSERT INTO blocked_t5 SELECT generate_series(1, 10000); - -SELECT gp_wait_until_triggered_fault('check_rejectmap_by_relfilenode', 1, dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_wait_until_triggered_fault -------------------------------- - Success: -(1 row) - -SELECT block_relation_on_seg0('blocked_t5'::regclass, 'NAMESPACE_TABLESPACE'::text, true); - block_relation_on_seg0 ------------------------- - -(1 row) -SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) -1<: <... completed> -ERROR: tablespace: 1663, schema: 2200 diskquota exceeded per segment quota (seg0 127.0.0.1:6002 pid=2163) --- Clean up the rejectmap on seg0. -SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; - refresh_rejectmap -------------------- - -(1 row) - --- 6. Test error message for ROLE_TABLESPACE_QUOTA when the quota limit is exceeded on segments. -CREATE TABLE blocked_t6(i int) DISTRIBUTED BY (i); -CREATE -INSERT INTO blocked_t6 SELECT generate_series(1, 100); -INSERT 100 --- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. -SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) - -1&: INSERT INTO blocked_t6 SELECT generate_series(1, 10000); - -SELECT gp_wait_until_triggered_fault('check_rejectmap_by_relfilenode', 1, dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_wait_until_triggered_fault -------------------------------- - Success: -(1 row) - -SELECT block_relation_on_seg0('blocked_t6'::regclass, 'ROLE_TABLESPACE'::text, true); - block_relation_on_seg0 ------------------------- - -(1 row) -SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) -1<: <... completed> -ERROR: tablespace: 1663, role: 10 diskquota exceeded per segment quota (seg0 127.0.0.1:6002 pid=2163) --- Clean up the rejectmap on seg0. -SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; - refresh_rejectmap -------------------- - -(1 row) - --- Do some clean-ups. -DROP TABLE blocked_t1; -DROP -DROP TABLE blocked_t2; -DROP -DROP TABLE blocked_t3; -DROP -DROP TABLE blocked_t4; -DROP -DROP TABLE blocked_t5; -DROP -DROP TABLE blocked_t6; -DROP - --- --- Below are helper functions for testing adding uncommitted relations to rejectmap. --- --- start_ignore -CREATE OR REPLACE LANGUAGE plpythonu; -CREATE --- end_ignore -CREATE TYPE cached_relation_entry AS ( reloid oid, relname text, relowner oid, relnamespace oid, reltablespace oid, relfilenode oid, segid int); -CREATE - --- This function dumps given relation_cache entries to the given file. 
-CREATE OR REPLACE FUNCTION dump_relation_cache_to_file(filename text) RETURNS void AS $$ rv = plpy.execute(""" SELECT (oid, relname, relowner, relnamespace, reltablespace, relfilenode, gp_segment_id)::cached_relation_entry FROM gp_dist_random('pg_class') """) with open(filename, 'wt') as f: for v in rv: f.write(v['row'][1:-1] + '\n') $$ LANGUAGE plpythonu; -CREATE - --- This function reads relation_cache entries from the given file. -CREATE OR REPLACE FUNCTION read_relation_cache_from_file(filename text) RETURNS SETOF cached_relation_entry AS $$ with open(filename) as f: for l in f: r = l.split(',') yield (r[0], r[1], r[2], r[3], r[4], r[5], r[6]) $$ LANGUAGE plpythonu; -CREATE - --- This function replaces the oid appears in the auxiliary relation's name --- with the corresponding relname of that oid. -CREATE OR REPLACE FUNCTION replace_oid_with_relname(given_name text, filename text) RETURNS text AS $$ /*in func*/ BEGIN /*in func*/ RETURN COALESCE( /*in func*/ REGEXP_REPLACE(given_name, /*in func*/ '^(pg_toast_|pg_aoseg_|pg_aovisimap_|pg_aoblkdir_|pg_aocsseg_)\d+', /*in func*/ '\1' || /*in func*/ (SELECT DISTINCT relname FROM read_relation_cache_from_file(filename) /*in func*/ WHERE REGEXP_REPLACE(given_name, '\D', '', 'g') <> '' AND reloid=REGEXP_REPLACE(given_name, '\D', '', 'g')::oid), 'g'), given_name);/*in func*/ END; /*in func*/ $$ LANGUAGE plpgsql; -CREATE - --- This function helps dispatch rejectmap for the given relation to seg0. -CREATE OR REPLACE FUNCTION block_uncommitted_relation_on_seg0(rel text, block_type text, segexceeded boolean, filename text) RETURNS void AS $$ /*in func*/ DECLARE /*in func*/ bt int; /*in func*/ targetoid oid; /*in func*/ BEGIN /*in func*/ CASE block_type /*in func*/ WHEN 'NAMESPACE' THEN /*in func*/ bt = 0; /*in func*/ SELECT relnamespace INTO targetoid /*in func*/ FROM read_relation_cache_from_file(filename) /*in func*/ WHERE relname=rel::text AND segid=0; /*in func*/ WHEN 'ROLE' THEN /*in func*/ bt = 1; /*in func*/ SELECT relowner INTO targetoid /*in func*/ FROM read_relation_cache_from_file(filename) /*in func*/ WHERE relname=rel::text AND segid=0; /*in func*/ WHEN 'NAMESPACE_TABLESPACE' THEN /*in func*/ bt = 2; /*in func*/ SELECT relnamespace INTO targetoid /*in func*/ FROM read_relation_cache_from_file(filename) /*in func*/ WHERE relname=rel::text AND segid=0; /*in func*/ WHEN 'ROLE_TABLESPACE' THEN /*in func*/ bt = 3; /*in func*/ SELECT relowner INTO targetoid /*in func*/ FROM read_relation_cache_from_file(filename) /*in func*/ WHERE relname=rel::text AND segid=0; /*in func*/ END CASE; /*in func*/ PERFORM diskquota.refresh_rejectmap( /*in func*/ ARRAY[ /*in func*/ ROW (targetoid, /*in func*/ (SELECT oid FROM pg_database WHERE datname = CURRENT_DATABASE()), /*in func*/ (SELECT get_real_tablespace_oid( /*in func*/ block_type, /*in func*/ (SELECT reltablespace /*in func*/ FROM read_relation_cache_from_file(filename) /*in func*/ WHERE relname = rel::text /*in func*/ AND segid = 0) /*in func*/ )), /*in func*/ bt, /*in func*/ segexceeded) /*in func*/ ]::diskquota.rejectmap_entry[], /*in func*/ ARRAY[(SELECT reloid FROM read_relation_cache_from_file(filename) /*in func*/ WHERE relname=rel::text AND segid=0)::regclass]::oid[]) /*in func*/ FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; /*in func*/ END; $$ /*in func*/ LANGUAGE 'plpgsql'; -CREATE - --- 7. Test that we are able to block an ordinary relation on seg0 by its relnamespace. 
-1: BEGIN; -BEGIN -1: CREATE TABLE blocked_t7(i int) DISTRIBUTED BY (i); -CREATE -1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); - dump_relation_cache_to_file ------------------------------ - -(1 row) --- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. -SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) --- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). -1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); -SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE'::text, false, '/tmp/test_rejectmap.csv'::text); - block_uncommitted_relation_on_seg0 ------------------------------------- - -(1 row) --- Show that blocked_t7 is blocked on seg0. -2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text), be.target_type, be.target_oid FROM gp_dist_random('diskquota.rejectmap') AS be, read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0; - segid | relnamespace | reltablespace | relowner | replace_oid_with_relname | target_type | target_oid --------+--------------+---------------+----------+--------------------------+-----------------+------------ - 0 | 2200 | 0 | 10 | blocked_t7 | NAMESPACE_QUOTA | 2200 -(1 row) -SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) -1<: <... completed> -ERROR: schema's disk space quota exceeded with name: 2200 (seg0 127.0.0.1:6002 pid=2163) -1: ABORT; -ABORT --- Clean up the rejectmap on seg0. -SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; - refresh_rejectmap -------------------- - -(1 row) - --- 8. Test that we are able to block an ordinary relation on seg0 by its relowner. -1: BEGIN; -BEGIN -1: CREATE TABLE blocked_t7(i int) DISTRIBUTED BY (i); -CREATE -1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); - dump_relation_cache_to_file ------------------------------ - -(1 row) --- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. -SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) --- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). -1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); -SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'ROLE'::text, false, '/tmp/test_rejectmap.csv'::text); - block_uncommitted_relation_on_seg0 ------------------------------------- - -(1 row) --- Show that blocked_t7 is blocked on seg0. 
-2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text), be.target_type, be.target_oid FROM gp_dist_random('diskquota.rejectmap') AS be, read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0; - segid | relnamespace | reltablespace | relowner | replace_oid_with_relname | target_type | target_oid --------+--------------+---------------+----------+--------------------------+-------------+------------ - 0 | 2200 | 0 | 10 | blocked_t7 | ROLE_QUOTA | 10 -(1 row) -SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) -1<: <... completed> -ERROR: role's disk space quota exceeded with name: 10 (seg0 127.0.0.1:6002 pid=2163) -1: ABORT; -ABORT --- Clean up the rejectmap on seg0. -SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; - refresh_rejectmap -------------------- - -(1 row) - --- 9. Test that we are able to block an ordinary relation on seg0 by its relnamespace and reltablespace. -1: BEGIN; -BEGIN -1: CREATE TABLE blocked_t7(i int) DISTRIBUTED BY (i); -CREATE -1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); - dump_relation_cache_to_file ------------------------------ - -(1 row) --- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. -SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) --- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). -1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); -SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE_TABLESPACE'::text, false, '/tmp/test_rejectmap.csv'::text); - block_uncommitted_relation_on_seg0 ------------------------------------- - -(1 row) --- Show that blocked_t7 is blocked on seg0. -2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text), be.target_type, be.target_oid FROM gp_dist_random('diskquota.rejectmap') AS be, read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0; - segid | relnamespace | reltablespace | relowner | replace_oid_with_relname | target_type | target_oid --------+--------------+---------------+----------+--------------------------+----------------------------+------------ - 0 | 2200 | 0 | 10 | blocked_t7 | NAMESPACE_TABLESPACE_QUOTA | 2200 -(1 row) -SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) -1<: <... completed> -ERROR: tablespace: 1663, schema: 2200 diskquota exceeded (seg0 127.0.0.1:6002 pid=2163) -1: ABORT; -ABORT --- Clean up the rejectmap on seg0. -SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; - refresh_rejectmap -------------------- - -(1 row) - --- 10. 
Test that we are able to block an ordinary relation on seg0 by its relowner and reltablespace. -1: BEGIN; -BEGIN -1: CREATE TABLE blocked_t7(i int) DISTRIBUTED BY (i); -CREATE -1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); - dump_relation_cache_to_file ------------------------------ - -(1 row) --- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. -SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) --- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). -1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); -SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'ROLE_TABLESPACE'::text, false, '/tmp/test_rejectmap.csv'::text); - block_uncommitted_relation_on_seg0 ------------------------------------- - -(1 row) --- Show that blocked_t7 is blocked on seg0. -2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text), be.target_type, be.target_oid FROM gp_dist_random('diskquota.rejectmap') AS be, read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0; - segid | relnamespace | reltablespace | relowner | replace_oid_with_relname | target_type | target_oid --------+--------------+---------------+----------+--------------------------+-----------------------+------------ - 0 | 2200 | 0 | 10 | blocked_t7 | ROLE_TABLESPACE_QUOTA | 10 -(1 row) -SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) -1<: <... completed> -ERROR: tablespace: 1663, role: 10 diskquota exceeded (seg0 127.0.0.1:6002 pid=2163) -1: ABORT; -ABORT --- Clean up the rejectmap on seg0. -SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; - refresh_rejectmap -------------------- - -(1 row) - --- 11. Test that we are able to block an ordinary relation on seg0 by its relnamespace and reltablespace (segexceeded=true). -1: BEGIN; -BEGIN -1: CREATE TABLE blocked_t7(i int) DISTRIBUTED BY (i); -CREATE -1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); - dump_relation_cache_to_file ------------------------------ - -(1 row) --- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. -SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) --- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). -1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); -SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE_TABLESPACE'::text, true, '/tmp/test_rejectmap.csv'::text); - block_uncommitted_relation_on_seg0 ------------------------------------- - -(1 row) --- Show that blocked_t7 is blocked on seg0. 
-2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text), be.target_type, be.target_oid FROM gp_dist_random('diskquota.rejectmap') AS be, read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0; - segid | relnamespace | reltablespace | relowner | replace_oid_with_relname | target_type | target_oid --------+--------------+---------------+----------+--------------------------+----------------------------+------------ - 0 | 2200 | 0 | 10 | blocked_t7 | NAMESPACE_TABLESPACE_QUOTA | 2200 -(1 row) -SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) -1<: <... completed> -ERROR: tablespace: 1663, schema: 2200 diskquota exceeded per segment quota (seg0 127.0.0.1:6002 pid=2163) -1: ABORT; -ABORT --- Clean up the rejectmap on seg0. -SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; - refresh_rejectmap -------------------- - -(1 row) - --- 12. Test that we are able to block an ordinary relation on seg0 by its relowner and reltablespace (segexceeded=true). -1: BEGIN; -BEGIN -1: CREATE TABLE blocked_t7(i int) DISTRIBUTED BY (i); -CREATE -1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); - dump_relation_cache_to_file ------------------------------ - -(1 row) --- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. -SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) --- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). -1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); -SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'ROLE_TABLESPACE'::text, true, '/tmp/test_rejectmap.csv'::text); - block_uncommitted_relation_on_seg0 ------------------------------------- - -(1 row) --- Show that blocked_t7 is blocked on seg0. -2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text), be.target_type, be.target_oid FROM gp_dist_random('diskquota.rejectmap') AS be, read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0; - segid | relnamespace | reltablespace | relowner | replace_oid_with_relname | target_type | target_oid --------+--------------+---------------+----------+--------------------------+-----------------------+------------ - 0 | 2200 | 0 | 10 | blocked_t7 | ROLE_TABLESPACE_QUOTA | 10 -(1 row) -SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) -1<: <... completed> -ERROR: tablespace: 1663, role: 10 diskquota exceeded per segment quota (seg0 127.0.0.1:6002 pid=2163) -1: ABORT; -ABORT --- Clean up the rejectmap on seg0. 
-SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; - refresh_rejectmap -------------------- - -(1 row) - --- 13. Test that we are able to block a toast relation on seg0 by its namespace. -1: BEGIN; -BEGIN -1: CREATE TABLE blocked_t7(i text) DISTRIBUTED BY (i); -CREATE -1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); - dump_relation_cache_to_file ------------------------------ - -(1 row) --- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. -SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) --- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). -1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); -SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE'::text, true, '/tmp/test_rejectmap.csv'::text); - block_uncommitted_relation_on_seg0 ------------------------------------- - -(1 row) --- Show that blocked_t7 is blocked on seg0. -2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text) AS relname, be.target_type, be.target_oid FROM gp_dist_random('diskquota.rejectmap') AS be, read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0 ORDER BY relname DESC; - segid | relnamespace | reltablespace | relowner | relname | target_type | target_oid --------+--------------+---------------+----------+---------------------------+-----------------+------------ - 0 | 99 | 0 | 10 | pg_toast_blocked_t7_index | NAMESPACE_QUOTA | 2200 - 0 | 99 | 0 | 10 | pg_toast_blocked_t7 | NAMESPACE_QUOTA | 2200 - 0 | 2200 | 0 | 10 | blocked_t7 | NAMESPACE_QUOTA | 2200 -(3 rows) -SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) -1<: <... completed> -ERROR: schema's disk space quota exceeded with name: 2200 (seg0 127.0.0.1:6002 pid=2163) -1: ABORT; -ABORT --- Clean up the rejectmap on seg0. -SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; - refresh_rejectmap -------------------- - -(1 row) - --- 14. Test that we are able to block an appendonly relation on seg0 by its namespace. -1: BEGIN; -BEGIN -1: CREATE TABLE blocked_t7(i int) WITH (appendonly=true) DISTRIBUTED BY (i); -CREATE -1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); - dump_relation_cache_to_file ------------------------------ - -(1 row) --- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. -SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) --- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). 
-1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); -SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE'::text, true, '/tmp/test_rejectmap.csv'::text); - block_uncommitted_relation_on_seg0 ------------------------------------- - -(1 row) --- Show that blocked_t7 is blocked on seg0. -2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text) AS relname, be.target_type, be.target_oid FROM gp_dist_random('diskquota.rejectmap') AS be, read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0 ORDER BY relname DESC; - segid | relnamespace | reltablespace | relowner | relname | target_type | target_oid --------+--------------+---------------+----------+-------------------------------+-----------------+------------ - 0 | 6104 | 0 | 10 | pg_aovisimap_blocked_t7_index | NAMESPACE_QUOTA | 2200 - 0 | 6104 | 0 | 10 | pg_aovisimap_blocked_t7 | NAMESPACE_QUOTA | 2200 - 0 | 6104 | 0 | 10 | pg_aoseg_blocked_t7 | NAMESPACE_QUOTA | 2200 - 0 | 2200 | 0 | 10 | blocked_t7 | NAMESPACE_QUOTA | 2200 -(4 rows) -SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) -1<: <... completed> -ERROR: schema's disk space quota exceeded with name: 2200 (seg0 127.0.0.1:6002 pid=2163) -1: ABORT; -ABORT --- Clean up the rejectmap on seg0. -SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; - refresh_rejectmap -------------------- - -(1 row) - --- 15. Test that we are able to block an appendonly (column oriented) relation on seg0 by its namespace. -1: BEGIN; -BEGIN -1: CREATE TABLE blocked_t7(i int) WITH (appendonly=true, orientation=column) DISTRIBUTED BY (i); -CREATE -1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); - dump_relation_cache_to_file ------------------------------ - -(1 row) --- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. -SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) --- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). -1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); -SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE'::text, true, '/tmp/test_rejectmap.csv'::text); - block_uncommitted_relation_on_seg0 ------------------------------------- - -(1 row) --- Show that blocked_t7 is blocked on seg0. 
-2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text) AS relname, be.target_type, be.target_oid FROM gp_dist_random('diskquota.rejectmap') AS be, read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0 ORDER BY relname DESC; - segid | relnamespace | reltablespace | relowner | relname | target_type | target_oid --------+--------------+---------------+----------+-------------------------------+-----------------+------------ - 0 | 6104 | 0 | 10 | pg_aovisimap_blocked_t7_index | NAMESPACE_QUOTA | 2200 - 0 | 6104 | 0 | 10 | pg_aovisimap_blocked_t7 | NAMESPACE_QUOTA | 2200 - 0 | 6104 | 0 | 10 | pg_aocsseg_blocked_t7 | NAMESPACE_QUOTA | 2200 - 0 | 2200 | 0 | 10 | blocked_t7 | NAMESPACE_QUOTA | 2200 -(4 rows) -SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) -1<: <... completed> -ERROR: schema's disk space quota exceeded with name: 2200 (seg0 127.0.0.1:6002 pid=2163) -1: ABORT; -ABORT --- Clean up the rejectmap on seg0. -SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; - refresh_rejectmap -------------------- - -(1 row) - --- Reset fault injection points set by us at the top of this test. -SELECT gp_inject_fault_infinite('enable_check_quota_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) diff --git a/tests/isolation2/expected7/test_relation_cache.out b/tests/isolation2/expected7/test_relation_cache.out deleted file mode 100644 index df61fdb810f..00000000000 --- a/tests/isolation2/expected7/test_relation_cache.out +++ /dev/null @@ -1,70 +0,0 @@ -CREATE DATABASE tempdb1; -CREATE -CREATE DATABASE tempdb2; -CREATE - --- perpare extension -1:@db_name tempdb1: CREATE EXTENSION diskquota; -CREATE -1:@db_name tempdb1: SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) -2:@db_name tempdb2: CREATE EXTENSION diskquota; -CREATE -2:@db_name tempdb2: SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- create a table in tempdb1 -1:@db_name tempdb1: BEGIN; -BEGIN -1:@db_name tempdb1: CREATE TABLE t(i int); -CREATE -1:@db_name tempdb1: INSERT INTO t select generate_series(1, 10000); -INSERT 10000 - --- query relation_cache in tempdb2 -2:@db_name tempdb2: SELECT count(*) from diskquota.show_relation_cache(); - count -------- - 0 -(1 row) - -1:@db_name tempdb1: ABORT; -ABORT - -1:@db_name tempdb1: SELECT diskquota.pause(); - pause -------- - -(1 row) -1:@db_name tempdb1: SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) -1:@db_name tempdb1: DROP EXTENSION diskquota; -DROP -2:@db_name tempdb2: SELECT diskquota.pause(); - pause -------- - -(1 row) -2:@db_name tempdb2: SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) -2:@db_name tempdb2: DROP EXTENSION diskquota; -DROP -1q: ... -2q: ... 
- -DROP DATABASE tempdb1; -DROP -DROP DATABASE tempdb2; -DROP diff --git a/tests/isolation2/expected7/test_relation_size.out b/tests/isolation2/expected7/test_relation_size.out deleted file mode 100644 index 45e9a9cc149..00000000000 --- a/tests/isolation2/expected7/test_relation_size.out +++ /dev/null @@ -1,87 +0,0 @@ --- --- 1. Test that when a relation is dropped before diskquota.relation_size() --- applying stat(2) on the physical file, diskquota.relation_size() consumes --- the error and returns 0. --- - -CREATE TABLE t_dropped(i int) DISTRIBUTED BY (i); -CREATE --- Insert a small amount of data to 't_dropped'. -INSERT INTO t_dropped SELECT generate_series(1, 100); -INSERT 100 --- Shows that the size of relfilenode is not zero. -SELECT diskquota.relation_size('t_dropped'); - relation_size ---------------- - 98304 -(1 row) - --- Inject 'suspension' to servers. -SELECT gp_inject_fault_infinite('diskquota_before_stat_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content>=0; - gp_inject_fault_infinite --------------------------- - Success: - Success: - Success: -(3 rows) - --- Session 1 will hang before applying stat(2) to the physical file. -1&: SELECT diskquota.relation_size('t_dropped'); --- Wait until the fault is triggered to avoid the following race condition: --- The 't_dropped' table is dropped before evaluating "SELECT diskquota.relation_size('t_dropped')" --- and the query will fail with 'ERROR: relation "t_dropped" does not exist' -SELECT gp_wait_until_triggered_fault('diskquota_before_stat_relfilenode', 1, dbid) FROM gp_segment_configuration WHERE role='p' AND content>=0; - gp_wait_until_triggered_fault -------------------------------- - Success: - Success: - Success: -(3 rows) --- Drop the table. -DROP TABLE t_dropped; -DROP --- Remove the injected 'suspension'. -SELECT gp_inject_fault_infinite('diskquota_before_stat_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content>=0; - gp_inject_fault_infinite --------------------------- - Success: - Success: - Success: -(3 rows) --- Session 1 will continue and returns 0. -1<: <... completed> - relation_size ---------------- - 0 -(1 row) - --- 2. Test whether relation size is correct under concurrent writes for AO tables. --- Since no row is deleted, diskquota.relation_size() should be equal to --- pg_relation_size(). - -CREATE TABLE t_ao(i int) WITH (appendonly=true) DISTRIBUTED BY (i); -CREATE -1: BEGIN; -BEGIN -1: INSERT INTO t_ao SELECT generate_series(1, 10000); -INSERT 10000 -2: BEGIN; -BEGIN -2: INSERT INTO t_ao SELECT generate_series(1, 10000); -INSERT 10000 -1: COMMIT; -COMMIT -2: COMMIT; -COMMIT -SELECT diskquota.relation_size('t_ao'); - relation_size ---------------- - 200400 -(1 row) -SELECT pg_relation_size('t_ao'); - pg_relation_size ------------------- - 200400 -(1 row) -DROP TABLE t_ao; -DROP diff --git a/tests/isolation2/expected7/test_truncate.out b/tests/isolation2/expected7/test_truncate.out deleted file mode 100644 index 4964f6ec177..00000000000 --- a/tests/isolation2/expected7/test_truncate.out +++ /dev/null @@ -1,86 +0,0 @@ --- Test various race conditions for TRUNCATE. - --- Case 1: Pulling active table before swapping relfilenode -CREATE TABLE dummy_t1(i int) DISTRIBUTED BY (i); -CREATE -INSERT INTO dummy_t1 SELECT generate_series(1, 1000); -INSERT 1000 --- Wait for the diskquota bgworker refreshing the size of 'dummy_t1'. 
-SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) --- Shows that the result of pg_table_size() and diskquota.table_size are identical. -SELECT pg_table_size('dummy_t1'); - pg_table_size ---------------- - 98304 -(1 row) -SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid='dummy_t1'::regclass ORDER BY segid; - tableid | size | segid -----------+-------+------- - dummy_t1 | 98304 | -1 - dummy_t1 | 32768 | 0 - dummy_t1 | 32768 | 1 - dummy_t1 | 32768 | 2 -(4 rows) - -SELECT gp_inject_fault_infinite('diskquota_after_smgrcreate', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content<>-1; - gp_inject_fault_infinite --------------------------- - Success: - Success: - Success: -(3 rows) -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) -1&: TRUNCATE dummy_t1; -SELECT gp_wait_until_triggered_fault('diskquota_after_smgrcreate', 1, dbid) FROM gp_segment_configuration WHERE role='p' AND content<>-1; - gp_wait_until_triggered_fault -------------------------------- - Success: - Success: - Success: -(3 rows) --- Wait for the diskquota bgworker to 'consume' the newly created relfilenode from shmem. -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) -SELECT gp_inject_fault_infinite('diskquota_after_smgrcreate', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content<>-1; - gp_inject_fault_infinite --------------------------- - Success: - Success: - Success: -(3 rows) -1<: <... completed> -TRUNCATE - --- Wait for the diskquota bgworker refreshing the size of 'dummy_t1'. -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) --- Shows that the result of pg_table_size() and diskquota.table_size are identical. -SELECT pg_table_size('dummy_t1'); - pg_table_size ---------------- - 0 -(1 row) -SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid='dummy_t1'::regclass ORDER BY segid; - tableid | size | segid -----------+------+------- - dummy_t1 | 0 | -1 - dummy_t1 | 0 | 0 - dummy_t1 | 0 | 1 - dummy_t1 | 0 | 2 -(4 rows) -DROP TABLE dummy_t1; -DROP diff --git a/tests/isolation2/expected7/test_vacuum.out b/tests/isolation2/expected7/test_vacuum.out deleted file mode 100644 index eb43793236e..00000000000 --- a/tests/isolation2/expected7/test_vacuum.out +++ /dev/null @@ -1,99 +0,0 @@ --- This file tests various race conditions when performing 'VACUUM FULL'. - --- 1. When gpdb is performing 'VACUUM FULL' on some relation, it can be summarized --- as the following 3 steps: --- s1) create a new temporary relation (smgrcreate hook will be triggered, newly --- created relfilenode will be put into shmem). --- s2) insert data into the newly created relation from the old relation (smgrextend --- hook will be triggered, newly created relfilenode will be put into shmem). --- s3) change the old relation's relfilenode to the newly created one. --- Consider the following situation: --- If the diskquota bgworker pulls active oids before the 'VACUUM FULL' operation finishes, --- the newly created relfilenode is translated to the newly created temporary relation's oid, --- and the old relation's size cannot be updated. We resolve it by making altered relations' oids --- constantly active so that the diskquota bgworker keeps updating the altered relation size --- during 'VACUUM FULL'.
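As a minimal sketch of the relfilenode swap in step s3 above (not part of the original test suite; 'vacuum_demo' is a hypothetical scratch table), the swap can be observed directly in pg_class:

    CREATE TABLE vacuum_demo(i int) DISTRIBUTED BY (i);
    -- Record the relfilenode that currently backs the table.
    SELECT relfilenode FROM pg_class WHERE relname = 'vacuum_demo';
    -- VACUUM FULL rewrites the data into a newly created relfilenode (steps s1 and s2),
    -- then swaps it into pg_class (step s3).
    VACUUM FULL vacuum_demo;
    -- The relfilenode reported here differs from the one recorded above.
    SELECT relfilenode FROM pg_class WHERE relname = 'vacuum_demo';
    DROP TABLE vacuum_demo;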
-CREATE TABLE dummy_t1(i int) DISTRIBUTED BY (i); -CREATE -INSERT INTO dummy_t1 SELECT generate_series(1, 1000); -INSERT 1000 -DELETE FROM dummy_t1; -DELETE 1000 --- Wait for the diskquota bgworker refreshing the size of 'dummy_t1'. -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) --- Shows that the result of pg_table_size() and diskquota.table_size are identical. -SELECT pg_table_size('dummy_t1'); - pg_table_size ---------------- - 98304 -(1 row) -SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid='dummy_t1'::regclass ORDER BY segid; - tableid | size | segid -----------+-------+------- - dummy_t1 | 98304 | -1 - dummy_t1 | 32768 | 0 - dummy_t1 | 32768 | 1 - dummy_t1 | 32768 | 2 -(4 rows) -SELECT gp_inject_fault_infinite('object_access_post_alter', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content<>-1; - gp_inject_fault_infinite --------------------------- - Success: - Success: - Success: -(3 rows) -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) -1&: VACUUM FULL dummy_t1; -SELECT gp_wait_until_triggered_fault('object_access_post_alter', 1, dbid) FROM gp_segment_configuration WHERE role='p' AND content<>-1; - gp_wait_until_triggered_fault -------------------------------- - Success: - Success: - Success: -(3 rows) --- Wait for the diskquota bgworker to 'consume' the newly created relfilenode from shmem. -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) -SELECT gp_inject_fault_infinite('object_access_post_alter', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content<>-1; - gp_inject_fault_infinite --------------------------- - Success: - Success: - Success: -(3 rows) -1<: <... completed> -VACUUM - --- Wait for the diskquota bgworker refreshing the size of 'dummy_t1'. -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) --- Shows that the result of pg_table_size() and diskquota.table_size are identical.
-SELECT pg_table_size('dummy_t1'); - pg_table_size ---------------- - 0 -(1 row) -SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid='dummy_t1'::regclass ORDER BY segid; - tableid | size | segid -----------+------+------- - dummy_t1 | 0 | -1 - dummy_t1 | 0 | 0 - dummy_t1 | 0 | 1 - dummy_t1 | 0 | 2 -(4 rows) -DROP TABLE dummy_t1; -DROP diff --git a/tests/isolation2/expected7/test_worker_timeout.out b/tests/isolation2/expected7/test_worker_timeout.out deleted file mode 100644 index 5f855a7b80c..00000000000 --- a/tests/isolation2/expected7/test_worker_timeout.out +++ /dev/null @@ -1,38 +0,0 @@ -!\retcode gpconfig -c diskquota.worker_timeout -v 1; -(exited with code 0) -!\retcode gpstop -u; -(exited with code 0) - -SELECT gp_inject_fault_infinite('diskquota_worker_main', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=-1; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) - -1&: SELECT diskquota.wait_for_worker_new_epoch(); - -SELECT pg_sleep(2 * current_setting('diskquota.worker_timeout')::int); - pg_sleep ----------- - -(1 row) - -SELECT pg_cancel_backend(pid) FROM pg_stat_activity WHERE query = 'SELECT diskquota.wait_for_worker_new_epoch();'; - pg_cancel_backend -------------------- - t -(1 row) - -SELECT gp_inject_fault_infinite('diskquota_worker_main', 'resume', dbid) FROM gp_segment_configuration WHERE role='p' AND content=-1; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) - -1<: <... completed> -ERROR: canceling statement due to user request - -!\retcode gpconfig -r diskquota.worker_timeout; -(exited with code 0) -!\retcode gpstop -u; -(exited with code 0) diff --git a/tests/isolation2/isolation2_schedule7 b/tests/isolation2/isolation2_schedule7 index 56792ee63e8..04e9b5c7aa5 100644 --- a/tests/isolation2/isolation2_schedule7 +++ b/tests/isolation2/isolation2_schedule7 @@ -2,7 +2,7 @@ test: config test: test_create_extension test: test_fast_quota_view test: test_relation_size -# test: test_rejectmap +test: test_rejectmap test: test_vacuum test: test_truncate # test: test_postmaster_restart diff --git a/tests/isolation2/sql/test_rejectmap.sql b/tests/isolation2/sql/test_rejectmap.in.sql similarity index 97% rename from tests/isolation2/sql/test_rejectmap.sql rename to tests/isolation2/sql/test_rejectmap.in.sql index 41267c56b57..3ad115f12c4 100644 --- a/tests/isolation2/sql/test_rejectmap.sql +++ b/tests/isolation2/sql/test_rejectmap.in.sql @@ -230,7 +230,7 @@ DROP TABLE blocked_t6; -- Below are helper functions for testing adding uncommitted relations to rejectmap. -- -- start_ignore -CREATE OR REPLACE LANGUAGE plpythonu; +CREATE OR REPLACE LANGUAGE @PLPYTHON_LANG_STR@; -- end_ignore CREATE TYPE cached_relation_entry AS ( reloid oid, @@ -253,18 +253,27 @@ AS $$ """) with open(filename, 'wt') as f: for v in rv: - f.write(v['row'][1:-1] + '\n') -$$ LANGUAGE plpythonu; + row = v['row'] + # The composite type results are different between GP6 & GP7 + if isinstance(row, dict): + r = "{0},{1},{2},{3},{4},{5},{6}".format( + row['reloid'], row['relname'], row['relowner'], + row['relnamespace'], row['reltablespace'], + row['relfilenode'], row['segid']) + else: + r = row[1:-1] + f.write(r + '\n') +$$ LANGUAGE @PLPYTHON_LANG_STR@; -- This function reads relation_cache entries from the given file. 
CREATE OR REPLACE FUNCTION read_relation_cache_from_file(filename text) RETURNS SETOF cached_relation_entry AS $$ - with open(filename) as f: - for l in f: - r = l.split(',') - yield (r[0], r[1], r[2], r[3], r[4], r[5], r[6]) -$$ LANGUAGE plpythonu; + with open(filename) as f: + for l in f: + r = l.split(',') + yield (r[0], r[1], r[2], r[3], r[4], r[5], r[6]) +$$ LANGUAGE @PLPYTHON_LANG_STR@; -- This function replaces the oid that appears in the auxiliary relation's name -- with the corresponding relname of that oid. @@ -275,9 +284,9 @@ CREATE OR REPLACE FUNCTION replace_oid_with_relname(given_name text, filename te REGEXP_REPLACE(given_name, /*in func*/ '^(pg_toast_|pg_aoseg_|pg_aovisimap_|pg_aoblkdir_|pg_aocsseg_)\d+', /*in func*/ '\1' || /*in func*/ - (SELECT DISTINCT relname FROM read_relation_cache_from_file(filename) /*in func*/ - WHERE REGEXP_REPLACE(given_name, '\D', '', 'g') <> '' - AND reloid=REGEXP_REPLACE(given_name, '\D', '', 'g')::oid), 'g'), given_name);/*in func*/ + (SELECT DISTINCT relname FROM read_relation_cache_from_file(filename) /*in func*/ - WHERE REGEXP_REPLACE(given_name, '\D', '', 'g') <> '' /*in func*/ + AND reloid=REGEXP_REPLACE(given_name, '\D', '', 'g')::oid), 'g'), given_name); /*in func*/ END; /*in func*/ $$ LANGUAGE plpgsql; diff --git a/tests/regress/expected7/config.out b/tests/regress/expected7/config.out deleted file mode 100644 index d266f9bf501..00000000000 --- a/tests/regress/expected7/config.out +++ /dev/null @@ -1,70 +0,0 @@ ---start_ignore -CREATE DATABASE diskquota; -ERROR: database "diskquota" already exists -\! gpconfig -c shared_preload_libraries -v $(./data/current_binary_name); -20230117:12:40:53:1895897 gpconfig:zhrt:zhrt-[INFO]:-completed successfully with parameters '-c shared_preload_libraries -v diskquota-2.2.so' -\! gpconfig -c diskquota.naptime -v 0 --skipvalidation -20230117:12:40:53:1896062 gpconfig:zhrt:zhrt-[INFO]:-completed successfully with parameters '-c diskquota.naptime -v 0 --skipvalidation' -\! gpconfig -c max_worker_processes -v 20 --skipvalidation -20230117:12:40:54:1896331 gpconfig:zhrt:zhrt-[INFO]:-completed successfully with parameters '-c max_worker_processes -v 20 --skipvalidation' -\! gpconfig -c diskquota.hard_limit -v "off" --skipvalidation -20230117:12:40:55:1896588 gpconfig:zhrt:zhrt-[INFO]:-completed successfully with parameters '-c diskquota.hard_limit -v off --skipvalidation' -\! gpconfig -c diskquota.max_workers -v 1 --skipvalidation -20230117:12:40:55:1896848 gpconfig:zhrt:zhrt-[INFO]:-completed successfully with parameters '-c diskquota.max_workers -v 1 --skipvalidation' -\! gpconfig -c log_min_messages -v debug1 -20230117:12:40:56:1897088 gpconfig:zhrt:zhrt-[INFO]:-completed successfully with parameters '-c log_min_messages -v debug1' -\! gpstop -raf -20230117:12:40:56:1897362 gpstop:zhrt:zhrt-[INFO]:-Starting gpstop with args: -raf -20230117:12:40:56:1897362 gpstop:zhrt:zhrt-[INFO]:-Gathering information and validating the environment... -20230117:12:40:56:1897362 gpstop:zhrt:zhrt-[INFO]:-Obtaining Greenplum Coordinator catalog information -20230117:12:40:56:1897362 gpstop:zhrt:zhrt-[INFO]:-Obtaining Segment details from coordinator...
-20230117:12:40:56:1897362 gpstop:zhrt:zhrt-[INFO]:-Greenplum Version: 'postgres (Greenplum Database) 7.0.0-alpha.0+dev.16171.g005ee83c46 build dev' -20230117:12:40:56:1897362 gpstop:zhrt:zhrt-[INFO]:-Commencing Coordinator instance shutdown with mode='fast' -20230117:12:40:56:1897362 gpstop:zhrt:zhrt-[INFO]:-Coordinator segment instance directory=/home/zhrt/workspace/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 -20230117:12:40:56:1897362 gpstop:zhrt:zhrt-[INFO]:-Attempting forceful termination of any leftover coordinator process -20230117:12:40:56:1897362 gpstop:zhrt:zhrt-[INFO]:-Terminating processes for segment /home/zhrt/workspace/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 -20230117:12:40:56:1897362 gpstop:zhrt:zhrt-[INFO]:-Stopping coordinator standby host zhrt mode=fast -20230117:12:40:57:1897362 gpstop:zhrt:zhrt-[INFO]:-Successfully shutdown standby process on zhrt -20230117:12:40:57:1897362 gpstop:zhrt:zhrt-[INFO]:-Targeting dbid [2, 5, 3, 6, 4, 7] for shutdown -20230117:12:40:57:1897362 gpstop:zhrt:zhrt-[INFO]:-Commencing parallel primary segment instance shutdown, please wait... -20230117:12:40:57:1897362 gpstop:zhrt:zhrt-[INFO]:-0.00% of jobs completed -20230117:12:40:57:1897362 gpstop:zhrt:zhrt-[INFO]:-100.00% of jobs completed -20230117:12:40:57:1897362 gpstop:zhrt:zhrt-[INFO]:-Commencing parallel mirror segment instance shutdown, please wait... -20230117:12:40:57:1897362 gpstop:zhrt:zhrt-[INFO]:-0.00% of jobs completed -20230117:12:40:58:1897362 gpstop:zhrt:zhrt-[INFO]:-100.00% of jobs completed -20230117:12:40:58:1897362 gpstop:zhrt:zhrt-[INFO]:----------------------------------------------------- -20230117:12:40:58:1897362 gpstop:zhrt:zhrt-[INFO]:- Segments stopped successfully = 6 -20230117:12:40:58:1897362 gpstop:zhrt:zhrt-[INFO]:- Segments with errors during stop = 0 -20230117:12:40:58:1897362 gpstop:zhrt:zhrt-[INFO]:----------------------------------------------------- -20230117:12:40:58:1897362 gpstop:zhrt:zhrt-[INFO]:-Successfully shutdown 6 of 6 segment instances -20230117:12:40:58:1897362 gpstop:zhrt:zhrt-[INFO]:-Database successfully shutdown with no errors reported -20230117:12:40:58:1897362 gpstop:zhrt:zhrt-[INFO]:-Restarting System... ---end_ignore -\c --- Show the values of all GUC variables --- start_ignore -SHOW diskquota.naptime; - diskquota.naptime -------------------- - 0 -(1 row) - --- end_ignore -SHOW diskquota.max_active_tables; - diskquota.max_active_tables ------------------------------ - 307200 -(1 row) - -SHOW diskquota.worker_timeout; - diskquota.worker_timeout --------------------------- - 60 -(1 row) - -SHOW diskquota.hard_limit; - diskquota.hard_limit ----------------------- - off -(1 row) - diff --git a/tests/regress/expected7/reset_config.out b/tests/regress/expected7/reset_config.out deleted file mode 100644 index 9f6797259a7..00000000000 --- a/tests/regress/expected7/reset_config.out +++ /dev/null @@ -1,17 +0,0 @@ ---start_ignore -\! gpconfig -c diskquota.naptime -v 2 -20230117:13:11:41:2012767 gpconfig:zhrt:zhrt-[INFO]:-completed successfully with parameters '-c diskquota.naptime -v 2' -\! gpstop -u -20230117:13:11:41:2012942 gpstop:zhrt:zhrt-[INFO]:-Starting gpstop with args: -u -20230117:13:11:41:2012942 gpstop:zhrt:zhrt-[INFO]:-Gathering information and validating the environment... -20230117:13:11:41:2012942 gpstop:zhrt:zhrt-[INFO]:-Obtaining Greenplum Coordinator catalog information -20230117:13:11:41:2012942 gpstop:zhrt:zhrt-[INFO]:-Obtaining Segment details from coordinator... 
-20230117:13:11:41:2012942 gpstop:zhrt:zhrt-[INFO]:-Greenplum Version: 'postgres (Greenplum Database) 7.0.0-alpha.0+dev.16171.g005ee83c46 build dev' -20230117:13:11:41:2012942 gpstop:zhrt:zhrt-[INFO]:-Signalling all postmaster processes to reload ---end_ignore -SHOW diskquota.naptime; - diskquota.naptime -------------------- - 2 -(1 row) - diff --git a/tests/regress/expected7/test_activetable_limit.out b/tests/regress/expected7/test_activetable_limit.out deleted file mode 100644 index c556f32bb38..00000000000 --- a/tests/regress/expected7/test_activetable_limit.out +++ /dev/null @@ -1,56 +0,0 @@ --- a table in a database where diskquota is not enabled should not be an active table -\! gpconfig -c diskquota.max_active_tables -v 2 > /dev/null -\! gpstop -arf > /dev/null -\c -CREATE DATABASE test_tablenum_limit_01; -CREATE DATABASE test_tablenum_limit_02; -\c test_tablenum_limit_01 -CREATE TABLE a01(i int) DISTRIBUTED BY (i); -CREATE TABLE a02(i int) DISTRIBUTED BY (i); -CREATE TABLE a03(i int) DISTRIBUTED BY (i); -INSERT INTO a01 values(generate_series(0, 500)); -INSERT INTO a02 values(generate_series(0, 500)); -INSERT INTO a03 values(generate_series(0, 500)); -\c test_tablenum_limit_02 -CREATE EXTENSION diskquota; -CREATE SCHEMA s; -SELECT diskquota.set_schema_quota('s', '1 MB'); - set_schema_quota ------------------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -CREATE TABLE s.t1(i int) DISTRIBUTED BY (i); -- activetable = 1 -INSERT INTO s.t1 SELECT generate_series(1, 100000); -- ok. the diskquota soft limit is not checked on a table's first write -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -CREATE TABLE s.t2(i int) DISTRIBUTED BY (i); -- activetable = 2 -INSERT INTO s.t2 SELECT generate_series(1, 10); -- expect failed -ERROR: schema's disk space quota exceeded with name: s -CREATE TABLE s.t3(i int) DISTRIBUTED BY (i); -- activetable = 3 should not crash. -INSERT INTO s.t3 SELECT generate_series(1, 10); -- expect failed -ERROR: schema's disk space quota exceeded with name: s --- Q: why does diskquota still work when activetable = 3? --- A: the active table limit is enforced by the shmem size, calculated by hash_estimate_size(); --- the result will be bigger than sizeof(DiskQuotaActiveTableEntry) * max_active_tables, and --- the real capacity of this data structure depends on the hash conflict probability, --- so we cannot predict when the data structure will be completely filled. --- --- this test case is of little value; remove it if anyone dislikes it. --- but the hash capacity is smaller than 6, so the test case works for issue 51 -DROP EXTENSION diskquota; -\c contrib_regression -DROP DATABASE test_tablenum_limit_01; -DROP DATABASE test_tablenum_limit_02; -\! gpconfig -r diskquota.max_active_tables > /dev/null -\! gpstop -arf > /dev/null diff --git a/tests/regress/expected7/test_clean_rejectmap_after_drop.out b/tests/regress/expected7/test_clean_rejectmap_after_drop.out deleted file mode 100644 index 30c63756cce..00000000000 --- a/tests/regress/expected7/test_clean_rejectmap_after_drop.out +++ /dev/null @@ -1,42 +0,0 @@ -CREATE DATABASE test_clean_rejectmap_after_drop; -\c test_clean_rejectmap_after_drop -CREATE EXTENSION diskquota; -\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null -\!
gpstop -u > /dev/null -CREATE ROLE r; -NOTICE: resource queue required -- using default resource queue "pg_default" -SELECT diskquota.set_role_quota('r', '1MB'); - set_role_quota ----------------- - -(1 row) - -CREATE TABLE b (t TEXT) DISTRIBUTED BY (t); -ALTER TABLE b OWNER TO r; -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -INSERT INTO b SELECT generate_series(1, 100000000); -- fail -ERROR: role's disk space quota exceeded with name: 40071 (seg1 127.0.0.1:7003 pid=1958088) -SELECT diskquota.pause(); - pause -------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -DROP EXTENSION diskquota; -INSERT INTO b SELECT generate_series(1, 100); -- ok -\c contrib_regression -DROP DATABASE test_clean_rejectmap_after_drop; -DROP ROLE r; -\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null -\! gpstop -u > /dev/null diff --git a/tests/regress/expected7/test_column.out b/tests/regress/expected7/test_column.out deleted file mode 100644 index a5eb051c755..00000000000 --- a/tests/regress/expected7/test_column.out +++ /dev/null @@ -1,42 +0,0 @@ --- Test alter table add column -CREATE SCHEMA scolumn; -SELECT diskquota.set_schema_quota('scolumn', '1 MB'); - set_schema_quota ------------------- - -(1 row) - -SET search_path TO scolumn; -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -CREATE TABLE a2(i INT) DISTRIBUTED BY (i); --- expect fail -INSERT INTO a2 SELECT generate_series(1,100000); -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect fail -INSERT INTO a2 SELECT generate_series(1,10); -ERROR: schema's disk space quota exceeded with name: scolumn -ALTER TABLE a2 ADD COLUMN j VARCHAR(50); -UPDATE a2 SET j = 'add value for column j'; -ERROR: schema's disk space quota exceeded with name: scolumn -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect insert failed after add column -INSERT INTO a2 SELECT generate_series(1,10); -ERROR: schema's disk space quota exceeded with name: scolumn -DROP TABLE a2; -RESET search_path; -DROP SCHEMA scolumn; diff --git a/tests/regress/expected7/test_copy.out b/tests/regress/expected7/test_copy.out deleted file mode 100644 index 2c3fff9ff84..00000000000 --- a/tests/regress/expected7/test_copy.out +++ /dev/null @@ -1,26 +0,0 @@ --- Test copy -CREATE SCHEMA s3; -SELECT diskquota.set_schema_quota('s3', '1 MB'); - set_schema_quota ------------------- - -(1 row) - -SET search_path TO s3; -\! 
seq 100 > /tmp/csmall.txt -CREATE TABLE c (i int) DISTRIBUTED BY (i); -COPY c FROM '/tmp/csmall.txt'; --- expect failed -INSERT INTO c SELECT generate_series(1,100000); -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect copy fail -COPY c FROM '/tmp/csmall.txt'; -ERROR: schema's disk space quota exceeded with name: s3 -DROP TABLE c; -RESET search_path; -DROP SCHEMA s3; diff --git a/tests/regress/expected7/test_create_extension.out b/tests/regress/expected7/test_create_extension.out deleted file mode 100644 index a90178ce350..00000000000 --- a/tests/regress/expected7/test_create_extension.out +++ /dev/null @@ -1,14 +0,0 @@ -CREATE EXTENSION diskquota; -SELECT diskquota.init_table_size_table(); - init_table_size_table ------------------------ - -(1 row) - --- Wait after init so that diskquota.state is clean -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - diff --git a/tests/regress/expected7/test_ctas_before_set_quota.out b/tests/regress/expected7/test_ctas_before_set_quota.out deleted file mode 100644 index ac69b2b5226..00000000000 --- a/tests/regress/expected7/test_ctas_before_set_quota.out +++ /dev/null @@ -1,61 +0,0 @@ -CREATE ROLE test SUPERUSER; -SET ROLE test; -CREATE TABLE t_before_set_quota (i) AS SELECT generate_series(1, 100000) -DISTRIBUTED BY (i); -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -SELECT tableid::regclass, size, segid FROM diskquota.table_size -WHERE tableid = 't_before_set_quota'::regclass ORDER BY segid; - tableid | size | segid ---------------------+---------+------- - t_before_set_quota | 3637248 | -1 - t_before_set_quota | 1212416 | 0 - t_before_set_quota | 1212416 | 1 - t_before_set_quota | 1212416 | 2 -(4 rows) - --- Ensure that the table is not active -SELECT diskquota.diskquota_fetch_table_stat(0, ARRAY[]::oid[]) -FROM gp_dist_random('gp_id'); - diskquota_fetch_table_stat ----------------------------- -(0 rows) - -SELECT diskquota.set_role_quota(current_role, '1MB'); - set_role_quota ----------------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- Expect that current role is in the rejectmap -SELECT rolname FROM pg_authid, diskquota.rejectmap WHERE oid = target_oid; - rolname ---------- - test -(1 row) - -SELECT diskquota.set_role_quota(current_role, '-1'); - set_role_quota ----------------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -DROP TABLE t_before_set_quota; -RESET ROLE; -DROP ROLE test; diff --git a/tests/regress/expected7/test_ctas_no_preload_lib.out b/tests/regress/expected7/test_ctas_no_preload_lib.out deleted file mode 100644 index b85a18ac92b..00000000000 --- a/tests/regress/expected7/test_ctas_no_preload_lib.out +++ /dev/null @@ -1,85 +0,0 @@ -\! gpconfig -c shared_preload_libraries -v '' > /dev/null -\! gpstop -far > /dev/null -\c -CREATE ROLE test SUPERUSER; -SET ROLE test; --- Create table with diskquota disabled -CREATE TABLE t_without_diskquota (i) AS SELECT generate_series(1, 100000) -DISTRIBUTED BY (i); -\! gpconfig -c shared_preload_libraries -v $(./data/current_binary_name) > /dev/null -\! 
gpstop -far > /dev/null -\c -SET ROLE test; --- Init table_size to include the table -SELECT diskquota.init_table_size_table(); - init_table_size_table ------------------------ - -(1 row) - --- Restart to load diskquota.table_size to the memory. -\! gpstop -far > /dev/null -\c -SET ROLE test; -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -SELECT tableid::regclass, size, segid FROM diskquota.table_size -WHERE tableid = 't_without_diskquota'::regclass ORDER BY segid; - tableid | size | segid ----------------------+---------+------- - t_without_diskquota | 3637248 | -1 - t_without_diskquota | 1212416 | 0 - t_without_diskquota | 1212416 | 1 - t_without_diskquota | 1212416 | 2 -(4 rows) - --- Ensure that the table is not active -SELECT diskquota.diskquota_fetch_table_stat(0, ARRAY[]::oid[]) -FROM gp_dist_random('gp_id'); - diskquota_fetch_table_stat ----------------------------- -(0 rows) - -SELECT diskquota.set_role_quota(current_role, '1MB'); - set_role_quota ----------------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- Expect that current role is in the rejectmap -SELECT rolname FROM pg_authid, diskquota.rejectmap WHERE oid = target_oid; - rolname ---------- - test -(1 row) - -SELECT diskquota.set_role_quota(current_role, '-1'); - set_role_quota ----------------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -SELECT rolname FROM pg_authid, diskquota.rejectmap WHERE oid = target_oid; - rolname ---------- -(0 rows) - -DROP TABLE t_without_diskquota; -RESET ROLE; -DROP ROLE test; diff --git a/tests/regress/expected7/test_ctas_pause.out b/tests/regress/expected7/test_ctas_pause.out deleted file mode 100644 index 76e02f10be1..00000000000 --- a/tests/regress/expected7/test_ctas_pause.out +++ /dev/null @@ -1,37 +0,0 @@ -CREATE SCHEMA hardlimit_s; -SET search_path TO hardlimit_s; -\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null -\! gpstop -u > /dev/null -SELECT diskquota.set_schema_quota('hardlimit_s', '1 MB'); - set_schema_quota ------------------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- heap table -CREATE TABLE t1 (i) AS SELECT generate_series(1,10000000) DISTRIBUTED BY (i); -- expect fail -ERROR: schema's disk space quota exceeded with name: 40272 (seg0 127.0.0.1:7002 pid=1962803) -SELECT diskquota.pause(); - pause -------- - -(1 row) - -CREATE TABLE t1 (i) AS SELECT generate_series(1,10000000) DISTRIBUTED BY (i); -- expect succeed --- disable hardlimit and do some clean-ups. -\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null -\! gpstop -u > /dev/null -SELECT diskquota.resume(); - resume --------- - -(1 row) - -DROP SCHEMA hardlimit_s CASCADE; -NOTICE: drop cascades to table t1 diff --git a/tests/regress/expected7/test_ctas_role.out b/tests/regress/expected7/test_ctas_role.out deleted file mode 100644 index facb95b5b12..00000000000 --- a/tests/regress/expected7/test_ctas_role.out +++ /dev/null @@ -1,81 +0,0 @@ --- Test that diskquota is able to cancel a running CTAS query by the role quota. --- start_ignore -\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null -\! 
gpstop -u > /dev/null --- end_ignore -CREATE ROLE hardlimit_r; -NOTICE: resource queue required -- using default resource queue "pg_default" -SELECT diskquota.set_role_quota('hardlimit_r', '1MB'); - set_role_quota ----------------- - -(1 row) - -GRANT USAGE ON SCHEMA diskquota TO hardlimit_r; -SET ROLE hardlimit_r; --- heap table -CREATE TABLE t1 (i) AS SELECT generate_series(1, 100000000) DISTRIBUTED BY (i); -ERROR: role's disk space quota exceeded with name: 40279 (seg1 127.0.0.1:7003 pid=1964560) -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- temp table -CREATE TEMP TABLE t2 (i) AS SELECT generate_series(1, 100000000); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. -ERROR: role's disk space quota exceeded with name: 40279 (seg1 127.0.0.1:7003 pid=1964560) -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- toast table -CREATE TABLE toast_table (i) AS SELECT ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000) DISTRIBUTED BY (i); -ERROR: role's disk space quota exceeded with name: 40279 (seg1 127.0.0.1:7003 pid=1964560) -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- ao table -CREATE TABLE ao_table (i) WITH (appendonly=true) AS SELECT generate_series(1, 100000000) DISTRIBUTED BY (i); -ERROR: role's disk space quota exceeded with name: 40279 (seg0 127.0.0.1:7002 pid=1964561) -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- aocs table -CREATE TABLE aocs_table WITH (appendonly=true, orientation=column) - AS SELECT i, ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000) AS i; -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'i' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. -ERROR: role's disk space quota exceeded with name: 40279 (seg0 127.0.0.1:7002 pid=1964561) -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- disable hardlimit and do some clean-ups. -DROP TABLE IF EXISTS t1; -NOTICE: table "t1" does not exist, skipping -DROP TABLE IF EXISTS t2; -NOTICE: table "t2" does not exist, skipping -DROP TABLE IF EXISTS toast_table; -NOTICE: table "toast_table" does not exist, skipping -DROP TABLE IF EXISTS ao_table; -NOTICE: table "ao_table" does not exist, skipping -DROP TABLE IF EXISTS aocs_table; -NOTICE: table "aocs_table" does not exist, skipping -RESET ROLE; -REVOKE USAGE ON SCHEMA diskquota FROM hardlimit_r; -DROP ROLE hardlimit_r; -\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null -\! 
gpstop -u > /dev/null diff --git a/tests/regress/expected7/test_ctas_schema.out b/tests/regress/expected7/test_ctas_schema.out deleted file mode 100644 index e2e810d6f53..00000000000 --- a/tests/regress/expected7/test_ctas_schema.out +++ /dev/null @@ -1,64 +0,0 @@ --- Test that diskquota is able to cancel a running CTAS query by the schema quota. -\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null -\! gpstop -u > /dev/null -CREATE SCHEMA hardlimit_s; -SELECT diskquota.set_schema_quota('hardlimit_s', '1 MB'); - set_schema_quota ------------------- - -(1 row) - -SET search_path TO hardlimit_s; --- heap table -CREATE TABLE t1 (i) AS SELECT generate_series(1, 100000000) DISTRIBUTED BY (i); -ERROR: schema's disk space quota exceeded with name: 40394 (seg2 127.0.0.1:7004 pid=1966566) -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- toast table -CREATE TABLE toast_table (i) - AS SELECT ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000) DISTRIBUTED BY (i); -ERROR: schema's disk space quota exceeded with name: 40394 (seg1 127.0.0.1:7003 pid=1966565) -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- ao table -CREATE TABLE ao_table (i) WITH (appendonly=true) AS SELECT generate_series(1, 100000000) DISTRIBUTED BY (i); -ERROR: schema's disk space quota exceeded with name: 40394 (seg0 127.0.0.1:7002 pid=1966564) -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- aocs table -CREATE TABLE aocs_table WITH (appendonly=true, orientation=column) - AS SELECT i, ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000) AS i; -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'i' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. -ERROR: schema's disk space quota exceeded with name: 40394 (seg2 127.0.0.1:7004 pid=1966566) -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- disable hardlimit and do some clean-ups. -\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null -\! gpstop -u > /dev/null -DROP TABLE IF EXISTS t1; -NOTICE: table "t1" does not exist, skipping -DROP TABLE IF EXISTS toast_table; -NOTICE: table "toast_table" does not exist, skipping -DROP TABLE IF EXISTS ao_table; -NOTICE: table "ao_table" does not exist, skipping -DROP TABLE IF EXISTS aocs_table; -NOTICE: table "aocs_table" does not exist, skipping -RESET search_path; -DROP SCHEMA hardlimit_s; diff --git a/tests/regress/expected7/test_ctas_tablespace_role.out b/tests/regress/expected7/test_ctas_tablespace_role.out deleted file mode 100644 index c6d3bb6302e..00000000000 --- a/tests/regress/expected7/test_ctas_tablespace_role.out +++ /dev/null @@ -1,78 +0,0 @@ --- Test that diskquota is able to cancel a running CTAS query by the tablespace role quota. -\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null -\! gpstop -u > /dev/null --- start_ignore -\! mkdir -p /tmp/ctas_rolespc --- end_ignore --- prepare role and tablespace. 
-DROP TABLESPACE IF EXISTS ctas_rolespc; -NOTICE: tablespace "ctas_rolespc" does not exist, skipping -CREATE TABLESPACE ctas_rolespc LOCATION '/tmp/ctas_rolespc'; -CREATE ROLE hardlimit_r; -NOTICE: resource queue required -- using default resource queue "pg_default" -GRANT USAGE ON SCHEMA diskquota TO hardlimit_r; -GRANT ALL ON TABLESPACE ctas_rolespc TO hardlimit_r; -SELECT diskquota.set_role_tablespace_quota('hardlimit_r', 'ctas_rolespc', '1 MB'); - set_role_tablespace_quota ---------------------------- - -(1 row) - -SET default_tablespace = ctas_rolespc; -SET ROLE hardlimit_r; --- heap table -CREATE TABLE t1 (i) AS SELECT generate_series(1, 100000000) DISTRIBUTED BY (i); -ERROR: tablespace: 40497, role: 40498 diskquota exceeded (seg1 127.0.0.1:7003 pid=1968424) -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- toast table -CREATE TABLE toast_table (i) - AS SELECT ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000) DISTRIBUTED BY (i); -ERROR: tablespace: 40497, role: 40498 diskquota exceeded (seg1 127.0.0.1:7003 pid=1968424) -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- ao table -CREATE TABLE ao_table (i) WITH (appendonly=true) AS SELECT generate_series(1, 100000000) DISTRIBUTED BY (i); -ERROR: tablespace: 40497, role: 40498 diskquota exceeded (seg1 127.0.0.1:7003 pid=1968424) -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- aocs table -CREATE TABLE aocs_table WITH (appendonly=true, orientation=column) - AS SELECT i, ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000) AS i DISTRIBUTED BY (i); -ERROR: tablespace: 40497, role: 40498 diskquota exceeded (seg1 127.0.0.1:7003 pid=1968424) -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- disable hardlimit and do some clean-ups. -DROP TABLE IF EXISTS t1; -NOTICE: table "t1" does not exist, skipping -DROP TABLE IF EXISTS t2; -NOTICE: table "t2" does not exist, skipping -DROP TABLE IF EXISTS toast_table; -NOTICE: table "toast_table" does not exist, skipping -DROP TABLE IF EXISTS ao_table; -NOTICE: table "ao_table" does not exist, skipping -DROP TABLE IF EXISTS aocs_table; -NOTICE: table "aocs_table" does not exist, skipping -RESET ROLE; -RESET default_tablespace; -DROP TABLESPACE ctas_rolespc; -REVOKE USAGE ON SCHEMA diskquota FROM hardlimit_r; -DROP ROLE hardlimit_r; -\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null -\! gpstop -u > /dev/null diff --git a/tests/regress/expected7/test_ctas_tablespace_schema.out b/tests/regress/expected7/test_ctas_tablespace_schema.out deleted file mode 100644 index 9c9bde2e190..00000000000 --- a/tests/regress/expected7/test_ctas_tablespace_schema.out +++ /dev/null @@ -1,74 +0,0 @@ --- Test that diskquota is able to cancel a running CTAS query by the tablespace schema quota. -\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null -\! gpstop -u > /dev/null --- start_ignore -\! 
mkdir -p /tmp/ctas_schemaspc --- end_ignore --- prepare tablespace and schema -DROP TABLESPACE IF EXISTS ctas_schemaspc; -NOTICE: tablespace "ctas_schemaspc" does not exist, skipping -CREATE TABLESPACE ctas_schemaspc LOCATION '/tmp/ctas_schemaspc'; -CREATE SCHEMA hardlimit_s; -SELECT diskquota.set_schema_tablespace_quota('hardlimit_s', 'ctas_schemaspc', '1 MB'); - set_schema_tablespace_quota ------------------------------ - -(1 row) - -SET search_path TO hardlimit_s; -SET default_tablespace = ctas_schemaspc; --- heap table -CREATE TABLE t1 (i) AS SELECT generate_series(1, 100000000) DISTRIBUTED BY (i); -ERROR: tablespace: 40635, schema: 40636 diskquota exceeded (seg0 127.0.0.1:7002 pid=1970360) -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- toast table -CREATE TABLE toast_table (i) - AS SELECT ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000) DISTRIBUTED BY (i); -ERROR: tablespace: 40635, schema: 40636 diskquota exceeded (seg1 127.0.0.1:7003 pid=1970361) -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- ao table -CREATE TABLE ao_table (i) WITH (appendonly=true) AS SELECT generate_series(1, 100000000) DISTRIBUTED BY (i); -ERROR: tablespace: 40635, schema: 40636 diskquota exceeded (seg0 127.0.0.1:7002 pid=1970360) -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- aocs table -CREATE TABLE aocs_table WITH (appendonly=true, orientation=column) - AS SELECT i, ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000) AS i DISTRIBUTED BY (i); -ERROR: tablespace: 40635, schema: 40636 diskquota exceeded (seg2 127.0.0.1:7004 pid=1970362) -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- disable hardlimit and do some clean-ups -DROP TABLE IF EXISTS t1; -NOTICE: table "t1" does not exist, skipping -DROP TABLE IF EXISTS t2; -NOTICE: table "t2" does not exist, skipping -DROP TABLE IF EXISTS toast_table; -NOTICE: table "toast_table" does not exist, skipping -DROP TABLE IF EXISTS ao_table; -NOTICE: table "ao_table" does not exist, skipping -DROP TABLE IF EXISTS aocs_table; -NOTICE: table "aocs_table" does not exist, skipping -RESET search_path; -RESET default_tablespace; -DROP SCHEMA hardlimit_s; -DROP TABLESPACE ctas_schemaspc; -\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null -\! gpstop -u > /dev/null diff --git a/tests/regress/expected7/test_default_tablespace.out b/tests/regress/expected7/test_default_tablespace.out deleted file mode 100644 index d14251b5dc5..00000000000 --- a/tests/regress/expected7/test_default_tablespace.out +++ /dev/null @@ -1,186 +0,0 @@ --- test role_tablespace_quota works with tables/databases in default tablespace --- test role_tablespace_quota works with tables/databases in non-default tablespace with hard limits on --- start_ignore -\! mkdir -p /tmp/custom_tablespace --- end_ignore -DROP ROLE if EXISTS role1; -NOTICE: role "role1" does not exist, skipping -DROP ROLE if EXISTS role2; -NOTICE: role "role2" does not exist, skipping -CREATE ROLE role1 SUPERUSER; -CREATE ROLE role2 SUPERUSER; -SET ROLE role1; -DROP TABLE if EXISTS t; -NOTICE: table "t" does not exist, skipping -CREATE TABLE t (i int) DISTRIBUTED BY (i); --- with hard limits off -\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null -\! 
gpstop -u > /dev/null -SELECT diskquota.set_role_tablespace_quota('role1', 'pg_default', '1 MB'); - set_role_tablespace_quota ---------------------------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect insert to success -INSERT INTO t SELECT generate_series(1, 100); -INSERT INTO t SELECT generate_series(1, 1000000); -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect insert to fail -INSERT INTO t SELECT generate_series(1, 1000000); -ERROR: tablespace: pg_default, role: role1 diskquota exceeded -SELECT r.rolname, t.spcname, b.target_type -FROM diskquota.rejectmap AS b, pg_tablespace AS t, pg_roles AS r -WHERE b.tablespace_oid = t.oid AND b.target_oid = r.oid AND r.rolname = 'role1' -ORDER BY r.rolname, t.spcname, b.target_type; - rolname | spcname | target_type ----------+------------+----------------------- - role1 | pg_default | ROLE_TABLESPACE_QUOTA -(1 row) - -DROP TABLE IF EXISTS t; -SELECT diskquota.set_role_tablespace_quota('role1', 'pg_default', '-1'); - set_role_tablespace_quota ---------------------------- - -(1 row) - -SET ROLE role2; -CREATE TABLE t (i int) DISTRIBUTED BY (i); --- with hard limits on -\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null -\! gpstop -u > /dev/null -SELECT diskquota.set_role_tablespace_quota('role2', 'pg_default', '1 MB'); - set_role_tablespace_quota ---------------------------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect insert to fail because of hard limits -INSERT INTO t SELECT generate_series(1, 50000000); -ERROR: tablespace: 1663, role: 40739 diskquota exceeded (seg0 127.0.0.1:7002 pid=1971570) -DROP TABLE IF EXISTS t; -SET ROLE role1; --- database in customized tablespace -CREATE TABLESPACE custom_tablespace LOCATION '/tmp/custom_tablespace'; -CREATE DATABASE db_with_tablespace TABLESPACE custom_tablespace; -\c db_with_tablespace; -SET ROLE role1; -CREATE EXTENSION diskquota; --- with hard limits off -\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null -\! 
gpstop -u > /dev/null -SELECT diskquota.set_role_tablespace_quota('role1', 'custom_tablespace', '1 MB'); - set_role_tablespace_quota ---------------------------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect insert to success -CREATE TABLE t_in_custom_tablespace (i) AS SELECT generate_series(1, 100) DISTRIBUTED BY (i); -INSERT INTO t_in_custom_tablespace SELECT generate_series(1, 1000000); -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect insert to fail -INSERT INTO t_in_custom_tablespace SELECT generate_series(1, 1000000); -ERROR: tablespace: custom_tablespace, role: role1 diskquota exceeded -SELECT r.rolname, t.spcname, b.target_type -FROM diskquota.rejectmap AS b, pg_tablespace AS t, pg_roles AS r -WHERE b.tablespace_oid = t.oid AND b.target_oid = r.oid AND r.rolname = 'role1' -ORDER BY r.rolname, t.spcname, b.target_type; - rolname | spcname | target_type ----------+-------------------+----------------------- - role1 | custom_tablespace | ROLE_TABLESPACE_QUOTA -(1 row) - -DROP TABLE IF EXISTS t_in_custom_tablespace; -SELECT diskquota.set_role_tablespace_quota('role1', 'custom_tablespace', '-1'); - set_role_tablespace_quota ---------------------------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -SET ROLE role2; --- with hard limits on -\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null -\! gpstop -u > /dev/null -SELECT diskquota.set_role_tablespace_quota('role2', 'custom_tablespace', '1 MB'); - set_role_tablespace_quota ---------------------------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -DROP TABLE IF EXISTS t_in_custom_tablespace; -NOTICE: table "t_in_custom_tablespace" does not exist, skipping --- expect insert to fail because of hard limits -CREATE TABLE t_in_custom_tablespace (i) AS SELECT generate_series(1, 50000000) DISTRIBUTED BY (i); -ERROR: tablespace: 40746, role: 40739 diskquota exceeded (seg2 127.0.0.1:7004 pid=1973467) --- clean up -DROP TABLE IF EXISTS t_in_custom_tablespace; -NOTICE: table "t_in_custom_tablespace" does not exist, skipping -\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null -\! 
gpstop -u > /dev/null -SELECT diskquota.pause(); - pause -------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -DROP EXTENSION IF EXISTS diskquota; -\c contrib_regression; -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -DROP DATABASE IF EXISTS db_with_tablespace; -DROP TABLESPACE IF EXISTS custom_tablespace; -RESET ROLE; -DROP ROLE IF EXISTS role1; -DROP ROLE IF EXISTS role2; diff --git a/tests/regress/expected7/test_delete_quota.out b/tests/regress/expected7/test_delete_quota.out deleted file mode 100644 index 967dd917f74..00000000000 --- a/tests/regress/expected7/test_delete_quota.out +++ /dev/null @@ -1,37 +0,0 @@ --- Test delete disk quota -CREATE SCHEMA deleteschema; -SELECT diskquota.set_schema_quota('deleteschema', '1 MB'); - set_schema_quota ------------------- - -(1 row) - -SET search_path TO deleteschema; -CREATE TABLE c (i INT) DISTRIBUTED BY (i); --- expect failed -INSERT INTO c SELECT generate_series(1,100000); -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect fail -INSERT INTO c SELECT generate_series(1,100); -ERROR: schema's disk space quota exceeded with name: deleteschema -SELECT diskquota.set_schema_quota('deleteschema', '-1 MB'); - set_schema_quota ------------------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -INSERT INTO c SELECT generate_series(1,100); -DROP TABLE c; -RESET search_path; -DROP SCHEMA deleteschema; diff --git a/tests/regress/expected7/test_drop_after_pause.out b/tests/regress/expected7/test_drop_after_pause.out deleted file mode 100644 index 24cbb191ab2..00000000000 --- a/tests/regress/expected7/test_drop_after_pause.out +++ /dev/null @@ -1,64 +0,0 @@ -CREATE DATABASE test_drop_after_pause; -\c test_drop_after_pause -CREATE EXTENSION diskquota; -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -SELECT diskquota.pause(); - pause -------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -DROP EXTENSION diskquota; -CREATE EXTENSION diskquota; -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null -\! gpstop -u > /dev/null -CREATE SCHEMA SX; -CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); -SELECT diskquota.set_schema_quota('SX', '1MB'); - set_schema_quota ------------------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -INSERT INTO SX.a SELECT generate_series(1,10000000); -- expect insert fail -ERROR: schema's disk space quota exceeded with name: 25290 (seg2 127.0.0.1:7004 pid=1905198) -\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null -\! 
gpstop -u > /dev/null -SELECT diskquota.pause(); - pause -------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -DROP EXTENSION diskquota; -\c contrib_regression -DROP DATABASE test_drop_after_pause; diff --git a/tests/regress/expected7/test_drop_extension.out b/tests/regress/expected7/test_drop_extension.out deleted file mode 100644 index b946654c7f3..00000000000 --- a/tests/regress/expected7/test_drop_extension.out +++ /dev/null @@ -1,13 +0,0 @@ -SELECT diskquota.pause(); - pause -------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -DROP EXTENSION diskquota; diff --git a/tests/regress/expected7/test_drop_table.out b/tests/regress/expected7/test_drop_table.out deleted file mode 100644 index d50db9e1b64..00000000000 --- a/tests/regress/expected7/test_drop_table.out +++ /dev/null @@ -1,34 +0,0 @@ --- Test Drop table -CREATE SCHEMA sdrtbl; -SELECT diskquota.set_schema_quota('sdrtbl', '1 MB'); - set_schema_quota ------------------- - -(1 row) - -SET search_path TO sdrtbl; -CREATE TABLE a(i INT) DISTRIBUTED BY (i); -CREATE TABLE a2(i INT) DISTRIBUTED BY (i); -INSERT INTO a SELECT generate_series(1,100); --- expect insert fail -INSERT INTO a SELECT generate_series(1,100000); -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect insert fail -INSERT INTO a2 SELECT generate_series(1,100); -ERROR: schema's disk space quota exceeded with name: sdrtbl -DROP TABLE a; -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -INSERT INTO a2 SELECT generate_series(1,100); -DROP TABLE a2; -RESET search_path; -DROP SCHEMA sdrtbl; diff --git a/tests/regress/expected7/test_extension.out b/tests/regress/expected7/test_extension.out deleted file mode 100644 index fbd8483f6c4..00000000000 --- a/tests/regress/expected7/test_extension.out +++ /dev/null @@ -1,523 +0,0 @@ --- NOTE: when testing this script, you must make sure that there is no diskquota --- worker process. -CREATE DATABASE dbx0 ; -CREATE DATABASE dbx1 ; -CREATE DATABASE dbx2 ; -CREATE DATABASE dbx3 ; -CREATE DATABASE dbx4 ; -CREATE DATABASE dbx5 ; -CREATE DATABASE dbx6 ; -CREATE DATABASE dbx7 ; -CREATE DATABASE dbx8 ; -CREATE DATABASE dbx9 ; -CREATE DATABASE dbx10 ; ---start_ignore -\! gpconfig -c diskquota.max_workers -v 20 --skipvalidation -20230117:12:50:10:1924108 gpconfig:zhrt:zhrt-[INFO]:-completed successfully with parameters '-c diskquota.max_workers -v 20 --skipvalidation' -\! gpstop -arf -20230117:12:50:11:1924636 gpstop:zhrt:zhrt-[INFO]:-Starting gpstop with args: -arf -20230117:12:50:11:1924636 gpstop:zhrt:zhrt-[INFO]:-Gathering information and validating the environment... -20230117:12:50:11:1924636 gpstop:zhrt:zhrt-[INFO]:-Obtaining Greenplum Coordinator catalog information -20230117:12:50:11:1924636 gpstop:zhrt:zhrt-[INFO]:-Obtaining Segment details from coordinator...
-20230117:12:50:11:1924636 gpstop:zhrt:zhrt-[INFO]:-Greenplum Version: 'postgres (Greenplum Database) 7.0.0-alpha.0+dev.16171.g005ee83c46 build dev' -20230117:12:50:11:1924636 gpstop:zhrt:zhrt-[INFO]:-Commencing Coordinator instance shutdown with mode='fast' -20230117:12:50:11:1924636 gpstop:zhrt:zhrt-[INFO]:-Coordinator segment instance directory=/home/zhrt/workspace/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 -20230117:12:50:11:1924636 gpstop:zhrt:zhrt-[INFO]:-Attempting forceful termination of any leftover coordinator process -20230117:12:50:11:1924636 gpstop:zhrt:zhrt-[INFO]:-Terminating processes for segment /home/zhrt/workspace/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 -20230117:12:50:11:1924636 gpstop:zhrt:zhrt-[INFO]:-Stopping coordinator standby host zhrt mode=fast -20230117:12:50:12:1924636 gpstop:zhrt:zhrt-[INFO]:-Successfully shutdown standby process on zhrt -20230117:12:50:12:1924636 gpstop:zhrt:zhrt-[INFO]:-Targeting dbid [2, 5, 3, 6, 4, 7] for shutdown -20230117:12:50:12:1924636 gpstop:zhrt:zhrt-[INFO]:-Commencing parallel primary segment instance shutdown, please wait... -20230117:12:50:12:1924636 gpstop:zhrt:zhrt-[INFO]:-0.00% of jobs completed -20230117:12:50:12:1924636 gpstop:zhrt:zhrt-[INFO]:-100.00% of jobs completed -20230117:12:50:12:1924636 gpstop:zhrt:zhrt-[INFO]:-Commencing parallel mirror segment instance shutdown, please wait... -20230117:12:50:12:1924636 gpstop:zhrt:zhrt-[INFO]:-0.00% of jobs completed -20230117:12:50:12:1924636 gpstop:zhrt:zhrt-[INFO]:-100.00% of jobs completed -20230117:12:50:12:1924636 gpstop:zhrt:zhrt-[INFO]:----------------------------------------------------- -20230117:12:50:12:1924636 gpstop:zhrt:zhrt-[INFO]:- Segments stopped successfully = 6 -20230117:12:50:12:1924636 gpstop:zhrt:zhrt-[INFO]:- Segments with errors during stop = 0 -20230117:12:50:12:1924636 gpstop:zhrt:zhrt-[INFO]:----------------------------------------------------- -20230117:12:50:12:1924636 gpstop:zhrt:zhrt-[INFO]:-Successfully shutdown 6 of 6 segment instances -20230117:12:50:12:1924636 gpstop:zhrt:zhrt-[INFO]:-Database successfully shutdown with no errors reported -20230117:12:50:12:1924636 gpstop:zhrt:zhrt-[INFO]:-Restarting System... 
---end_ignore -\c -show max_worker_processes; - max_worker_processes ----------------------- - 20 -(1 row) - -show diskquota.max_workers; - diskquota.max_workers ------------------------ - 20 -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -\c dbx0 -CREATE EXTENSION diskquota; -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -CREATE SCHEMA SX; -CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); -SELECT diskquota.set_schema_quota('SX', '1MB'); - set_schema_quota ------------------- - -(1 row) - -INSERT INTO SX.a values(generate_series(0, 100000)); -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -INSERT INTO SX.a values(generate_series(0, 10)); -ERROR: schema's disk space quota exceeded with name: sx -DROP TABLE SX.a; -\c dbx1 -CREATE SCHEMA SX; -CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); -INSERT INTO SX.a values(generate_series(0, 100000)); -CREATE EXTENSION diskquota; -WARNING: [diskquota] diskquota is not ready because current database is not empty -HINT: please run 'SELECT diskquota.init_table_size_table();' to initialize diskquota -SELECT diskquota.init_table_size_table(); - init_table_size_table ------------------------ - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -SELECT diskquota.set_schema_quota('SX', '1MB'); - set_schema_quota ------------------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -INSERT INTO SX.a values(generate_series(0, 10)); -ERROR: schema's disk space quota exceeded with name: sx -DROP TABLE SX.a; -\c dbx2 -CREATE EXTENSION diskquota; -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -CREATE SCHEMA SX; -CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); -SELECT diskquota.set_schema_quota('SX', '1MB'); - set_schema_quota ------------------- - -(1 row) - -INSERT INTO SX.a values(generate_series(0, 100000)); -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -INSERT INTO SX.a values(generate_series(0, 10)); -ERROR: schema's disk space quota exceeded with name: sx -DROP TABLE SX.a; -\c dbx3 -CREATE EXTENSION diskquota; -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -CREATE SCHEMA SX; -CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); -SELECT diskquota.set_schema_quota('SX', '1MB'); - set_schema_quota ------------------- - -(1 row) - -INSERT INTO SX.a values(generate_series(0, 100000)); -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -INSERT INTO SX.a values(generate_series(0, 10)); -ERROR: schema's disk space quota exceeded with name: sx -DROP TABLE SX.a; -\c dbx4 -CREATE EXTENSION diskquota; -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -CREATE SCHEMA SX; -CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); -SELECT diskquota.set_schema_quota('SX', '1MB'); - set_schema_quota ------------------- - -(1 row) - -INSERT INTO SX.a values(generate_series(0, 100000)); -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch 
---------------------------- - t -(1 row) - -INSERT INTO SX.a values(generate_series(0, 10)); -ERROR: schema's disk space quota exceeded with name: sx -DROP TABLE SX.a; -\c dbx5 -CREATE EXTENSION diskquota; -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -CREATE SCHEMA SX; -CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); -SELECT diskquota.set_schema_quota('SX', '1MB'); - set_schema_quota ------------------- - -(1 row) - -INSERT INTO SX.a values(generate_series(0, 100000)); -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -INSERT INTO SX.a values(generate_series(0, 10)); -ERROR: schema's disk space quota exceeded with name: sx -DROP TABLE SX.a; -\c dbx6 -CREATE EXTENSION diskquota; -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -CREATE SCHEMA SX; -CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); -SELECT diskquota.set_schema_quota('SX', '1MB'); - set_schema_quota ------------------- - -(1 row) - -INSERT INTO SX.a values(generate_series(0, 100000)); -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -INSERT INTO SX.a values(generate_series(0, 10)); -ERROR: schema's disk space quota exceeded with name: sx -DROP TABLE SX.a; -\c dbx7 -CREATE EXTENSION diskquota; -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -CREATE SCHEMA SX; -CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); -SELECT diskquota.set_schema_quota('SX', '1MB'); - set_schema_quota ------------------- - -(1 row) - -INSERT INTO SX.a values(generate_series(0, 100000)); -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -INSERT INTO SX.a values(generate_series(0, 10)); -ERROR: schema's disk space quota exceeded with name: sx -DROP TABLE SX.a; -\c dbx8 -CREATE EXTENSION diskquota; -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -CREATE SCHEMA SX; -CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); -SELECT diskquota.set_schema_quota('SX', '1MB'); - set_schema_quota ------------------- - -(1 row) - -INSERT INTO SX.a values(generate_series(0, 100000)); -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -INSERT INTO SX.a values(generate_series(0, 10)); -ERROR: schema's disk space quota exceeded with name: sx -DROP TABLE SX.a; -\c dbx9 -CREATE EXTENSION diskquota; -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -\c dbx10 -CREATE EXTENSION diskquota; -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -\c dbx0 -SELECT diskquota.pause(); - pause -------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -DROP EXTENSION diskquota; -\c dbx1 -SELECT diskquota.pause(); - pause -------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -DROP EXTENSION diskquota; -\c dbx2 -SELECT diskquota.pause(); - pause -------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch 
---------------------------- - t -(1 row) - -DROP EXTENSION diskquota; -\c dbx3 -SELECT diskquota.pause(); - pause -------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -DROP EXTENSION diskquota; -\c dbx4 -SELECT diskquota.pause(); - pause -------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -DROP EXTENSION diskquota; -\c dbx5 -SELECT diskquota.pause(); - pause -------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -DROP EXTENSION diskquota; -\c dbx6 -SELECT diskquota.pause(); - pause -------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -DROP EXTENSION diskquota; -\c dbx7 -SELECT diskquota.pause(); - pause -------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -DROP EXTENSION diskquota; -\c dbx8 -SELECT diskquota.pause(); - pause -------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -DROP EXTENSION diskquota; -\c dbx9 -SELECT diskquota.pause(); - pause -------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -DROP EXTENSION diskquota; -\c dbx10 -SELECT diskquota.pause(); - pause -------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -DROP EXTENSION diskquota; -\c contrib_regression -DROP DATABASE dbx0 ; -DROP DATABASE dbx1 ; -DROP DATABASE dbx2 ; -DROP DATABASE dbx3 ; -DROP DATABASE dbx4 ; -DROP DATABASE dbx5 ; -DROP DATABASE dbx6 ; -DROP DATABASE dbx7 ; -DROP DATABASE dbx8 ; -DROP DATABASE dbx9 ; -DROP DATABASE dbx10 ; ---start_ignore -\! gpconfig -c diskquota.max_workers -v 1 --skipvalidation -20230117:12:52:37:1941441 gpconfig:zhrt:zhrt-[INFO]:-completed successfully with parameters '-c diskquota.max_workers -v 1 --skipvalidation' -\! gpstop -arf; -20230117:12:52:37:1941981 gpstop:zhrt:zhrt-[INFO]:-Starting gpstop with args: -arf -20230117:12:52:37:1941981 gpstop:zhrt:zhrt-[INFO]:-Gathering information and validating the environment... -20230117:12:52:37:1941981 gpstop:zhrt:zhrt-[INFO]:-Obtaining Greenplum Coordinator catalog information -20230117:12:52:37:1941981 gpstop:zhrt:zhrt-[INFO]:-Obtaining Segment details from coordinator... 
-20230117:12:52:37:1941981 gpstop:zhrt:zhrt-[INFO]:-Greenplum Version: 'postgres (Greenplum Database) 7.0.0-alpha.0+dev.16171.g005ee83c46 build dev' -20230117:12:52:37:1941981 gpstop:zhrt:zhrt-[INFO]:-Commencing Coordinator instance shutdown with mode='fast' -20230117:12:52:37:1941981 gpstop:zhrt:zhrt-[INFO]:-Coordinator segment instance directory=/home/zhrt/workspace/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 -20230117:12:52:37:1941981 gpstop:zhrt:zhrt-[INFO]:-Attempting forceful termination of any leftover coordinator process -20230117:12:52:37:1941981 gpstop:zhrt:zhrt-[INFO]:-Terminating processes for segment /home/zhrt/workspace/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 -20230117:12:52:37:1941981 gpstop:zhrt:zhrt-[INFO]:-Stopping coordinator standby host zhrt mode=fast -20230117:12:52:38:1941981 gpstop:zhrt:zhrt-[INFO]:-Successfully shutdown standby process on zhrt -20230117:12:52:38:1941981 gpstop:zhrt:zhrt-[INFO]:-Targeting dbid [2, 5, 3, 6, 4, 7] for shutdown -20230117:12:52:38:1941981 gpstop:zhrt:zhrt-[INFO]:-Commencing parallel primary segment instance shutdown, please wait... -20230117:12:52:38:1941981 gpstop:zhrt:zhrt-[INFO]:-0.00% of jobs completed -20230117:12:52:38:1941981 gpstop:zhrt:zhrt-[INFO]:-100.00% of jobs completed -20230117:12:52:38:1941981 gpstop:zhrt:zhrt-[INFO]:-Commencing parallel mirror segment instance shutdown, please wait... -20230117:12:52:38:1941981 gpstop:zhrt:zhrt-[INFO]:-0.00% of jobs completed -20230117:12:52:38:1941981 gpstop:zhrt:zhrt-[INFO]:-100.00% of jobs completed -20230117:12:52:38:1941981 gpstop:zhrt:zhrt-[INFO]:----------------------------------------------------- -20230117:12:52:38:1941981 gpstop:zhrt:zhrt-[INFO]:- Segments stopped successfully = 6 -20230117:12:52:38:1941981 gpstop:zhrt:zhrt-[INFO]:- Segments with errors during stop = 0 -20230117:12:52:38:1941981 gpstop:zhrt:zhrt-[INFO]:----------------------------------------------------- -20230117:12:52:38:1941981 gpstop:zhrt:zhrt-[INFO]:-Successfully shutdown 6 of 6 segment instances -20230117:12:52:38:1941981 gpstop:zhrt:zhrt-[INFO]:-Database successfully shutdown with no errors reported -20230117:12:52:38:1941981 gpstop:zhrt:zhrt-[INFO]:-Restarting System... ---end_ignore -\c -show diskquota.max_workers; - diskquota.max_workers ------------------------ - 1 -(1 row) - diff --git a/tests/regress/expected7/test_fast_disk_check.out b/tests/regress/expected7/test_fast_disk_check.out deleted file mode 100644 index d883934ffcf..00000000000 --- a/tests/regress/expected7/test_fast_disk_check.out +++ /dev/null @@ -1,23 +0,0 @@ --- Test SCHEMA -CREATE SCHEMA s1; -SET search_path to s1; -CREATE TABLE a(i int) DISTRIBUTED BY (i); -INSERT INTO a SELECT generate_series(1,200000); -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -SELECT (pg_database_size(oid)-dbsize)/dbsize < 0.1 FROM pg_database, diskquota.show_fast_database_size_view WHERE datname='contrib_regression'; -WARNING: skipping "__gp_log_segment_ext" --- cannot calculate this foreign table size -WARNING: skipping "__gp_log_master_ext" --- cannot calculate this foreign table size -WARNING: skipping "gp_disk_free" --- cannot calculate this foreign table size - ?column? 
----------- - f -(1 row) - -RESET search_path; -DROP TABLE s1.a; -DROP SCHEMA s1; diff --git a/tests/regress/expected7/test_fetch_table_stat.out b/tests/regress/expected7/test_fetch_table_stat.out deleted file mode 100644 index b9be7aec6b2..00000000000 --- a/tests/regress/expected7/test_fetch_table_stat.out +++ /dev/null @@ -1,35 +0,0 @@ --- --- 1. Test that when an error occurs in diskquota_fetch_table_stat --- the error message is preserved for us to debug. --- -CREATE TABLE t_error_handling (i int) DISTRIBUTED BY (i); --- Inject an error to a segment server, since this UDF is only called on segments. -SELECT gp_inject_fault_infinite('diskquota_fetch_table_stat', 'error', dbid) - FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) - --- Dispatch diskquota_fetch_table_stat to segments. --- There should be a warning message from segment server saying: --- fault triggered, fault name:'diskquota_fetch_table_stat' fault type:'error' --- We're not interested in the oid here, we aggregate the result by COUNT(*). -SELECT COUNT(*) - FROM (SELECT diskquota.diskquota_fetch_table_stat(1, array[(SELECT oid FROM pg_class WHERE relname='t_error_handling')]) - FROM gp_dist_random('gp_id') WHERE gp_segment_id=0) AS count; - count -------- - 1 -(1 row) - --- Reset the fault injector to prevent future failure. -SELECT gp_inject_fault_infinite('diskquota_fetch_table_stat', 'reset', dbid) - FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) - --- Do some clean-ups. -DROP TABLE t_error_handling; diff --git a/tests/regress/expected7/test_index.out b/tests/regress/expected7/test_index.out deleted file mode 100644 index a35ec4f95cd..00000000000 --- a/tests/regress/expected7/test_index.out +++ /dev/null @@ -1,133 +0,0 @@ --- Test schema --- start_ignore -\! 
mkdir -p /tmp/indexspc --- end_ignore -CREATE SCHEMA indexschema1; -DROP TABLESPACE IF EXISTS indexspc; -NOTICE: tablespace "indexspc" does not exist, skipping -CREATE TABLESPACE indexspc LOCATION '/tmp/indexspc'; -SET search_path TO indexschema1; -CREATE TABLE test_index_a(i int) TABLESPACE indexspc DISTRIBUTED BY (i); -INSERT INTO test_index_a SELECT generate_series(1,20000); -SELECT diskquota.set_schema_tablespace_quota('indexschema1', 'indexspc','2 MB'); - set_schema_tablespace_quota ------------------------------ - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes -FROM diskquota.show_fast_schema_tablespace_quota_view -WHERE schema_name='indexschema1' and tablespace_name='indexspc'; - schema_name | tablespace_name | quota_in_mb | nspsize_tablespace_in_bytes ---------------+-----------------+-------------+----------------------------- - indexschema1 | indexspc | 2 | 1081344 -(1 row) - -SELECT tableid::regclass, size, segid -FROM diskquota.table_size -WHERE tableid = 'test_index_a'::regclass -ORDER BY segid; - tableid | size | segid ---------------+---------+------- - test_index_a | 1081344 | -1 - test_index_a | 360448 | 0 - test_index_a | 360448 | 1 - test_index_a | 360448 | 2 -(4 rows) - --- create index for the table, index in default tablespace -CREATE INDEX a_index ON test_index_a(i); -INSERT INTO test_index_a SELECT generate_series(1,10000); -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect insert success -INSERT INTO test_index_a SELECT generate_series(1,100); -SELECT schema_name,tablespace_name,quota_in_mb,nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name ='indexschema1' and tablespace_name='indexspc'; - schema_name | tablespace_name | quota_in_mb | nspsize_tablespace_in_bytes ---------------+-----------------+-------------+----------------------------- - indexschema1 | indexspc | 2 | 1441792 -(1 row) - -SELECT tableid::regclass, size, segid -FROM diskquota.table_size -WHERE tableid = 'test_index_a'::regclass -ORDER BY segid; - tableid | size | segid ---------------+---------+------- - test_index_a | 1441792 | -1 - test_index_a | 491520 | 0 - test_index_a | 491520 | 1 - test_index_a | 458752 | 2 -(4 rows) - -SELECT tableid::regclass, size, segid -FROM diskquota.table_size -WHERE tableid = 'a_index'::regclass -ORDER BY segid; - tableid | size | segid ----------+---------+------- - a_index | 1212416 | -1 - a_index | 393216 | 0 - a_index | 393216 | 1 - a_index | 393216 | 2 -(4 rows) - --- add index to tablespace indexspc -ALTER index a_index SET TABLESPACE indexspc; -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -SELECT schema_name,tablespace_name,quota_in_mb,nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name ='indexschema1' and tablespace_name='indexspc'; - schema_name | tablespace_name | quota_in_mb | nspsize_tablespace_in_bytes ---------------+-----------------+-------------+----------------------------- - indexschema1 | indexspc | 2 | 2654208 -(1 row) - -SELECT size, segid FROM diskquota.table_size , pg_class where tableid=oid and (relname='test_index_a' or relname='a_index') and segid=-1; - size | segid ----------+------- - 1212416 | -1 - 1441792 | -1 -(2 rows) - --- expect 
insert fail -INSERT INTO test_index_a SELECT generate_series(1,100); -ERROR: tablespace: indexspc, schema: indexschema1 diskquota exceeded --- index tablespace quota exceeded -ALTER table test_index_a SET TABLESPACE pg_default; -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect insert success -INSERT INTO test_index_a SELECT generate_series(1,100); -INSERT INTO test_index_a SELECT generate_series(1,200000); -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect insert fail -INSERT INTO test_index_a SELECT generate_series(1,100); -ERROR: tablespace: indexspc, schema: indexschema1 diskquota exceeded -RESET search_path; -DROP INDEX indexschema1.a_index; -DROP TABLE indexschema1.test_index_a; -DROP SCHEMA indexschema1; -DROP TABLESPACE indexspc; diff --git a/tests/regress/expected7/test_many_active_tables.out b/tests/regress/expected7/test_many_active_tables.out deleted file mode 100644 index f3298c1ce52..00000000000 --- a/tests/regress/expected7/test_many_active_tables.out +++ /dev/null @@ -1,31 +0,0 @@ -CREATE TABLE t1 (pk int, val int) -DISTRIBUTED BY (pk) -PARTITION BY RANGE (pk) (START (1) END (1000) EVERY (1)); -INSERT INTO t1 -SELECT pk, val -FROM generate_series(1, 10000) AS val, generate_series(1, 999) AS pk; -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -SELECT count(*) >= 999 FROM diskquota.table_size WHERE size > 0; - ?column? ----------- - t -(1 row) - -DROP TABLE t1; -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -SELECT count(*) < 999 FROM diskquota.table_size WHERE size > 0; - ?column? ----------- - t -(1 row) - diff --git a/tests/regress/expected7/test_mistake.out b/tests/regress/expected7/test_mistake.out deleted file mode 100644 index fab4c6eb2f7..00000000000 --- a/tests/regress/expected7/test_mistake.out +++ /dev/null @@ -1,34 +0,0 @@ --- to make sure that the schema 'notfoundns' is really not found -select nspname from pg_namespace where nspname = 'notfoundns'; - nspname ---------- -(0 rows) - -select diskquota.set_schema_quota('notfoundns', '1 MB'); -ERROR: schema "notfoundns" does not exist -DROP SCHEMA IF EXISTS nmistake; -NOTICE: schema "nmistake" does not exist, skipping -CREATE SCHEMA nmistake; -select diskquota.set_schema_quota('nmistake', '0 MB'); -ERROR: disk quota can not be set to 0 MB -DROP ROLE IF EXISTS rmistake; -NOTICE: role "rmistake" does not exist, skipping -CREATE ROLE rmistake; -NOTICE: resource queue required -- using default resource queue "pg_default" -select diskquota.set_role_quota('rmistake', '0 MB'); -ERROR: disk quota can not be set to 0 MB --- start_ignore -\! 
mkdir -p /tmp/spcmistake --- end_ignore -DROP TABLESPACE IF EXISTS spcmistake; -NOTICE: tablespace "spcmistake" does not exist, skipping -CREATE TABLESPACE spcmistake LOCATION '/tmp/spcmistake'; -SELECT diskquota.set_schema_tablespace_quota('nmistake', 'spcmistake','0 MB'); -ERROR: disk quota can not be set to 0 MB -SELECT diskquota.set_role_tablespace_quota('rmistake', 'spcmistake','0 MB'); -ERROR: disk quota can not be set to 0 MB -SELECT diskquota.set_per_segment_quota('spcmistake', 0); -ERROR: per segment quota ratio can not be set to 0 -DROP SCHEMA nmistake; -DROP ROLE rmistake; -DROP TABLESPACE spcmistake; diff --git a/tests/regress/expected7/test_partition.out b/tests/regress/expected7/test_partition.out deleted file mode 100644 index 322c00c6422..00000000000 --- a/tests/regress/expected7/test_partition.out +++ /dev/null @@ -1,63 +0,0 @@ --- Test partition table -CREATE SCHEMA s8; -SELECT diskquota.SET_schema_quota('s8', '1 MB'); - set_schema_quota ------------------- - -(1 row) - -SET search_path TO s8; -CREATE TABLE measurement ( - city_id int not null, - logdate date not null, - peaktemp int, - unitsales int -)PARTITION BY RANGE (logdate) -( - PARTITION Feb06 START (date '2006-02-01') INCLUSIVE, - PARTITION Mar06 START (date '2006-03-01') INCLUSIVE - END (date '2016-04-01') EXCLUSIVE -); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'city_id' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. -INSERT INTO measurement SELECT generate_series(1,100), '2006-02-02' ,1,1; -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -INSERT INTO measurement SELECT 1, '2006-02-02' ,1,1; --- expect insert success -INSERT INTO measurement SELECT generate_series(1,100000), '2006-03-02' ,1,1; -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect insert fail -INSERT INTO measurement SELECT 1, '2006-02-02' ,1,1; -ERROR: schema's disk space quota exceeded with name: s8 --- expect insert fail -INSERT INTO measurement SELECT 1, '2006-03-03' ,1,1; -ERROR: schema's disk space quota exceeded with name: s8 -DELETE FROM measurement WHERE logdate='2006-03-02'; -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -VACUUM FULL measurement; -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -INSERT INTO measurement SELECT 1, '2006-02-02' ,1,1; -INSERT INTO measurement SELECT 1, '2006-03-03' ,1,1; -DROP TABLE measurement; -RESET search_path; -DROP SCHEMA s8; diff --git a/tests/regress/expected7/test_pause_and_resume.out b/tests/regress/expected7/test_pause_and_resume.out deleted file mode 100644 index 18ae2573d36..00000000000 --- a/tests/regress/expected7/test_pause_and_resume.out +++ /dev/null @@ -1,70 +0,0 @@ --- Test pause and resume.
-CREATE SCHEMA s1; -SET search_path TO s1; -CREATE TABLE a(i int) DISTRIBUTED BY (i); --- expect insert succeed -INSERT INTO a SELECT generate_series(1,100000); -SELECT diskquota.set_schema_quota('s1', '1 MB'); - set_schema_quota ------------------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect insert fail -INSERT INTO a SELECT generate_series(1,100); -ERROR: schema's disk space quota exceeded with name: s1 --- pause extension -SELECT diskquota.pause(); - pause -------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -SELECT tableid::regclass, size, segid FROM diskquota.table_size -WHERE tableid = 'a'::regclass AND segid = -1; - tableid | size | segid ----------+---------+------- - a | 3932160 | -1 -(1 row) - --- expect insert succeed -INSERT INTO a SELECT generate_series(1,100000); --- resume extension -SELECT diskquota.resume(); - resume --------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect insert fail -INSERT INTO a SELECT generate_series(1,100); -ERROR: schema's disk space quota exceeded with name: s1 --- table size should be updated after resume -SELECT tableid::regclass, size, segid FROM diskquota.table_size -WHERE tableid = 'a'::regclass AND segid = -1; - tableid | size | segid ----------+---------+------- - a | 7569408 | -1 -(1 row) - -RESET search_path; -DROP TABLE s1.a; -DROP SCHEMA s1; diff --git a/tests/regress/expected7/test_pause_and_resume_multiple_db.out b/tests/regress/expected7/test_pause_and_resume_multiple_db.out deleted file mode 100644 index ed211216848..00000000000 --- a/tests/regress/expected7/test_pause_and_resume_multiple_db.out +++ /dev/null @@ -1,201 +0,0 @@ --- need 'contrib_regression' as test database -\c -CREATE SCHEMA s1; -SET search_path TO s1; -CREATE DATABASE test_pause_and_resume; -CREATE DATABASE test_new_create_database; -\c test_pause_and_resume -CREATE SCHEMA s1; -CREATE EXTENSION diskquota; -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -\c contrib_regression -CREATE TABLE s1.a(i int) DISTRIBUTED BY (i); -INSERT INTO s1.a SELECT generate_series(1,100000); -- expect insert succeed -\c test_pause_and_resume -CREATE TABLE s1.a(i int) DISTRIBUTED BY (i); -INSERT INTO s1.a SELECT generate_series(1,100000); -- expect insert succeed -\c contrib_regression -SELECT diskquota.set_schema_quota('s1', '1 MB'); - set_schema_quota ------------------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -INSERT INTO s1.a SELECT generate_series(1,100); -- expect insert fail -ERROR: schema's disk space quota exceeded with name: s1 -\c test_pause_and_resume -SELECT diskquota.set_schema_quota('s1', '1 MB'); - set_schema_quota ------------------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -INSERT INTO s1.a SELECT generate_series(1,100); -- expect insert fail -ERROR: schema's disk space quota exceeded with name: s1 -\c contrib_regression -SELECT diskquota.pause(); -- pause extension, only affects current database - pause -------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - 
-SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 's1.a'::regclass AND segid = -1; - tableid | size | segid ----------+---------+------- - s1.a | 3932160 | -1 -(1 row) - -INSERT INTO s1.a SELECT generate_series(1,100); -- expect insert succeed -\c test_pause_and_resume -SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 's1.a'::regclass AND segid = -1; - tableid | size | segid ----------+---------+------- - s1.a | 3932160 | -1 -(1 row) - -INSERT INTO s1.a SELECT generate_series(1,100); -- expect insert fail -ERROR: schema's disk space quota exceeded with name: s1 -SELECT diskquota.pause(); -- pause extension, only affects current database - pause -------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 's1.a'::regclass AND segid = -1; - tableid | size | segid ----------+---------+------- - s1.a | 3932160 | -1 -(1 row) - -INSERT INTO s1.a SELECT generate_series(1,100); -- expect insert succeed -\c test_new_create_database; -CREATE SCHEMA s1; -CREATE EXTENSION diskquota; -SELECT diskquota.wait_for_worker_new_epoch(); -- new database should be active although other database is paused - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -CREATE TABLE s1.a(i int) DISTRIBUTED BY (i); -INSERT INTO s1.a SELECT generate_series(1,100000); -- expect insert succeed -SELECT diskquota.set_schema_quota('s1', '1 MB'); - set_schema_quota ------------------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -INSERT INTO s1.a SELECT generate_series(1,100000); -- expect insert fail -ERROR: schema's disk space quota exceeded with name: s1 -SELECT diskquota.pause(); -- pause extension, only affects current database - pause -------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -INSERT INTO s1.a SELECT generate_series(1,100); -- expect insert succeed --- resume should only affect current database -SELECT diskquota.resume(); - resume --------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -INSERT INTO s1.a SELECT generate_series(1,100); -- expect insert fail -ERROR: schema's disk space quota exceeded with name: s1 -\c test_pause_and_resume -SELECT diskquota.pause(); - pause -------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -DROP EXTENSION diskquota; -\c test_new_create_database -SELECT diskquota.pause(); - pause -------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -DROP EXTENSION diskquota; -\c contrib_regression -DROP SCHEMA s1 CASCADE; -NOTICE: drop cascades to table s1.a -DROP DATABASE test_pause_and_resume; -DROP DATABASE test_new_create_database;
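The pause/resume expected outputs above all exercise the same per-database workflow: diskquota.pause() and diskquota.resume() only change enforcement in the database they are called from, and diskquota.wait_for_worker_new_epoch() acts as a barrier so the background worker has refreshed its quota model before the next assertion. A minimal sketch of that workflow, using illustrative schema and table names rather than ones taken from the tests:

CREATE EXTENSION diskquota;
CREATE SCHEMA s1;
CREATE TABLE s1.t(i int) DISTRIBUTED BY (i);
SELECT diskquota.set_schema_quota('s1', '1 MB');
INSERT INTO s1.t SELECT generate_series(1, 100000); -- soft limit: the filling insert succeeds
SELECT diskquota.wait_for_worker_new_epoch();       -- barrier: worker recomputes table sizes
INSERT INTO s1.t SELECT generate_series(1, 100);    -- rejected: quota exceeded
SELECT diskquota.pause();                           -- suspend enforcement in this database only
SELECT diskquota.wait_for_worker_new_epoch();
INSERT INTO s1.t SELECT generate_series(1, 100);    -- accepted while paused
SELECT diskquota.resume();                          -- re-enable enforcement
SELECT diskquota.wait_for_worker_new_epoch();
INSERT INTO s1.t SELECT generate_series(1, 100);    -- rejected again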
diff --git a/tests/regress/expected7/test_primary_failure.out b/tests/regress/expected7/test_primary_failure.out deleted file mode 100644 index 4e3ffa185d8..00000000000 --- a/tests/regress/expected7/test_primary_failure.out +++ /dev/null @@ -1,126 +0,0 @@ -CREATE SCHEMA ftsr; -SELECT diskquota.set_schema_quota('ftsr', '1 MB'); - set_schema_quota ------------------- - -(1 row) - -SET search_path TO ftsr; -create or replace language plpythonu; -ERROR: could not access file "$libdir/plpython2": No such file or directory --- --- pg_ctl: --- datadir: data directory of process to target with `pg_ctl` --- command: commands valid for `pg_ctl` --- command_mode: modes valid for `pg_ctl -m` --- -create or replace function pg_ctl(datadir text, command text, command_mode text default 'immediate') -returns text as $$ - import subprocess - if command not in ('stop', 'restart'): - return 'Invalid command input' - - cmd = 'pg_ctl -l postmaster.log -D %s ' % datadir - cmd = cmd + '-W -m %s %s' % (command_mode, command) - - return subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).replace('.', '') -$$ language plpythonu; -ERROR: language "plpythonu" does not exist -HINT: Use CREATE EXTENSION to load the language into the database. -create or replace function pg_recoverseg(datadir text, command text) -returns text as $$ - import subprocess - cmd = 'gprecoverseg -%s -d %s; exit 0; ' % (command, datadir) - return subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).replace('.', '') -$$ language plpythonu; -ERROR: language "plpythonu" does not exist -HINT: Use CREATE EXTENSION to load the language into the database. -CREATE TABLE a(i int) DISTRIBUTED BY (i); -INSERT INTO a SELECT generate_series(1,100); -INSERT INTO a SELECT generate_series(1,100000); -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect insert fail -INSERT INTO a SELECT generate_series(1,100); -ERROR: schema's disk space quota exceeded with name: ftsr --- now one of primary is down -select pg_ctl((select datadir from gp_segment_configuration c where c.role='p' and c.content=0), 'stop'); -ERROR: function pg_ctl(text, unknown) does not exist -LINE 1: select pg_ctl((select datadir from gp_segment_configuration ... - ^ -HINT: No function matches the given name and argument types. You might need to add explicit type casts. --- switch mirror to primary -select gp_request_fts_probe_scan(); - gp_request_fts_probe_scan ---------------------------- - t -(1 row) - --- check GPDB status -select content, preferred_role, role, status, mode from gp_segment_configuration where content = 0; - content | preferred_role | role | status | mode ----------+----------------+------+--------+------ - 0 | p | p | u | s - 0 | m | m | u | s -(2 rows) - --- expect insert fail -INSERT INTO a SELECT generate_series(1,100); -ERROR: schema's disk space quota exceeded with name: ftsr --- increase quota -SELECT diskquota.set_schema_quota('ftsr', '200 MB'); - set_schema_quota ------------------- - -(1 row) - --- pull up failed primary --- start_ignore -select pg_recoverseg((select datadir from gp_segment_configuration c where c.role='p' and c.content=-1), 'a'); -ERROR: function pg_recoverseg(text, unknown) does not exist -LINE 1: select pg_recoverseg((select datadir from gp_segment_configu... - ^ -HINT: No function matches the given name and argument types. You might need to add explicit type casts. 
-select pg_recoverseg((select datadir from gp_segment_configuration c where c.role='p' and c.content=-1), 'ar'); -ERROR: function pg_recoverseg(text, unknown) does not exist -LINE 1: select pg_recoverseg((select datadir from gp_segment_configu... - ^ -HINT: No function matches the given name and argument types. You might need to add explicit type casts. -select pg_recoverseg((select datadir from gp_segment_configuration c where c.role='p' and c.content=-1), 'a'); -ERROR: function pg_recoverseg(text, unknown) does not exist -LINE 1: select pg_recoverseg((select datadir from gp_segment_configu... - ^ -HINT: No function matches the given name and argument types. You might need to add explicit type casts. -select pg_recoverseg((select datadir from gp_segment_configuration c where c.role='p' and c.content=-1), 'ar'); -ERROR: function pg_recoverseg(text, unknown) does not exist -LINE 1: select pg_recoverseg((select datadir from gp_segment_configu... - ^ -HINT: No function matches the given name and argument types. You might need to add explicit type casts. --- check GPDB status -select content, preferred_role, role, status, mode from gp_segment_configuration where content = 0; - content | preferred_role | role | status | mode ----------+----------------+------+--------+------ - 0 | p | p | u | s - 0 | m | m | u | s -(2 rows) - --- end_ignore -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -SELECT quota_in_mb, nspsize_in_bytes from diskquota.show_fast_schema_quota_view where schema_name='ftsr'; - quota_in_mb | nspsize_in_bytes --------------+------------------ - 200 | 3932160 -(1 row) - -INSERT INTO a SELECT generate_series(1,100); -DROP TABLE a; -DROP SCHEMA ftsr CASCADE; diff --git a/tests/regress/expected7/test_quota_view_no_table.out b/tests/regress/expected7/test_quota_view_no_table.out deleted file mode 100644 index 27a0b315f5b..00000000000 --- a/tests/regress/expected7/test_quota_view_no_table.out +++ /dev/null @@ -1,64 +0,0 @@ -CREATE ROLE no_table SUPERUSER; -CREATE SCHEMA no_table; -SELECT diskquota.set_schema_quota('no_table', '1 MB'); - set_schema_quota ------------------- - -(1 row) - -SELECT schema_name, quota_in_mb, nspsize_in_bytes -FROM diskquota.show_fast_schema_quota_view; - schema_name | quota_in_mb | nspsize_in_bytes --------------+-------------+------------------ - no_table | 1 | 0 -(1 row) - -SELECT diskquota.set_role_quota('no_table', '1 MB'); - set_role_quota ----------------- - -(1 row) - -SELECT role_name, quota_in_mb, rolsize_in_bytes -FROM diskquota.show_fast_role_quota_view; - role_name | quota_in_mb | rolsize_in_bytes ------------+-------------+------------------ - no_table | 1 | 0 -(1 row) - -SELECT diskquota.set_schema_tablespace_quota('no_table', 'pg_default', '1 MB'); - set_schema_tablespace_quota ------------------------------ - -(1 row) - -SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes -FROM diskquota.show_fast_schema_tablespace_quota_view; - schema_name | tablespace_name | quota_in_mb | nspsize_tablespace_in_bytes --------------+-----------------+-------------+----------------------------- - no_table | pg_default | 1 | 0 -(1 row) - -SELECT diskquota.set_role_tablespace_quota('no_table', 'pg_default', '1 MB'); - set_role_tablespace_quota ---------------------------- - -(1 row) - -SELECT role_name, tablespace_name , quota_in_mb, rolsize_tablespace_in_bytes -FROM diskquota.show_fast_role_tablespace_quota_view; - role_name | tablespace_name | quota_in_mb | 
 rolsize_tablespace_in_bytes ------------+-----------------+-------------+----------------------------- - no_table | pg_default | 1 | 0 -(1 row) - -DROP ROLE no_table; -DROP SCHEMA no_table; --- Wait until the quota configs are removed from the memory --- automatically after DROP. -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - diff --git a/tests/regress/expected7/test_readiness_logged.out b/tests/regress/expected7/test_readiness_logged.out deleted file mode 100644 index c798f08b0ee..00000000000 --- a/tests/regress/expected7/test_readiness_logged.out +++ /dev/null @@ -1,38 +0,0 @@ -CREATE DATABASE test_readiness_logged; -\c test_readiness_logged -CREATE TABLE t (i int) DISTRIBUTED BY (i); -CREATE EXTENSION diskquota; -WARNING: [diskquota] diskquota is not ready because current database is not empty -HINT: please run 'SELECT diskquota.init_table_size_table();' to initialize diskquota -CREATE EXTENSION diskquota_test; -SELECT diskquota_test.wait('SELECT diskquota_test.check_cur_db_status(''UNREADY'');'); - wait ------- - t -(1 row) - -SELECT count(*) FROM gp_toolkit.gp_log_database -WHERE logmessage = '[diskquota] diskquota is not ready'; - count -------- - 1 -(1 row) - -\! gpstop -raf > /dev/null -\c -SELECT diskquota_test.wait('SELECT diskquota_test.check_cur_db_status(''UNREADY'');'); - wait ------- - t -(1 row) - -SELECT count(*) FROM gp_toolkit.gp_log_database -WHERE logmessage = '[diskquota] diskquota is not ready'; - count -------- - 2 -(1 row) - -DROP EXTENSION diskquota; -\c contrib_regression -DROP DATABASE test_readiness_logged; diff --git a/tests/regress/expected7/test_recreate.out b/tests/regress/expected7/test_recreate.out deleted file mode 100644 index c69cd82e77e..00000000000 --- a/tests/regress/expected7/test_recreate.out +++ /dev/null @@ -1,27 +0,0 @@ -\c -CREATE DATABASE test_recreate; -\c diskquota -INSERT INTO diskquota_namespace.database_list(dbid) SELECT oid FROM pg_database WHERE datname = 'test_recreate'; -\c test_recreate -CREATE EXTENSION diskquota; -SELECT diskquota.wait_for_worker_new_epoch(); -- should be ok - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -SELECT diskquota.pause(); - pause -------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -DROP EXTENSION diskquota; -\c contrib_regression -DROP DATABASE test_recreate; diff --git a/tests/regress/expected7/test_rejectmap_mul_db.out b/tests/regress/expected7/test_rejectmap_mul_db.out deleted file mode 100644 index 89142d8b041..00000000000 --- a/tests/regress/expected7/test_rejectmap_mul_db.out +++ /dev/null @@ -1,79 +0,0 @@ --- One db's rejectmap update should not impact another db's rejectmap -CREATE DATABASE tjmu1; -CREATE DATABASE tjmu2; --- start_ignore -\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null --- increase the naptime to avoid active tables being cleared by tjmu1's worker -\! gpconfig -c "diskquota.naptime" -v 1 > /dev/null -\!
gpstop -u > /dev/null --- end_ignore -\c tjmu1 -CREATE EXTENSION diskquota; -SELECT diskquota.set_schema_quota('public', '1MB'); - set_schema_quota ------------------- - -(1 row) - -CREATE TABLE b (t TEXT) DISTRIBUTED BY (t); -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- Trigger hard limit to dispatch rejectmap for tjmu1 -INSERT INTO b SELECT generate_series(1, 100000000); -- fail -ERROR: schema's disk space quota exceeded with name: 2200 (seg1 127.0.0.1:8003 pid=43782) --- The rejectmap should contain entries with dbnode = 0 and dbnode = tjmu1_oid. count = 1 -SELECT COUNT(DISTINCT r.dbnode) FROM (SELECT (diskquota.show_rejectmap()).* FROM gp_dist_random('gp_id')) as r where r.dbnode != 0; - count -------- - 1 -(1 row) - -\c tjmu2 -CREATE EXTENSION diskquota; -SELECT diskquota.set_schema_quota('public', '1MB'); - set_schema_quota ------------------- - -(1 row) - -CREATE TABLE b (t TEXT) DISTRIBUTED BY (t); -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- Trigger hard limit to dispatch rejectmap for tjmu2 -INSERT INTO b SELECT generate_series(1, 100000000); -- fail -ERROR: schema's disk space quota exceeded with name: 2200 (seg0 127.0.0.1:7002 pid=1961759) -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - ---\c tjmu1 --- The rejectmap should contain entries with dbnode = 0 and dbnode = tjmu1_oid and tjmu2_oid. count = 2 --- The entries for tjmu1 should not be cleared -SELECT COUNT(DISTINCT r.dbnode) FROM (SELECT (diskquota.show_rejectmap()).* FROM gp_dist_random('gp_id')) as r where r.dbnode != 0; - count -------- - 2 -(1 row) - --- start_ignore -\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null -\! gpconfig -c "diskquota.naptime" -v 0 > /dev/null -\! gpstop -u > /dev/null --- end_ignore -\c tjmu1 -DROP EXTENSION diskquota; -\c tjmu2 -DROP EXTENSION diskquota; -\c contrib_regression -DROP DATABASE tjmu1; -DROP DATABASE tjmu2; diff --git a/tests/regress/expected7/test_relation_size.out b/tests/regress/expected7/test_relation_size.out deleted file mode 100644 index 27b4a4eb7de..00000000000 --- a/tests/regress/expected7/test_relation_size.out +++ /dev/null @@ -1,99 +0,0 @@ -CREATE TEMP TABLE t1(i int); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. -INSERT INTO t1 SELECT generate_series(1, 10000); -SELECT diskquota.relation_size('t1'); - relation_size ---------------- - 688128 -(1 row) - -SELECT pg_table_size('t1'); - pg_table_size ---------------- - 688128 -(1 row) - -CREATE TABLE t2(i int) DISTRIBUTED BY (i); -INSERT INTO t2 SELECT generate_series(1, 10000); -SELECT diskquota.relation_size('t2'); - relation_size ---------------- - 688128 -(1 row) - -SELECT pg_table_size('t2'); - pg_table_size ---------------- - 688128 -(1 row) - --- start_ignore -\!
mkdir -p /tmp/test_spc --- end_ignore -DROP TABLESPACE IF EXISTS test_spc; -NOTICE: tablespace "test_spc" does not exist, skipping -CREATE TABLESPACE test_spc LOCATION '/tmp/test_spc'; -ALTER TABLE t1 SET TABLESPACE test_spc; -INSERT INTO t1 SELECT generate_series(1, 10000); -SELECT diskquota.relation_size('t1'); - relation_size ---------------- - 1081344 -(1 row) - -SELECT pg_table_size('t1'); - pg_table_size ---------------- - 1081344 -(1 row) - -ALTER TABLE t2 SET TABLESPACE test_spc; -INSERT INTO t2 SELECT generate_series(1, 10000); -SELECT diskquota.relation_size('t2'); - relation_size ---------------- - 1081344 -(1 row) - -SELECT pg_table_size('t2'); - pg_table_size ---------------- - 1081344 -(1 row) - -DROP TABLE t1, t2; -DROP TABLESPACE test_spc; --- start_ignore -\! rm -rf /tmp/test_spc - -- end_ignore -CREATE TABLE ao (i int) WITH (appendonly=true) DISTRIBUTED BY (i); -INSERT INTO ao SELECT generate_series(1, 10000); -SELECT diskquota.relation_size('ao'); - relation_size ---------------- - 100200 -(1 row) - -SELECT pg_relation_size('ao'); - pg_relation_size ------------------- - 100200 -(1 row) - -DROP TABLE ao; -CREATE TABLE aocs (i int, t text) WITH (appendonly=true, orientation=column) DISTRIBUTED BY (i); -INSERT INTO aocs SELECT i, repeat('a', 1000) FROM generate_series(1, 10000) AS i; -SELECT diskquota.relation_size('aocs'); - relation_size ---------------- - 10092696 -(1 row) - -SELECT pg_relation_size('aocs'); - pg_relation_size ------------------- - 10092696 -(1 row) - -DROP TABLE aocs; diff --git a/tests/regress/expected7/test_rename.out b/tests/regress/expected7/test_rename.out deleted file mode 100644 index 1e9ab7ae7c3..00000000000 --- a/tests/regress/expected7/test_rename.out +++ /dev/null @@ -1,71 +0,0 @@ --- test rename schema -CREATE SCHEMA srs1; -SELECT diskquota.set_schema_quota('srs1', '1 MB'); - set_schema_quota ------------------- - -(1 row) - -set search_path to srs1; -CREATE TABLE a(i int) DISTRIBUTED BY (i); --- expect insert success -INSERT INTO a SELECT generate_series(1,100000); -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect insert fail -INSERT INTO a SELECT generate_series(1,10); -ERROR: schema's disk space quota exceeded with name: srs1 -ALTER SCHEMA srs1 RENAME TO srs2; -SET search_path TO srs2; --- expect insert fail -INSERT INTO a SELECT generate_series(1,10); -ERROR: schema's disk space quota exceeded with name: srs2 --- test rename table -ALTER TABLE a RENAME TO a2; --- expect insert fail -INSERT INTO a2 SELECT generate_series(1,10); -ERROR: schema's disk space quota exceeded with name: srs2 -DROP TABLE a2; -RESET search_path; -DROP SCHEMA srs2; --- test rename role -CREATE SCHEMA srr1; -CREATE ROLE srerole NOLOGIN; -NOTICE: resource queue required -- using default resource queue "pg_default" -SELECT diskquota.set_role_quota('srerole', '1MB'); - set_role_quota ----------------- - -(1 row) - -SET search_path TO srr1; -CREATE TABLE a(i int) DISTRIBUTED BY (i); -ALTER TABLE a OWNER TO srerole; --- expect insert success -INSERT INTO a SELECT generate_series(1,100000); -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect insert fail -INSERT INTO a SELECT generate_series(1,10); -ERROR: role's disk space quota exceeded with name: srerole -ALTER ROLE srerole RENAME TO srerole2; --- expect insert fail -INSERT INTO a SELECT generate_series(1,10); -ERROR: role's disk space quota exceeded with name: 
srerole2 --- test rename table -ALTER TABLE a RENAME TO a2; --- expect insert fail -INSERT INTO a2 SELECT generate_series(1,10); -ERROR: role's disk space quota exceeded with name: srerole2 -DROP TABLE a2; -DROP ROLE srerole2; -RESET search_path; -DROP SCHEMA srr1; diff --git a/tests/regress/expected7/test_reschema.out b/tests/regress/expected7/test_reschema.out deleted file mode 100644 index 6b88a8080b6..00000000000 --- a/tests/regress/expected7/test_reschema.out +++ /dev/null @@ -1,39 +0,0 @@ --- Test re-set_schema_quota -CREATE SCHEMA srE; -SELECT diskquota.set_schema_quota('srE', '1 MB'); - set_schema_quota ------------------- - -(1 row) - -SET search_path TO srE; -CREATE TABLE a(i int) DISTRIBUTED BY (i); --- expect insert success -INSERT INTO a SELECT generate_series(1,100000); -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect insert fail when exceed quota limit -INSERT INTO a SELECT generate_series(1,1000); -ERROR: schema's disk space quota exceeded with name: sre --- set schema quota larger -SELECT diskquota.set_schema_quota('srE', '1 GB'); - set_schema_quota ------------------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect insert succeed -INSERT INTO a SELECT generate_series(1,1000); -DROP TABLE a; -RESET search_path; -DROP SCHEMA srE; diff --git a/tests/regress/expected7/test_role.out b/tests/regress/expected7/test_role.out deleted file mode 100644 index e51d4685586..00000000000 --- a/tests/regress/expected7/test_role.out +++ /dev/null @@ -1,138 +0,0 @@ --- Test role quota -CREATE SCHEMA srole; -SET search_path TO srole; -CREATE ROLE u1 NOLOGIN; -NOTICE: resource queue required -- using default resource queue "pg_default" -CREATE ROLE u2 NOLOGIN; -NOTICE: resource queue required -- using default resource queue "pg_default" -CREATE TABLE b (t TEXT) DISTRIBUTED BY (t); -ALTER TABLE b OWNER TO u1; -CREATE TABLE b2 (t TEXT) DISTRIBUTED BY (t); -ALTER TABLE b2 OWNER TO u1; -SELECT diskquota.set_role_quota('u1', '1 MB'); - set_role_quota ----------------- - -(1 row) - -INSERT INTO b SELECT generate_series(1,100); --- expect insert success -INSERT INTO b SELECT generate_series(1,100000); -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect insert fail -INSERT INTO b SELECT generate_series(1,100); -ERROR: role's disk space quota exceeded with name: u1 --- expect insert fail -INSERT INTO b2 SELECT generate_series(1,100); -ERROR: role's disk space quota exceeded with name: u1 --- Delete role quota -SELECT diskquota.set_role_quota('u1', '-1 MB'); - set_role_quota ----------------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect insert success -INSERT INTO b SELECT generate_series(1,100); --- Reset role quota -SELECT diskquota.set_role_quota('u1', '1 MB'); - set_role_quota ----------------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect insert fail -INSERT INTO b SELECT generate_series(1,100); -ERROR: role's disk space quota exceeded with name: u1 -SELECT role_name, quota_in_mb, rolsize_in_bytes FROM diskquota.show_fast_role_quota_view WHERE role_name='u1'; - role_name | quota_in_mb | rolsize_in_bytes ------------+-------------+------------------ - u1 | 1 |
4194304 -(1 row) - -SELECT tableid::regclass, size, segid -FROM diskquota.table_size -WHERE tableid = 'b'::regclass -ORDER BY segid; - tableid | size | segid ----------+---------+------- - b | 4063232 | -1 - b | 1343488 | 0 - b | 1343488 | 1 - b | 1343488 | 2 -(4 rows) - -SELECT tableid::regclass, size, segid -FROM diskquota.table_size -WHERE tableid = 'b2'::regclass -ORDER BY segid; - tableid | size | segid ----------+--------+------- - b2 | 131072 | -1 - b2 | 32768 | 0 - b2 | 32768 | 1 - b2 | 32768 | 2 -(4 rows) - -ALTER TABLE b OWNER TO u2; -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect insert succeed -INSERT INTO b SELECT generate_series(1,100); --- expect insert succeed -INSERT INTO b2 SELECT generate_series(1,100); --- superuser is blocked to set quota ---start_ignore -SELECT rolname from pg_roles where rolsuper=true; - rolname ---------- - zhrt -(1 row) - ---end_ignore -\gset -select diskquota.set_role_quota(:'rolname', '1mb'); -ERROR: Can not set disk quota for system owner: zhrt -select diskquota.set_role_quota(:'rolname', '-1mb'); - set_role_quota ----------------- - -(1 row) - -CREATE ROLE "Tn" NOLOGIN; -NOTICE: resource queue required -- using default resource queue "pg_default" -SELECT diskquota.set_role_quota('Tn', '-1 MB'); -- fail -ERROR: role "tn" does not exist -SELECT diskquota.set_role_quota('"tn"', '-1 MB'); -- fail -ERROR: role "tn" does not exist -SELECT diskquota.set_role_quota('"Tn"', '-1 MB'); - set_role_quota ----------------- - -(1 row) - -DROP TABLE b, b2; -DROP ROLE u1, u2, "Tn"; -RESET search_path; -DROP SCHEMA srole; diff --git a/tests/regress/expected7/test_schema.out b/tests/regress/expected7/test_schema.out deleted file mode 100644 index 866b4b3e127..00000000000 --- a/tests/regress/expected7/test_schema.out +++ /dev/null @@ -1,109 +0,0 @@ --- Test schema -CREATE SCHEMA s1; -SET search_path TO s1; -CREATE TABLE a(i int) DISTRIBUTED BY (i); -INSERT INTO a SELECT generate_series(1,100); --- expect insert success -INSERT INTO a SELECT generate_series(1,100000); -SELECT diskquota.set_schema_quota('s1', '1 MB'); - set_schema_quota ------------------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect insert fail -INSERT INTO a SELECT generate_series(1,100); -ERROR: schema's disk space quota exceeded with name: s1 -CREATE TABLE a2(i int) DISTRIBUTED BY (i); --- expect insert fail -INSERT INTO a2 SELECT generate_series(1,100); -ERROR: schema's disk space quota exceeded with name: s1 --- Test alter table set schema -CREATE SCHEMA s2; -ALTER TABLE s1.a SET SCHEMA s2; -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect insert succeed -INSERT INTO a2 SELECT generate_series(1,200); --- expect insert succeed -INSERT INTO s2.a SELECT generate_series(1,200); --- prepare a schema that has reached quota limit -CREATE SCHEMA badquota; -DROP ROLE IF EXISTS testbody; -NOTICE: role "testbody" does not exist, skipping -CREATE ROLE testbody; -NOTICE: resource queue required -- using default resource queue "pg_default" -CREATE TABLE badquota.t1(i INT) DISTRIBUTED BY (i); -ALTER TABLE badquota.t1 OWNER TO testbody; -INSERT INTO badquota.t1 SELECT generate_series(0, 100000); -SELECT diskquota.init_table_size_table(); - init_table_size_table ------------------------ - -(1 row) - -SELECT diskquota.set_schema_quota('badquota', '1 MB'); 
- set_schema_quota ------------------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -SELECT size, segid FROM diskquota.table_size - WHERE tableid IN (SELECT oid FROM pg_class WHERE relname='t1') - ORDER BY segid DESC; - size | segid ----------+------- - 1310720 | 2 - 1310720 | 1 - 1310720 | 0 - 3932160 | -1 -(4 rows) - --- expect fail -INSERT INTO badquota.t1 SELECT generate_series(0, 10); -ERROR: schema's disk space quota exceeded with name: badquota -ALTER TABLE s2.a SET SCHEMA badquota; --- expect failed -INSERT INTO badquota.a SELECT generate_series(0, 100); -ERROR: schema's disk space quota exceeded with name: badquota -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -SELECT schema_name, quota_in_mb FROM diskquota.show_fast_schema_quota_view WHERE schema_name = 's1'; - schema_name | quota_in_mb --------------+------------- - s1 | 1 -(1 row) - -CREATE SCHEMA "Tn1"; -SELECT diskquota.set_schema_quota('"Tn1"', '-1 MB'); - set_schema_quota ------------------- - -(1 row) - -RESET search_path; -DROP TABLE s1.a2, badquota.a; -DROP SCHEMA s1, s2, "Tn1"; -DROP TABLE badquota.t1; -DROP ROLE testbody; -DROP SCHEMA badquota; diff --git a/tests/regress/expected7/test_show_status.out b/tests/regress/expected7/test_show_status.out deleted file mode 100644 index 14c3e7de9fd..00000000000 --- a/tests/regress/expected7/test_show_status.out +++ /dev/null @@ -1,67 +0,0 @@ -select * from diskquota.status() where name not like '%version'; - name | status --------------+-------- - soft limits | on - hard limits | off -(2 rows) - -\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null -\! gpstop -u > /dev/null -select * from diskquota.status() where name not like '%version'; - name | status --------------+-------- - soft limits | on - hard limits | on -(2 rows) - -\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null -\! gpstop -u > /dev/null -select * from diskquota.status() where name not like '%version'; - name | status --------------+-------- - soft limits | on - hard limits | off -(2 rows) - -select from diskquota.pause(); --- -(1 row) - -select * from diskquota.status() where name not like '%version'; - name | status --------------+-------- - soft limits | paused - hard limits | off -(2 rows) - -\! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null -\! gpstop -u > /dev/null -select * from diskquota.status() where name not like '%version'; - name | status --------------+-------- - soft limits | paused - hard limits | paused -(2 rows) - -\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null -\! gpstop -u > /dev/null -select * from diskquota.status() where name not like '%version'; - name | status --------------+-------- - soft limits | paused - hard limits | off -(2 rows) - -select from diskquota.resume(); --- -(1 row) - -\! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null -\! 
gpstop -u > /dev/null -select * from diskquota.status() where name not like '%version'; - name | status --------------+-------- - soft limits | on - hard limits | off -(2 rows) - diff --git a/tests/regress/expected7/test_tablespace_diff_schema.out b/tests/regress/expected7/test_tablespace_diff_schema.out deleted file mode 100644 index 93da486b836..00000000000 --- a/tests/regress/expected7/test_tablespace_diff_schema.out +++ /dev/null @@ -1,87 +0,0 @@ --- allow setting quotas for different schemas in the same tablespace --- deleting the quota for one schema will not drop other schemas' quotas in the same tablespace --- start_ignore -\! mkdir -p /tmp/spc_diff_schema --- end_ignore -CREATE TABLESPACE spc_diff_schema LOCATION '/tmp/spc_diff_schema'; -CREATE SCHEMA schema_in_tablespc; -SET search_path TO schema_in_tablespc; -CREATE TABLE a(i int) TABLESPACE spc_diff_schema DISTRIBUTED BY (i); -INSERT INTO a SELECT generate_series(1,100); -SELECT diskquota.set_schema_tablespace_quota('schema_in_tablespc', 'spc_diff_schema','1 MB'); - set_schema_tablespace_quota ------------------------------ - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- with hardlimits off, expect to succeed -INSERT INTO a SELECT generate_series(1,1000000); --- wait for next loop for bgworker to add it to rejectmap -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect to fail -INSERT INTO a SELECT generate_series(1,1000000); -ERROR: tablespace: spc_diff_schema, schema: schema_in_tablespc diskquota exceeded -SELECT schema_name, tablespace_name FROM diskquota.show_fast_schema_tablespace_quota_view; - schema_name | tablespace_name ---------------------+----------------- - schema_in_tablespc | spc_diff_schema -(1 row) - -SELECT diskquota.set_schema_tablespace_quota('schema_in_tablespc', 'pg_default','1 MB'); - set_schema_tablespace_quota ------------------------------ - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -SELECT schema_name, tablespace_name FROM diskquota.show_fast_schema_tablespace_quota_view; - schema_name | tablespace_name ---------------------+----------------- - schema_in_tablespc | spc_diff_schema - schema_in_tablespc | pg_default -(2 rows) - -SELECT diskquota.set_schema_tablespace_quota('schema_in_tablespc', 'pg_default','-1'); - set_schema_tablespace_quota ------------------------------ - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -SELECT schema_name, tablespace_name FROM diskquota.show_fast_schema_tablespace_quota_view; - schema_name | tablespace_name ---------------------+----------------- - schema_in_tablespc | spc_diff_schema -(1 row) - --- expect to fail -INSERT INTO a SELECT generate_series(1,1000000); -ERROR: tablespace: spc_diff_schema, schema: schema_in_tablespc diskquota exceeded -reset search_path; -DROP TABLE IF EXISTS schema_in_tablespc.a; -DROP tablespace IF EXISTS spc_diff_schema; -DROP SCHEMA IF EXISTS schema_in_tablespc; --- start_ignore -\!
rmdir /tmp/spc_diff_schema - -- end_ignore diff --git a/tests/regress/expected7/test_tablespace_role.out b/tests/regress/expected7/test_tablespace_role.out deleted file mode 100644 index b926890bc81..00000000000 --- a/tests/regress/expected7/test_tablespace_role.out +++ /dev/null @@ -1,194 +0,0 @@ --- Test role quota --- start_ignore -\! mkdir -p /tmp/rolespc --- end_ignore -DROP TABLESPACE IF EXISTS rolespc; -NOTICE: tablespace "rolespc" does not exist, skipping -CREATE TABLESPACE rolespc LOCATION '/tmp/rolespc'; -CREATE SCHEMA rolespcrole; -SET search_path TO rolespcrole; -DROP ROLE IF EXISTS rolespcu1; -NOTICE: role "rolespcu1" does not exist, skipping -DROP ROLE IF EXISTS rolespcu2; -NOTICE: role "rolespcu2" does not exist, skipping -CREATE ROLE rolespcu1 NOLOGIN; -NOTICE: resource queue required -- using default resource queue "pg_default" -CREATE ROLE rolespcu2 NOLOGIN; -NOTICE: resource queue required -- using default resource queue "pg_default" -CREATE TABLE b (t TEXT) TABLESPACE rolespc DISTRIBUTED BY (t); -CREATE TABLE b2 (t TEXT) TABLESPACE rolespc DISTRIBUTED BY (t); -ALTER TABLE b2 OWNER TO rolespcu1; -INSERT INTO b SELECT generate_series(1,100); --- expect insert success -INSERT INTO b SELECT generate_series(1,100000); -SELECT diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '1 MB'); - set_role_tablespace_quota ---------------------------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect insert success -INSERT INTO b SELECT generate_series(1,100); -ALTER TABLE b OWNER TO rolespcu1; -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect insert fail -INSERT INTO b SELECT generate_series(1,100); -ERROR: tablespace: rolespc, role: rolespcu1 diskquota exceeded --- expect insert fail -INSERT INTO b2 SELECT generate_series(1,100); -ERROR: tablespace: rolespc, role: rolespcu1 diskquota exceeded --- Test show_fast_role_tablespace_quota_view -SELECT role_name, tablespace_name, quota_in_mb, rolsize_tablespace_in_bytes FROM diskquota.show_fast_role_tablespace_quota_view WHERE role_name = 'rolespcu1' and tablespace_name = 'rolespc'; - role_name | tablespace_name | quota_in_mb | rolsize_tablespace_in_bytes ------------+-----------------+-------------+----------------------------- - rolespcu1 | rolespc | 1 | 4194304 -(1 row) - --- Test alter owner -ALTER TABLE b OWNER TO rolespcu2; -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect insert succeed -INSERT INTO b SELECT generate_series(1,100); --- expect insert succeed -INSERT INTO b2 SELECT generate_series(1,100); -ALTER TABLE b OWNER TO rolespcu1; -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect insert fail -INSERT INTO b SELECT generate_series(1,100); -ERROR: tablespace: rolespc, role: rolespcu1 diskquota exceeded --- Test alter tablespace --- start_ignore -\! 
mkdir -p /tmp/rolespc2 --- end_ignore -DROP TABLESPACE IF EXISTS rolespc2; -NOTICE: tablespace "rolespc2" does not exist, skipping -CREATE TABLESPACE rolespc2 LOCATION '/tmp/rolespc2'; -ALTER TABLE b SET TABLESPACE rolespc2; -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect insert succeed -INSERT INTO b SELECT generate_series(1,100); --- alter table b back to tablespace rolespc -ALTER TABLE b SET TABLESPACE rolespc; -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect insert fail -INSERT INTO b SELECT generate_series(1,100); -ERROR: tablespace: rolespc, role: rolespcu1 diskquota exceeded --- Test update quota config -SELECT diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '10 MB'); - set_role_tablespace_quota ---------------------------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect insert success -INSERT INTO b SELECT generate_series(1,100); --- expect insert success -INSERT INTO b SELECT generate_series(1,1000000); -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect insert fail -INSERT INTO b SELECT generate_series(1,100); -ERROR: tablespace: rolespc, role: rolespcu1 diskquota exceeded --- Test delete quota config -SELECT diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '-1 MB'); - set_role_tablespace_quota ---------------------------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect insert success -INSERT INTO b SELECT generate_series(1,100); --- superuser is blocked to set quota --- start_ignore -SELECT rolname from pg_roles where rolsuper=true; - rolname ---------- - zhrt -(1 row) - --- end_ignore -\gset -select diskquota.set_role_tablespace_quota(:'rolname', 'rolespc', '1mb'); -ERROR: Can not set disk quota for system owner: zhrt --- start_ignore -\! mkdir -p /tmp/rolespc3 --- end_ignore -DROP ROLE IF EXISTS "Rolespcu3"; -NOTICE: role "Rolespcu3" does not exist, skipping -CREATE ROLE "Rolespcu3" NOLOGIN; -NOTICE: resource queue required -- using default resource queue "pg_default" -DROP TABLESPACE IF EXISTS "Rolespc3"; -NOTICE: tablespace "Rolespc3" does not exist, skipping -CREATE TABLESPACE "Rolespc3" LOCATION '/tmp/rolespc3'; -SELECT diskquota.set_role_tablespace_quota('rolespcu1', '"Rolespc3"', '-1 MB'); - set_role_tablespace_quota ---------------------------- - -(1 row) - -SELECT diskquota.set_role_tablespace_quota('"Rolespcu3"', 'rolespc', '-1 mB'); - set_role_tablespace_quota ---------------------------- - -(1 row) - -SELECT diskquota.set_role_tablespace_quota('"Rolespcu3"', '"Rolespc3"', '-1 Mb'); - set_role_tablespace_quota ---------------------------- - -(1 row) - -DROP TABLE b, b2; -DROP ROLE rolespcu1, rolespcu2; -RESET search_path; -DROP SCHEMA rolespcrole; -DROP TABLESPACE rolespc; -DROP TABLESPACE rolespc2; -DROP TABLESPACE "Rolespc3"; diff --git a/tests/regress/expected7/test_tablespace_role_perseg.out b/tests/regress/expected7/test_tablespace_role_perseg.out deleted file mode 100644 index c30030325d7..00000000000 --- a/tests/regress/expected7/test_tablespace_role_perseg.out +++ /dev/null @@ -1,235 +0,0 @@ --- Test role quota --- start_ignore -\! 
mkdir -p /tmp/rolespc_perseg --- end_ignore -DROP TABLESPACE IF EXISTS rolespc_perseg; -NOTICE: tablespace "rolespc_perseg" does not exist, skipping -CREATE TABLESPACE rolespc_perseg LOCATION '/tmp/rolespc_perseg'; -CREATE SCHEMA rolespc_persegrole; -SET search_path TO rolespc_persegrole; -DROP ROLE IF EXISTS rolespc_persegu1; -NOTICE: role "rolespc_persegu1" does not exist, skipping -DROP ROLE IF EXISTS rolespc_persegu2; -NOTICE: role "rolespc_persegu2" does not exist, skipping -CREATE ROLE rolespc_persegu1 NOLOGIN; -NOTICE: resource queue required -- using default resource queue "pg_default" -CREATE ROLE rolespc_persegu2 NOLOGIN; -NOTICE: resource queue required -- using default resource queue "pg_default" -CREATE TABLE b (t TEXT) TABLESPACE rolespc_perseg DISTRIBUTED BY (t); -ALTER TABLE b OWNER TO rolespc_persegu1; -SELECT diskquota.set_role_tablespace_quota('rolespc_persegu1', 'rolespc_perseg', '1 MB'); - set_role_tablespace_quota ---------------------------- - -(1 row) - -INSERT INTO b SELECT generate_series(1,100); --- expect insert success -INSERT INTO b SELECT generate_series(1,100000); -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect insert fail -INSERT INTO b SELECT generate_series(1,100); -ERROR: tablespace: rolespc_perseg, role: rolespc_persegu1 diskquota exceeded --- change tablespace role quota -SELECT diskquota.set_role_tablespace_quota('rolespc_persegu1', 'rolespc_perseg', '10 MB'); - set_role_tablespace_quota ---------------------------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect insert success -INSERT INTO b SELECT generate_series(1,100); --- Test show_fast_schema_tablespace_quota_view -SELECT role_name, tablespace_name, quota_in_mb, rolsize_tablespace_in_bytes FROM diskquota.show_fast_role_tablespace_quota_view WHERE role_name = 'rolespc_persegu1' and tablespace_name = 'rolespc_perseg'; - role_name | tablespace_name | quota_in_mb | rolsize_tablespace_in_bytes -------------------+-----------------+-------------+----------------------------- - rolespc_persegu1 | rolespc_perseg | 10 | 4063232 -(1 row) - -SELECT diskquota.set_per_segment_quota('rolespc_perseg', '0.1'); - set_per_segment_quota ------------------------ - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - ----- expect insert fail by tablespace schema perseg quota -INSERT INTO b SELECT generate_series(1,100); -ERROR: tablespace: rolespc_perseg, role: rolespc_persegu1 diskquota exceeded per segment quota --- Test alter owner -ALTER TABLE b OWNER TO rolespc_persegu2; -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect insert succeed -INSERT INTO b SELECT generate_series(1,100); -ALTER TABLE b OWNER TO rolespc_persegu1; -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect insert fail -INSERT INTO b SELECT generate_series(1,100); -ERROR: tablespace: rolespc_perseg, role: rolespc_persegu1 diskquota exceeded per segment quota --- Test alter tablespace --- start_ignore -\! 
mkdir -p /tmp/rolespc_perseg2 --- end_ignore -DROP TABLESPACE IF EXISTS rolespc_perseg2; -NOTICE: tablespace "rolespc_perseg2" does not exist, skipping -CREATE TABLESPACE rolespc_perseg2 LOCATION '/tmp/rolespc_perseg2'; -ALTER TABLE b SET TABLESPACE rolespc_perseg2; -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect insert succeed -INSERT INTO b SELECT generate_series(1,100); --- alter table b back to tablespace rolespc_perseg -ALTER TABLE b SET TABLESPACE rolespc_perseg; -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect insert fail -INSERT INTO b SELECT generate_series(1,100); -ERROR: tablespace: rolespc_perseg, role: rolespc_persegu1 diskquota exceeded per segment quota --- Test update per segment ratio -SELECT diskquota.set_per_segment_quota('rolespc_perseg', 3.1); - set_per_segment_quota ------------------------ - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect insert success -INSERT INTO b SELECT generate_series(1,100); -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -SELECT role_name, tablespace_name, quota_in_mb, rolsize_tablespace_in_bytes FROM diskquota.show_fast_role_tablespace_quota_view WHERE role_name = 'rolespc_persegu1' and tablespace_name = 'rolespc_perseg'; - role_name | tablespace_name | quota_in_mb | rolsize_tablespace_in_bytes -------------------+-----------------+-------------+----------------------------- - rolespc_persegu1 | rolespc_perseg | 10 | 4063232 -(1 row) - -SELECT diskquota.set_per_segment_quota('rolespc_perseg', 0.11); - set_per_segment_quota ------------------------ - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect insert fail -INSERT INTO b SELECT generate_series(1,100); -ERROR: tablespace: rolespc_perseg, role: rolespc_persegu1 diskquota exceeded per segment quota --- Test delete per segment ratio -SELECT diskquota.set_per_segment_quota('rolespc_perseg', -1); - set_per_segment_quota ------------------------ - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect insert success -INSERT INTO b SELECT generate_series(1,100); -SELECT diskquota.set_per_segment_quota('rolespc_perseg', 0.11); - set_per_segment_quota ------------------------ - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect insert fail -INSERT INTO b SELECT generate_series(1,100); -ERROR: tablespace: rolespc_perseg, role: rolespc_persegu1 diskquota exceeded per segment quota --- Test delete quota config -SELECT diskquota.set_role_tablespace_quota('rolespc_persegu1', 'rolespc_perseg', '-1 MB'); - set_role_tablespace_quota ---------------------------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect insert success -INSERT INTO b SELECT generate_series(1,100); --- start_ignore -\! 
mkdir -p /tmp/rolespc_perseg3 --- end_ignore -DROP TABLESPACE IF EXISTS "Rolespc_perseg3"; -NOTICE: tablespace "Rolespc_perseg3" does not exist, skipping -CREATE TABLESPACE "Rolespc_perseg3" LOCATION '/tmp/rolespc_perseg3'; -CREATE ROLE "Rolespc_persegu3" NOLOGIN; -NOTICE: resource queue required -- using default resource queue "pg_default" -SELECT diskquota.set_role_tablespace_quota('"Rolespc_persegu3"', '"Rolespc_perseg3"', '-1 MB'); - set_role_tablespace_quota ---------------------------- - -(1 row) - -SELECT diskquota.set_per_segment_quota('"Rolespc_perseg3"', 0.11); - set_per_segment_quota ------------------------ - -(1 row) - -DROP table b; -DROP ROLE rolespc_persegu1, rolespc_persegu2, "Rolespc_persegu3"; -RESET search_path; -DROP SCHEMA rolespc_persegrole; -DROP TABLESPACE rolespc_perseg; -DROP TABLESPACE rolespc_perseg2; -DROP TABLESPACE "Rolespc_perseg3"; diff --git a/tests/regress/expected7/test_tablespace_schema.out b/tests/regress/expected7/test_tablespace_schema.out deleted file mode 100644 index a7e57c594be..00000000000 --- a/tests/regress/expected7/test_tablespace_schema.out +++ /dev/null @@ -1,147 +0,0 @@ --- Test schema --- start_ignore -\! mkdir -p /tmp/schemaspc --- end_ignore -CREATE SCHEMA spcs1; -DROP TABLESPACE IF EXISTS schemaspc; -NOTICE: tablespace "schemaspc" does not exist, skipping -CREATE TABLESPACE schemaspc LOCATION '/tmp/schemaspc'; -SET search_path TO spcs1; -CREATE TABLE a(i int) TABLESPACE schemaspc DISTRIBUTED BY (i); -INSERT INTO a SELECT generate_series(1,100); --- expect insert fail -INSERT INTO a SELECT generate_series(1,100000); -SELECT diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc','1 MB'); - set_schema_tablespace_quota ------------------------------ - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect insert fail -INSERT INTO a SELECT generate_series(1,100); -ERROR: tablespace: schemaspc, schema: spcs1 diskquota exceeded -CREATE TABLE a2(i int) TABLESPACE schemaspc DISTRIBUTED BY (i); --- expect insert fail -INSERT INTO a2 SELECT generate_series(1,100); -ERROR: tablespace: schemaspc, schema: spcs1 diskquota exceeded --- Test alter table set schema -CREATE SCHEMA spcs2; -ALTER TABLE spcs1.a SET SCHEMA spcs2; -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect insert succeed -INSERT INTO a2 SELECT generate_series(1,200); --- expect insert succeed -INSERT INTO spcs2.a SELECT generate_series(1,200); -ALTER TABLE spcs2.a SET SCHEMA spcs1; -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect insert fail -INSERT INTO a SELECT generate_series(1,200); -ERROR: tablespace: schemaspc, schema: spcs1 diskquota exceeded -SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name = 'spcs1' and tablespace_name ='schemaspc'; - schema_name | tablespace_name | quota_in_mb | nspsize_tablespace_in_bytes --------------+-----------------+-------------+----------------------------- - spcs1 | schemaspc | 1 | 4030464 -(1 row) - --- Test alter tablespace --- start_ignore -\! 
mkdir -p /tmp/schemaspc2 --- end_ignore -DROP TABLESPACE IF EXISTS schemaspc2; -NOTICE: tablespace "schemaspc2" does not exist, skipping -CREATE TABLESPACE schemaspc2 LOCATION '/tmp/schemaspc2'; -ALTER TABLE a SET TABLESPACE schemaspc2; -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect insert succeed -INSERT INTO a SELECT generate_series(1,200); -ALTER TABLE a SET TABLESPACE schemaspc; -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect insert fail -INSERT INTO a SELECT generate_series(1,200); -ERROR: tablespace: schemaspc, schema: spcs1 diskquota exceeded --- Test update quota config -SELECT diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc', '10 MB'); - set_schema_tablespace_quota ------------------------------ - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect insert success -INSERT INTO a SELECT generate_series(1,100); --- expect insert success -INSERT INTO a SELECT generate_series(1,1000000); -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect insert fail -INSERT INTO a SELECT generate_series(1,100); -ERROR: tablespace: schemaspc, schema: spcs1 diskquota exceeded --- Test delete quota config -SELECT diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc', '-1 MB'); - set_schema_tablespace_quota ------------------------------ - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect insert success -INSERT INTO a SELECT generate_series(1,100); --- start_ignore -\! mkdir -p /tmp/schemaspc3 --- end_ignore -DROP TABLESPACE IF EXISTS "Schemaspc3"; -NOTICE: tablespace "Schemaspc3" does not exist, skipping -CREATE TABLESPACE "Schemaspc3" LOCATION '/tmp/schemaspc3'; -CREATE SCHEMA "Spcs2"; -SELECT diskquota.set_schema_tablespace_quota('"Spcs2"', '"Schemaspc3"', '-1 MB'); - set_schema_tablespace_quota ------------------------------ - -(1 row) - -RESET search_path; -DROP TABLE spcs1.a2, spcs1.a; -DROP SCHEMA spcs1, spcs2; -DROP TABLESPACE schemaspc; -DROP TABLESPACE schemaspc2; -DROP TABLESPACE "Schemaspc3"; diff --git a/tests/regress/expected7/test_tablespace_schema_perseg.out b/tests/regress/expected7/test_tablespace_schema_perseg.out deleted file mode 100644 index c27f3e0ea9e..00000000000 --- a/tests/regress/expected7/test_tablespace_schema_perseg.out +++ /dev/null @@ -1,282 +0,0 @@ --- Test schema --- start_ignore -\! 
mkdir -p /tmp/schemaspc_perseg --- end_ignore --- Test tablespace quota perseg -CREATE SCHEMA spcs1_perseg; -DROP TABLESPACE IF EXISTS schemaspc_perseg; -NOTICE: tablespace "schemaspc_perseg" does not exist, skipping -CREATE TABLESPACE schemaspc_perseg LOCATION '/tmp/schemaspc_perseg'; -SELECT diskquota.set_schema_tablespace_quota('spcs1_perseg', 'schemaspc_perseg','1 MB'); - set_schema_tablespace_quota ------------------------------ - -(1 row) - -SET search_path TO spcs1_perseg; -CREATE TABLE a(i int) TABLESPACE schemaspc_perseg DISTRIBUTED BY (i); -INSERT INTO a SELECT generate_series(1,100); --- expect insert success -INSERT INTO a SELECT generate_series(1,100000); -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect insert fail by tablespace schema diskquota -INSERT INTO a SELECT generate_series(1,100); -ERROR: tablespace: schemaspc_perseg, schema: spcs1_perseg diskquota exceeded --- change tablespace schema quota -SELECT diskquota.set_schema_tablespace_quota('spcs1_perseg', 'schemaspc_perseg', '10 MB'); - set_schema_tablespace_quota ------------------------------ - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect insert success -INSERT INTO a SELECT generate_series(1,100); -SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name = 'spcs1_perseg' and tablespace_name ='schemaspc_perseg'; - schema_name | tablespace_name | quota_in_mb | nspsize_tablespace_in_bytes ---------------+------------------+-------------+----------------------------- - spcs1_perseg | schemaspc_perseg | 10 | 3932160 -(1 row) - -SELECT diskquota.set_per_segment_quota('schemaspc_perseg', 0.1); - set_per_segment_quota ------------------------ - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - ----- expect insert fail by tablespace schema perseg quota -INSERT INTO a SELECT generate_series(1,100); -ERROR: tablespace: schemaspc_perseg, schema: spcs1_perseg diskquota exceeded per segment quota --- Test alter table set schema -CREATE SCHEMA spcs2_perseg; -ALTER TABLE spcs1_perseg.a SET SCHEMA spcs2_perseg; -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect insert succeed -INSERT INTO spcs2_perseg.a SELECT generate_series(1,200); -ALTER TABLE spcs2_perseg.a SET SCHEMA spcs1_perseg; -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect insert fail -INSERT INTO a SELECT generate_series(1,200); -ERROR: tablespace: schemaspc_perseg, schema: spcs1_perseg diskquota exceeded per segment quota -SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name = 'spcs1_perseg' and tablespace_name ='schemaspc_perseg'; - schema_name | tablespace_name | quota_in_mb | nspsize_tablespace_in_bytes ---------------+------------------+-------------+----------------------------- - spcs1_perseg | schemaspc_perseg | 10 | 3932160 -(1 row) - --- Test alter tablespace --- start_ignore -\! 
mkdir -p /tmp/schemaspc_perseg2 --- end_ignore -DROP TABLESPACE IF EXISTS "Schemaspc_perseg2"; -NOTICE: tablespace "Schemaspc_perseg2" does not exist, skipping -CREATE TABLESPACE "Schemaspc_perseg2" LOCATION '/tmp/schemaspc_perseg2'; -ALTER TABLE a SET TABLESPACE "Schemaspc_perseg2"; -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect insert succeed -INSERT INTO a SELECT generate_series(1,200); -ALTER TABLE a SET TABLESPACE schemaspc_perseg; -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect insert fail -INSERT INTO a SELECT generate_series(1,200); -ERROR: tablespace: schemaspc_perseg, schema: spcs1_perseg diskquota exceeded per segment quota --- Test update per segment ratio -SELECT diskquota.set_per_segment_quota('schemaspc_perseg', 3.1); - set_per_segment_quota ------------------------ - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect insert success -INSERT INTO a SELECT generate_series(1,100); -SELECT diskquota.set_per_segment_quota('schemaspc_perseg', 0.123); - set_per_segment_quota ------------------------ - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - ----- expect insert fail -INSERT INTO a SELECT generate_series(1,100); -ERROR: tablespace: schemaspc_perseg, schema: spcs1_perseg diskquota exceeded per segment quota --- Test delete per segment ratio -SELECT diskquota.set_per_segment_quota('schemaspc_perseg', -1); - set_per_segment_quota ------------------------ - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect insert success -INSERT INTO a SELECT generate_series(1,100); -SELECT diskquota.set_per_segment_quota('schemaspc_perseg', 0.123); - set_per_segment_quota ------------------------ - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - ----- expect insert fail -INSERT INTO a SELECT generate_series(1,100); -ERROR: tablespace: schemaspc_perseg, schema: spcs1_perseg diskquota exceeded per segment quota --- Test delete tablespace schema quota -SELECT diskquota.set_per_segment_quota('schemaspc_perseg', 2); - set_per_segment_quota ------------------------ - -(1 row) - -SELECT diskquota.set_schema_tablespace_quota('spcs1_perseg', 'schemaspc_perseg','-1 MB'); - set_schema_tablespace_quota ------------------------------ - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect insert success -INSERT INTO a SELECT generate_series(1,100); -SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name = 'spcs1_perseg' and tablespace_name ='schemaspc_perseg'; - schema_name | tablespace_name | quota_in_mb | nspsize_tablespace_in_bytes --------------+-----------------+-------------+----------------------------- -(0 rows) - --- test config per segment quota -SELECT diskquota.set_per_segment_quota('"Schemaspc_perseg2"','1'); - set_per_segment_quota ------------------------ - -(1 row) - -SELECT distinct(segratio) from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'Schemaspc_perseg2'; - segratio ----------- 
- 1 -(1 row) - -SELECT diskquota.set_schema_tablespace_quota('spcs2_perseg', '"Schemaspc_perseg2"','1 MB'); - set_schema_tablespace_quota ------------------------------ - -(1 row) - -SELECT distinct(segratio) FROM diskquota.quota_config, pg_namespace, diskquota.target - WHERE diskquota.quota_config.targetoid = diskquota.target.rowId AND - diskquota.target.primaryOid = pg_namespace.oid AND nspname = 'spcs2_perseg'; - segratio ----------- - 1 -(1 row) - -SELECT diskquota.set_per_segment_quota('"Schemaspc_perseg2"','-2'); - set_per_segment_quota ------------------------ - -(1 row) - -SELECT distinct(segratio) from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'Schemaspc_perseg2'; - segratio ----------- -(0 rows) - -SELECT distinct(segratio) FROM diskquota.quota_config, pg_namespace, diskquota.target - WHERE diskquota.quota_config.targetoid = diskquota.target.rowId AND - diskquota.target.primaryOid = pg_namespace.oid AND nspname = 'spcs2_perseg'; - segratio ----------- - 0 -(1 row) - -SELECT diskquota.set_per_segment_quota('"Schemaspc_perseg2"','3'); - set_per_segment_quota ------------------------ - -(1 row) - -SELECT distinct(segratio) from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'Schemaspc_perseg2'; - segratio ----------- - 3 -(1 row) - -SELECT distinct(segratio) FROM diskquota.quota_config, pg_namespace, diskquota.target - WHERE diskquota.quota_config.targetoid = diskquota.target.rowId AND - diskquota.target.primaryOid = pg_namespace.oid AND nspname = 'spcs2_perseg'; - segratio ----------- - 3 -(1 row) - -SELECT tablespace_name, per_seg_quota_ratio FROM diskquota.show_segment_ratio_quota_view where tablespace_name in ('Schemaspc_perseg2', 'schemaspc_perseg'); - tablespace_name | per_seg_quota_ratio --------------------+--------------------- - schemaspc_perseg | 2 - Schemaspc_perseg2 | 3 -(2 rows) - -RESET search_path; -DROP TABLE spcs1_perseg.a; -DROP SCHEMA spcs1_perseg; -DROP TABLESPACE schemaspc_perseg; -DROP TABLESPACE "Schemaspc_perseg2"; diff --git a/tests/regress/expected7/test_temp_role.out b/tests/regress/expected7/test_temp_role.out deleted file mode 100644 index 4493325717e..00000000000 --- a/tests/regress/expected7/test_temp_role.out +++ /dev/null @@ -1,40 +0,0 @@ --- Test temp table restrained by role id -CREATE SCHEMA strole; -CREATE ROLE u3temp NOLOGIN; -NOTICE: resource queue required -- using default resource queue "pg_default" -SET search_path TO strole; -SELECT diskquota.set_role_quota('u3temp', '1MB'); - set_role_quota ----------------- - -(1 row) - -CREATE TABLE a(i int) DISTRIBUTED BY (i); -ALTER TABLE a OWNER TO u3temp; -CREATE TEMP TABLE ta(i int); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
-ALTER TABLE ta OWNER TO u3temp; --- expected failed: fill temp table -INSERT INTO ta SELECT generate_series(1,100000); -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expected failed: -INSERT INTO a SELECT generate_series(1,100); -ERROR: role's disk space quota exceeded with name: u3temp -DROP TABLE ta; -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -INSERT INTO a SELECT generate_series(1,100); -DROP TABLE a; -DROP ROLE u3temp; -RESET search_path; -DROP SCHEMA strole; diff --git a/tests/regress/expected7/test_toast.out b/tests/regress/expected7/test_toast.out deleted file mode 100644 index df0b0c154c2..00000000000 --- a/tests/regress/expected7/test_toast.out +++ /dev/null @@ -1,31 +0,0 @@ --- Test toast -CREATE SCHEMA s5; -SELECT diskquota.set_schema_quota('s5', '1 MB'); - set_schema_quota ------------------- - -(1 row) - -SET search_path TO s5; -CREATE TABLE a5 (t text) DISTRIBUTED BY (t); -INSERT INTO a5 -SELECT (SELECT - string_agg(chr(floor(random() * 26)::int + 65), '') - FROM generate_series(1,10000)) -FROM generate_series(1,10000); -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect insert toast fail -INSERT INTO a5 -SELECT (SELECT - string_agg(chr(floor(random() * 26)::int + 65), '') - FROM generate_series(1,1000)) -FROM generate_series(1,1000); -ERROR: schema's disk space quota exceeded with name: s5 -DROP TABLE a5; -RESET search_path; -DROP SCHEMA s5; diff --git a/tests/regress/expected7/test_truncate.out b/tests/regress/expected7/test_truncate.out deleted file mode 100644 index a9fd12392d6..00000000000 --- a/tests/regress/expected7/test_truncate.out +++ /dev/null @@ -1,36 +0,0 @@ --- Test truncate -CREATE SCHEMA s7; -SELECT diskquota.set_schema_quota('s7', '1 MB'); - set_schema_quota ------------------- - -(1 row) - -SET search_path TO s7; -CREATE TABLE a (i int) DISTRIBUTED BY (i); -CREATE TABLE b (i int) DISTRIBUTED BY (i); -INSERT INTO a SELECT generate_series(1,100000); -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect insert fail -INSERT INTO a SELECT generate_series(1,30); -ERROR: schema's disk space quota exceeded with name: s7 -INSERT INTO b SELECT generate_series(1,30); -ERROR: schema's disk space quota exceeded with name: s7 -TRUNCATE TABLE a; -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect insert succeed -INSERT INTO a SELECT generate_series(1,30); -INSERT INTO b SELECT generate_series(1,30); -DROP TABLE a, b; -RESET search_path; -DROP SCHEMA s7; diff --git a/tests/regress/expected7/test_update.out b/tests/regress/expected7/test_update.out deleted file mode 100644 index 5ddb9d8c55b..00000000000 --- a/tests/regress/expected7/test_update.out +++ /dev/null @@ -1,23 +0,0 @@ --- Test Update -CREATE SCHEMA s4; -SELECT diskquota.set_schema_quota('s4', '1 MB'); - set_schema_quota ------------------- - -(1 row) - -SET search_path TO s4; -CREATE TABLE a(i int) DISTRIBUTED BY (i); -INSERT INTO a SELECT generate_series(1,100000); -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect update fail. 
-UPDATE a SET i = 100; -ERROR: schema's disk space quota exceeded with name: s4 -DROP TABLE a; -RESET search_path; -DROP SCHEMA s4; diff --git a/tests/regress/expected7/test_update_db_cache.out b/tests/regress/expected7/test_update_db_cache.out deleted file mode 100644 index 785c8bff409..00000000000 --- a/tests/regress/expected7/test_update_db_cache.out +++ /dev/null @@ -1,64 +0,0 @@ ---start_ignore -CREATE DATABASE test_db_cache; ---end_ignore -\c test_db_cache -CREATE EXTENSION diskquota; -CREATE EXTENSION diskquota_test; --- Wait until the db cache gets updated -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -CREATE TABLE t(i) AS SELECT generate_series(1, 100000) -DISTRIBUTED BY (i); -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -SELECT tableid::regclass, size, segid -FROM diskquota.table_size -WHERE tableid = 't'::regclass -ORDER BY segid; - tableid | size | segid ----------+---------+------- - t | 3637248 | -1 - t | 1212416 | 0 - t | 1212416 | 1 - t | 1212416 | 2 -(4 rows) - -DROP EXTENSION diskquota; --- Create table without extension -CREATE TABLE t_no_extension(i) AS SELECT generate_series(1, 100000) -DISTRIBUTED BY (i); -CREATE EXTENSION diskquota; -WARNING: [diskquota] diskquota is not ready because current database is not empty -HINT: please run 'SELECT diskquota.init_table_size_table();' to initialize diskquota -SELECT diskquota_test.wait('SELECT diskquota_test.check_cur_db_status(''UNREADY'');'); - wait ------- - t -(1 row) - --- Should find nothing since t_no_extension is not recorded. -SELECT diskquota.diskquota_fetch_table_stat(0, ARRAY[]::oid[]) -FROM gp_dist_random('gp_id'); - diskquota_fetch_table_stat ----------------------------- -(0 rows) - -DROP TABLE t; -DROP TABLE t_no_extension; -SELECT diskquota.pause(); - pause -------- - -(1 row) - -DROP EXTENSION diskquota; -\c contrib_regression -DROP DATABASE test_db_cache; diff --git a/tests/regress/expected7/test_vacuum.out b/tests/regress/expected7/test_vacuum.out deleted file mode 100644 index b032274eed3..00000000000 --- a/tests/regress/expected7/test_vacuum.out +++ /dev/null @@ -1,57 +0,0 @@ --- Test vacuum full -CREATE SCHEMA s6; -SELECT diskquota.set_schema_quota('s6', '1 MB'); - set_schema_quota ------------------- - -(1 row) - -SET search_path TO s6; -CREATE TABLE a (i int) DISTRIBUTED BY (i); -CREATE TABLE b (i int) DISTRIBUTED BY (i); -INSERT INTO a SELECT generate_series(1,100000); -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect insert fail -INSERT INTO a SELECT generate_series(1,10); -ERROR: schema's disk space quota exceeded with name: s6 --- expect insert fail -INSERT INTO b SELECT generate_series(1,10); -ERROR: schema's disk space quota exceeded with name: s6 -DELETE FROM a WHERE i > 10; -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -VACUUM FULL a; -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -SELECT tableid::regclass, size, segid from diskquota.table_size WHERE tableid::regclass::name NOT LIKE '%.%' ORDER BY size, segid DESC; - tableid | size | segid ----------+-------+------- - b | 0 | 2 - b | 0 | 1 - b | 0 | 0 - b | 0 | -1 - a | 32768 | 2 - a | 32768 | 1 - a | 32768 | 0 - a | 98304 | -1 -(8 rows) - --- expect insert succeed -INSERT INTO a 
SELECT generate_series(1,10); -INSERT INTO b SELECT generate_series(1,10); -DROP TABLE a, b; -RESET search_path; -DROP SCHEMA s6; diff --git a/tests/regress/expected7/test_worker_not_ready.out b/tests/regress/expected7/test_worker_not_ready.out deleted file mode 100644 index 0424cb65d73..00000000000 --- a/tests/regress/expected7/test_worker_not_ready.out +++ /dev/null @@ -1,26 +0,0 @@ -CREATE DATABASE db_not_ready; -\c db_not_ready; -CREATE TABLE t (i int) DISTRIBUTED BY (i); -CREATE EXTENSION diskquota; -WARNING: [diskquota] diskquota is not ready because current database is not empty -HINT: please run 'SELECT diskquota.init_table_size_table();' to initialize diskquota -CREATE EXTENSION diskquota_test; -SELECT diskquota.set_role_quota(CURRENT_ROLE, '1 MB'); -ERROR: Can not set disk quota for system owner: zhrt -SELECT diskquota.pause(); - pause -------- - -(1 row) - --- diskquota.wait_for_worker_new_epoch() cannot be used here because --- diskquota.state is not clean. -SELECT diskquota_test.wait('SELECT diskquota_test.check_cur_db_status(''UNREADY'');'); - wait ------- - t -(1 row) - -DROP EXTENSION diskquota; -\c contrib_regression -DROP DATABASE db_not_ready; diff --git a/tests/regress/expected7/test_worker_schedule.out b/tests/regress/expected7/test_worker_schedule.out deleted file mode 100644 index ad018a37c2a..00000000000 --- a/tests/regress/expected7/test_worker_schedule.out +++ /dev/null @@ -1,633 +0,0 @@ --- start_ignore -\c -DROP DATABASE IF EXISTS t1; -NOTICE: database "t1" does not exist, skipping -DROP DATABASE IF EXISTS t2; -NOTICE: database "t2" does not exist, skipping -DROP DATABASE IF EXISTS t3; -NOTICE: database "t3" does not exist, skipping -DROP DATABASE IF EXISTS t4; -NOTICE: database "t4" does not exist, skipping -DROP DATABASE IF EXISTS t5; -NOTICE: database "t5" does not exist, skipping -DROP DATABASE IF EXISTS t6; -NOTICE: database "t6" does not exist, skipping -DROP DATABASE IF EXISTS t7; -NOTICE: database "t7" does not exist, skipping -DROP DATABASE IF EXISTS t8; -NOTICE: database "t8" does not exist, skipping -DROP DATABASE IF EXISTS t9; -NOTICE: database "t9" does not exist, skipping -DROP DATABASE IF EXISTS t10; -NOTICE: database "t10" does not exist, skipping -DROP DATABASE IF EXISTS t11; -NOTICE: database "t11" does not exist, skipping -DROP DATABASE IF EXISTS t12; -NOTICE: database "t12" does not exist, skipping -CREATE DATABASE t1; -CREATE DATABASE t2; -CREATE DATABASE t3; -CREATE DATABASE t4; -CREATE DATABASE t5; -CREATE DATABASE t6; -CREATE DATABASE t7; -CREATE DATABASE t8; -CREATE DATABASE t9; -CREATE DATABASE t10; -CREATE DATABASE t11; -CREATE DATABASE t12; ---end_ignore -\c t1 -CREATE EXTENSION diskquota; -CREATE TABLE f1(a int); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. -INSERT into f1 SELECT generate_series(0,1000); -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f1'::regclass and segid = -1; - tableid | size | segid ----------+-------+------- - f1 | 98304 | -1 -(1 row) - ---start_ignore -\! 
gpconfig -c diskquota.max_workers -v 1; -20230117:13:00:12:1977590 gpconfig:zhrt:zhrt-[INFO]:-completed successfully with parameters '-c diskquota.max_workers -v 1' -\! gpstop -arf; -20230117:13:00:12:1978021 gpstop:zhrt:zhrt-[INFO]:-Starting gpstop with args: -arf -20230117:13:00:12:1978021 gpstop:zhrt:zhrt-[INFO]:-Gathering information and validating the environment... -20230117:13:00:12:1978021 gpstop:zhrt:zhrt-[INFO]:-Obtaining Greenplum Coordinator catalog information -20230117:13:00:12:1978021 gpstop:zhrt:zhrt-[INFO]:-Obtaining Segment details from coordinator... -20230117:13:00:13:1978021 gpstop:zhrt:zhrt-[INFO]:-Greenplum Version: 'postgres (Greenplum Database) 7.0.0-alpha.0+dev.16171.g005ee83c46 build dev' -20230117:13:00:13:1978021 gpstop:zhrt:zhrt-[INFO]:-Commencing Coordinator instance shutdown with mode='fast' -20230117:13:00:13:1978021 gpstop:zhrt:zhrt-[INFO]:-Coordinator segment instance directory=/home/zhrt/workspace/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 -20230117:13:00:13:1978021 gpstop:zhrt:zhrt-[INFO]:-Attempting forceful termination of any leftover coordinator process -20230117:13:00:13:1978021 gpstop:zhrt:zhrt-[INFO]:-Terminating processes for segment /home/zhrt/workspace/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 -20230117:13:00:13:1978021 gpstop:zhrt:zhrt-[INFO]:-Stopping coordinator standby host zhrt mode=fast -20230117:13:00:13:1978021 gpstop:zhrt:zhrt-[INFO]:-Successfully shutdown standby process on zhrt -20230117:13:00:13:1978021 gpstop:zhrt:zhrt-[INFO]:-Targeting dbid [2, 5, 3, 6, 4, 7] for shutdown -20230117:13:00:13:1978021 gpstop:zhrt:zhrt-[INFO]:-Commencing parallel primary segment instance shutdown, please wait... -20230117:13:00:13:1978021 gpstop:zhrt:zhrt-[INFO]:-0.00% of jobs completed -20230117:13:00:14:1978021 gpstop:zhrt:zhrt-[INFO]:-100.00% of jobs completed -20230117:13:00:14:1978021 gpstop:zhrt:zhrt-[INFO]:-Commencing parallel mirror segment instance shutdown, please wait... -20230117:13:00:14:1978021 gpstop:zhrt:zhrt-[INFO]:-0.00% of jobs completed -20230117:13:00:14:1978021 gpstop:zhrt:zhrt-[INFO]:-100.00% of jobs completed -20230117:13:00:14:1978021 gpstop:zhrt:zhrt-[INFO]:----------------------------------------------------- -20230117:13:00:14:1978021 gpstop:zhrt:zhrt-[INFO]:- Segments stopped successfully = 6 -20230117:13:00:14:1978021 gpstop:zhrt:zhrt-[INFO]:- Segments with errors during stop = 0 -20230117:13:00:14:1978021 gpstop:zhrt:zhrt-[INFO]:----------------------------------------------------- -20230117:13:00:14:1978021 gpstop:zhrt:zhrt-[INFO]:-Successfully shutdown 6 of 6 segment instances -20230117:13:00:14:1978021 gpstop:zhrt:zhrt-[INFO]:-Database successfully shutdown with no errors reported -20230117:13:00:14:1978021 gpstop:zhrt:zhrt-[INFO]:-Restarting System... ---end_ignore -\c -SHOW diskquota.max_workers; - diskquota.max_workers ------------------------ - 1 -(1 row) - -\c t2 -CREATE EXTENSION diskquota; -CREATE TABLE f2(a int); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
-INSERT into f2 SELECT generate_series(0,1000); -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f2'::regclass and segid = -1; - tableid | size | segid ----------+-------+------- - f2 | 98304 | -1 -(1 row) - -\c t3 -CREATE EXTENSION diskquota; -CREATE TABLE f3(a int); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. -INSERT into f3 SELECT generate_series(0,1000); -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f3'::regclass and segid = -1; - tableid | size | segid ----------+-------+------- - f3 | 98304 | -1 -(1 row) - ---start_ignore -\! gpconfig -c diskquota.max_workers -v 11; -20230117:13:02:24:1981283 gpconfig:zhrt:zhrt-[INFO]:-completed successfully with parameters '-c diskquota.max_workers -v 11' -\! gpstop -arf; -20230117:13:02:24:1981861 gpstop:zhrt:zhrt-[INFO]:-Starting gpstop with args: -arf -20230117:13:02:24:1981861 gpstop:zhrt:zhrt-[INFO]:-Gathering information and validating the environment... -20230117:13:02:24:1981861 gpstop:zhrt:zhrt-[INFO]:-Obtaining Greenplum Coordinator catalog information -20230117:13:02:24:1981861 gpstop:zhrt:zhrt-[INFO]:-Obtaining Segment details from coordinator... -20230117:13:02:24:1981861 gpstop:zhrt:zhrt-[INFO]:-Greenplum Version: 'postgres (Greenplum Database) 7.0.0-alpha.0+dev.16171.g005ee83c46 build dev' -20230117:13:02:24:1981861 gpstop:zhrt:zhrt-[INFO]:-Commencing Coordinator instance shutdown with mode='fast' -20230117:13:02:24:1981861 gpstop:zhrt:zhrt-[INFO]:-Coordinator segment instance directory=/home/zhrt/workspace/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 -20230117:13:02:24:1981861 gpstop:zhrt:zhrt-[INFO]:-Attempting forceful termination of any leftover coordinator process -20230117:13:02:24:1981861 gpstop:zhrt:zhrt-[INFO]:-Terminating processes for segment /home/zhrt/workspace/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 -20230117:13:02:24:1981861 gpstop:zhrt:zhrt-[INFO]:-Stopping coordinator standby host zhrt mode=fast -20230117:13:02:25:1981861 gpstop:zhrt:zhrt-[INFO]:-Successfully shutdown standby process on zhrt -20230117:13:02:25:1981861 gpstop:zhrt:zhrt-[INFO]:-Targeting dbid [2, 5, 3, 6, 4, 7] for shutdown -20230117:13:02:25:1981861 gpstop:zhrt:zhrt-[INFO]:-Commencing parallel primary segment instance shutdown, please wait... -20230117:13:02:25:1981861 gpstop:zhrt:zhrt-[INFO]:-0.00% of jobs completed -20230117:13:02:25:1981861 gpstop:zhrt:zhrt-[INFO]:-100.00% of jobs completed -20230117:13:02:25:1981861 gpstop:zhrt:zhrt-[INFO]:-Commencing parallel mirror segment instance shutdown, please wait... 
-20230117:13:02:25:1981861 gpstop:zhrt:zhrt-[INFO]:-0.00% of jobs completed -20230117:13:02:25:1981861 gpstop:zhrt:zhrt-[INFO]:-100.00% of jobs completed -20230117:13:02:25:1981861 gpstop:zhrt:zhrt-[INFO]:----------------------------------------------------- -20230117:13:02:25:1981861 gpstop:zhrt:zhrt-[INFO]:- Segments stopped successfully = 6 -20230117:13:02:25:1981861 gpstop:zhrt:zhrt-[INFO]:- Segments with errors during stop = 0 -20230117:13:02:25:1981861 gpstop:zhrt:zhrt-[INFO]:----------------------------------------------------- -20230117:13:02:25:1981861 gpstop:zhrt:zhrt-[INFO]:-Successfully shutdown 6 of 6 segment instances -20230117:13:02:25:1981861 gpstop:zhrt:zhrt-[INFO]:-Database successfully shutdown with no errors reported -20230117:13:02:25:1981861 gpstop:zhrt:zhrt-[INFO]:-Restarting System... ---end_ignore -\c -SHOW diskquota.max_workers; - diskquota.max_workers ------------------------ - 11 -(1 row) - -\c t4 -CREATE EXTENSION diskquota; -CREATE TABLE f4(a int); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. -INSERT into f4 SELECT generate_series(0,1000); -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f4'::regclass and segid = -1; - tableid | size | segid ----------+-------+------- - f4 | 98304 | -1 -(1 row) - -\c t5 -CREATE EXTENSION diskquota; -CREATE TABLE f5(a int); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. -INSERT into f5 SELECT generate_series(0,1000); -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f5'::regclass and segid = -1; - tableid | size | segid ----------+-------+------- - f5 | 98304 | -1 -(1 row) - -\c t6 -CREATE EXTENSION diskquota; -CREATE TABLE f6(a int); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. -INSERT into f6 SELECT generate_series(0,1000); -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f6'::regclass and segid = -1; - tableid | size | segid ----------+-------+------- - f6 | 98304 | -1 -(1 row) - -\c t7 -CREATE EXTENSION diskquota; -CREATE TABLE f7(a int); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
-INSERT into f7 SELECT generate_series(0,1000); -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f7'::regclass and segid = -1; - tableid | size | segid ----------+-------+------- - f7 | 98304 | -1 -(1 row) - -\c t8 -CREATE EXTENSION diskquota; -CREATE TABLE f8(a int); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. -INSERT into f8 SELECT generate_series(0,1000); -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f8'::regclass and segid = -1; - tableid | size | segid ----------+-------+------- - f8 | 98304 | -1 -(1 row) - -\c t9 -CREATE EXTENSION diskquota; -CREATE TABLE f9(a int); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. -INSERT into f9 SELECT generate_series(0,1000); -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f9'::regclass and segid = -1; - tableid | size | segid ----------+-------+------- - f9 | 98304 | -1 -(1 row) - -\c t10 -CREATE EXTENSION diskquota; -CREATE TABLE f10(a int); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. -INSERT into f10 SELECT generate_series(0,1000); -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f10'::regclass and segid = -1; - tableid | size | segid ----------+-------+------- - f10 | 98304 | -1 -(1 row) - -\c t11 -CREATE EXTENSION diskquota; -CREATE TABLE f11(a int); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
-INSERT into f11 SELECT generate_series(0,1000); -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f11'::regclass and segid = -1; - tableid | size | segid ----------+-------+------- - f11 | 98304 | -1 -(1 row) - -\c t1 -INSERT into f1 SELECT generate_series(0,100000); -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f1'::regclass and segid = -1; - tableid | size | segid ----------+---------+------- - f1 | 3997696 | -1 -(1 row) - -\c t7 -INSERT into f7 SELECT generate_series(0,100000); -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f7'::regclass and segid = -1; - tableid | size | segid ----------+---------+------- - f7 | 3997696 | -1 -(1 row) - -\c t1 -SELECT diskquota.pause(); - pause -------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -DROP EXTENSION diskquota; -DROP TABLE f1; -CREATE EXTENSION diskquota; -CREATE TABLE f1(a int); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. -INSERT into f1 SELECT generate_series(0,1000); -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f1'::regclass and segid = -1; - tableid | size | segid ----------+-------+------- - f1 | 98304 | -1 -(1 row) - -\c t2 -SELECT diskquota.pause(); - pause -------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -DROP EXTENSION diskquota; -DROP TABLE f2; -CREATE EXTENSION diskquota; -CREATE TABLE f2(a int); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
-INSERT into f2 SELECT generate_series(0,1000); -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f2'::regclass and segid = -1; - tableid | size | segid ----------+-------+------- - f2 | 98304 | -1 -(1 row) - -\c t3 -SELECT diskquota.pause(); - pause -------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -DROP EXTENSION diskquota; -\c t4 -SELECT diskquota.pause(); - pause -------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -DROP EXTENSION diskquota; -\c t5 -SELECT diskquota.pause(); - pause -------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -DROP EXTENSION diskquota; -\c t6 -SELECT diskquota.pause(); - pause -------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -DROP EXTENSION diskquota; -\c t7 -SELECT diskquota.pause(); - pause -------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -DROP EXTENSION diskquota; -\c t8 -SELECT diskquota.pause(); - pause -------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -DROP EXTENSION diskquota; -\c t9 -SELECT diskquota.pause(); - pause -------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -DROP EXTENSION diskquota; -\c t10 -SELECT diskquota.pause(); - pause -------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -DROP EXTENSION diskquota; -\c t11 -SELECT diskquota.pause(); - pause -------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -DROP EXTENSION diskquota; -\c t12 -CREATE EXTENSION diskquota; -CREATE TABLE f12(a int); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
-INSERT into f12 SELECT generate_series(0,1000); -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'f12'::regclass and segid = -1; - tableid | size | segid ----------+-------+------- - f12 | 98304 | -1 -(1 row) - -SELECT diskquota.pause(); - pause -------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -DROP EXTENSION diskquota; -\c t1 -SELECT diskquota.pause(); - pause -------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -DROP EXTENSION diskquota; -\c t2 -SELECT diskquota.pause(); - pause -------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -DROP EXTENSION diskquota; ---start_ignore -\c contrib_regression -DROP DATABASE t1; -DROP DATABASE t2; -DROP DATABASE t3; -DROP DATABASE t4; -DROP DATABASE t5; -DROP DATABASE t6; -DROP DATABASE t7; -DROP DATABASE t8; -DROP DATABASE t9; -DROP DATABASE t10; -DROP DATABASE t11; -DROP DATABASE t12; -\! gpconfig -r diskquota.worker_timeout; -20230117:13:04:52:2001815 gpconfig:zhrt:zhrt-[INFO]:-completed successfully with parameters '-r diskquota.worker_timeout' -\! gpconfig -r diskquota.max_workers; -20230117:13:04:53:2002403 gpconfig:zhrt:zhrt-[INFO]:-completed successfully with parameters '-r diskquota.max_workers' -\! gpstop -arf; -20230117:13:04:53:2003022 gpstop:zhrt:zhrt-[INFO]:-Starting gpstop with args: -arf -20230117:13:04:53:2003022 gpstop:zhrt:zhrt-[INFO]:-Gathering information and validating the environment... -20230117:13:04:53:2003022 gpstop:zhrt:zhrt-[INFO]:-Obtaining Greenplum Coordinator catalog information -20230117:13:04:53:2003022 gpstop:zhrt:zhrt-[INFO]:-Obtaining Segment details from coordinator... -20230117:13:04:53:2003022 gpstop:zhrt:zhrt-[INFO]:-Greenplum Version: 'postgres (Greenplum Database) 7.0.0-alpha.0+dev.16171.g005ee83c46 build dev' -20230117:13:04:53:2003022 gpstop:zhrt:zhrt-[INFO]:-Commencing Coordinator instance shutdown with mode='fast' -20230117:13:04:53:2003022 gpstop:zhrt:zhrt-[INFO]:-Coordinator segment instance directory=/home/zhrt/workspace/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 -20230117:13:04:53:2003022 gpstop:zhrt:zhrt-[INFO]:-Attempting forceful termination of any leftover coordinator process -20230117:13:04:53:2003022 gpstop:zhrt:zhrt-[INFO]:-Terminating processes for segment /home/zhrt/workspace/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 -20230117:13:04:53:2003022 gpstop:zhrt:zhrt-[INFO]:-Stopping coordinator standby host zhrt mode=fast -20230117:13:04:54:2003022 gpstop:zhrt:zhrt-[INFO]:-Successfully shutdown standby process on zhrt -20230117:13:04:54:2003022 gpstop:zhrt:zhrt-[INFO]:-Targeting dbid [2, 5, 3, 6, 4, 7] for shutdown -20230117:13:04:54:2003022 gpstop:zhrt:zhrt-[INFO]:-Commencing parallel primary segment instance shutdown, please wait... -20230117:13:04:54:2003022 gpstop:zhrt:zhrt-[INFO]:-0.00% of jobs completed -20230117:13:04:54:2003022 gpstop:zhrt:zhrt-[INFO]:-100.00% of jobs completed -20230117:13:04:54:2003022 gpstop:zhrt:zhrt-[INFO]:-Commencing parallel mirror segment instance shutdown, please wait... 
-20230117:13:04:54:2003022 gpstop:zhrt:zhrt-[INFO]:-0.00% of jobs completed -20230117:13:04:55:2003022 gpstop:zhrt:zhrt-[INFO]:-100.00% of jobs completed -20230117:13:04:55:2003022 gpstop:zhrt:zhrt-[INFO]:----------------------------------------------------- -20230117:13:04:55:2003022 gpstop:zhrt:zhrt-[INFO]:- Segments stopped successfully = 6 -20230117:13:04:55:2003022 gpstop:zhrt:zhrt-[INFO]:- Segments with errors during stop = 0 -20230117:13:04:55:2003022 gpstop:zhrt:zhrt-[INFO]:----------------------------------------------------- -20230117:13:04:55:2003022 gpstop:zhrt:zhrt-[INFO]:-Successfully shutdown 6 of 6 segment instances -20230117:13:04:55:2003022 gpstop:zhrt:zhrt-[INFO]:-Database successfully shutdown with no errors reported -20230117:13:04:55:2003022 gpstop:zhrt:zhrt-[INFO]:-Restarting System... ---end_ignore diff --git a/tests/regress/expected7/test_worker_schedule_exception.out b/tests/regress/expected7/test_worker_schedule_exception.out deleted file mode 100644 index aeb8e5d85be..00000000000 --- a/tests/regress/expected7/test_worker_schedule_exception.out +++ /dev/null @@ -1,113 +0,0 @@ --- start_ignore -\! gpconfig -c diskquota.max_workers -v 10; -20230117:13:07:03:2006049 gpconfig:zhrt:zhrt-[INFO]:-completed successfully with parameters '-c diskquota.max_workers -v 10' -\! gpconfig -c diskquota.naptime -v 4; -20230117:13:07:04:2006587 gpconfig:zhrt:zhrt-[INFO]:-completed successfully with parameters '-c diskquota.naptime -v 4' -\! gpstop -arf; -20230117:13:07:04:2007250 gpstop:zhrt:zhrt-[INFO]:-Starting gpstop with args: -arf -20230117:13:07:04:2007250 gpstop:zhrt:zhrt-[INFO]:-Gathering information and validating the environment... -20230117:13:07:04:2007250 gpstop:zhrt:zhrt-[INFO]:-Obtaining Greenplum Coordinator catalog information -20230117:13:07:04:2007250 gpstop:zhrt:zhrt-[INFO]:-Obtaining Segment details from coordinator... -20230117:13:07:04:2007250 gpstop:zhrt:zhrt-[INFO]:-Greenplum Version: 'postgres (Greenplum Database) 7.0.0-alpha.0+dev.16171.g005ee83c46 build dev' -20230117:13:07:04:2007250 gpstop:zhrt:zhrt-[INFO]:-Commencing Coordinator instance shutdown with mode='fast' -20230117:13:07:04:2007250 gpstop:zhrt:zhrt-[INFO]:-Coordinator segment instance directory=/home/zhrt/workspace/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 -20230117:13:07:04:2007250 gpstop:zhrt:zhrt-[INFO]:-Attempting forceful termination of any leftover coordinator process -20230117:13:07:04:2007250 gpstop:zhrt:zhrt-[INFO]:-Terminating processes for segment /home/zhrt/workspace/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 -20230117:13:07:04:2007250 gpstop:zhrt:zhrt-[INFO]:-Stopping coordinator standby host zhrt mode=fast -20230117:13:07:05:2007250 gpstop:zhrt:zhrt-[INFO]:-Successfully shutdown standby process on zhrt -20230117:13:07:05:2007250 gpstop:zhrt:zhrt-[INFO]:-Targeting dbid [2, 5, 3, 6, 4, 7] for shutdown -20230117:13:07:05:2007250 gpstop:zhrt:zhrt-[INFO]:-Commencing parallel primary segment instance shutdown, please wait... -20230117:13:07:05:2007250 gpstop:zhrt:zhrt-[INFO]:-0.00% of jobs completed -20230117:13:07:05:2007250 gpstop:zhrt:zhrt-[INFO]:-100.00% of jobs completed -20230117:13:07:05:2007250 gpstop:zhrt:zhrt-[INFO]:-Commencing parallel mirror segment instance shutdown, please wait... 
-20230117:13:07:05:2007250 gpstop:zhrt:zhrt-[INFO]:-0.00% of jobs completed -20230117:13:07:06:2007250 gpstop:zhrt:zhrt-[INFO]:-100.00% of jobs completed -20230117:13:07:06:2007250 gpstop:zhrt:zhrt-[INFO]:----------------------------------------------------- -20230117:13:07:06:2007250 gpstop:zhrt:zhrt-[INFO]:- Segments stopped successfully = 6 -20230117:13:07:06:2007250 gpstop:zhrt:zhrt-[INFO]:- Segments with errors during stop = 0 -20230117:13:07:06:2007250 gpstop:zhrt:zhrt-[INFO]:----------------------------------------------------- -20230117:13:07:06:2007250 gpstop:zhrt:zhrt-[INFO]:-Successfully shutdown 6 of 6 segment instances -20230117:13:07:06:2007250 gpstop:zhrt:zhrt-[INFO]:-Database successfully shutdown with no errors reported -20230117:13:07:06:2007250 gpstop:zhrt:zhrt-[INFO]:-Restarting System... -\c -DROP DATABASE IF EXISTS t1; -NOTICE: database "t1" does not exist, skipping -DROP DATABASE IF EXISTS t2; -NOTICE: database "t2" does not exist, skipping ---end_ignore -CREATE DATABASE t1; -CREATE DATABASE t2; -\c t1 -CREATE EXTENSION diskquota; -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -\! pgrep -f "[p]ostgres.*bgworker.*t1" | xargs kill; -\! sleep 0.5 ; ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep | wc -l -2 --- start_ignore -\! ps -ef | grep postgres | grep "\[diskquota]" | grep -v grep -zhrt 2009311 2009263 10 13:09 ? 00:00:00 postgres: 7000, [diskquota] - launcher -zhrt 2009361 2009263 1 13:09 ? 00:00:00 postgres: 7000, bgworker: [diskquota] contrib_regression con8 cmd1 ---end_ignore -\c contrib_regression -DROP DATABASE t1; -\c t2 -CREATE EXTENSION diskquota; -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -\c t2 -SELECT diskquota.pause(); - pause -------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -DROP EXTENSION diskquota; -\c contrib_regression -DROP DATABASE t2; ---start_ignore -\! gpconfig -r diskquota.naptime; -20230117:13:09:27:2009995 gpconfig:zhrt:zhrt-[INFO]:-completed successfully with parameters '-r diskquota.naptime' -\! gpconfig -r diskquota.max_workers; -20230117:13:09:27:2010164 gpconfig:zhrt:zhrt-[INFO]:-completed successfully with parameters '-r diskquota.max_workers' -\! gpstop -arf; -20230117:13:09:27:2010416 gpstop:zhrt:zhrt-[INFO]:-Starting gpstop with args: -arf -20230117:13:09:27:2010416 gpstop:zhrt:zhrt-[INFO]:-Gathering information and validating the environment... -20230117:13:09:27:2010416 gpstop:zhrt:zhrt-[INFO]:-Obtaining Greenplum Coordinator catalog information -20230117:13:09:27:2010416 gpstop:zhrt:zhrt-[INFO]:-Obtaining Segment details from coordinator... 
-20230117:13:09:27:2010416 gpstop:zhrt:zhrt-[INFO]:-Greenplum Version: 'postgres (Greenplum Database) 7.0.0-alpha.0+dev.16171.g005ee83c46 build dev' -20230117:13:09:27:2010416 gpstop:zhrt:zhrt-[INFO]:-Commencing Coordinator instance shutdown with mode='fast' -20230117:13:09:27:2010416 gpstop:zhrt:zhrt-[INFO]:-Coordinator segment instance directory=/home/zhrt/workspace/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 -20230117:13:09:28:2010416 gpstop:zhrt:zhrt-[INFO]:-Attempting forceful termination of any leftover coordinator process -20230117:13:09:28:2010416 gpstop:zhrt:zhrt-[INFO]:-Terminating processes for segment /home/zhrt/workspace/gpdb/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 -20230117:13:09:28:2010416 gpstop:zhrt:zhrt-[INFO]:-Stopping coordinator standby host zhrt mode=fast -20230117:13:09:28:2010416 gpstop:zhrt:zhrt-[INFO]:-Successfully shutdown standby process on zhrt -20230117:13:09:28:2010416 gpstop:zhrt:zhrt-[INFO]:-Targeting dbid [2, 5, 3, 6, 4, 7] for shutdown -20230117:13:09:28:2010416 gpstop:zhrt:zhrt-[INFO]:-Commencing parallel primary segment instance shutdown, please wait... -20230117:13:09:28:2010416 gpstop:zhrt:zhrt-[INFO]:-0.00% of jobs completed -20230117:13:09:29:2010416 gpstop:zhrt:zhrt-[INFO]:-100.00% of jobs completed -20230117:13:09:29:2010416 gpstop:zhrt:zhrt-[INFO]:-Commencing parallel mirror segment instance shutdown, please wait... -20230117:13:09:29:2010416 gpstop:zhrt:zhrt-[INFO]:-0.00% of jobs completed -20230117:13:09:29:2010416 gpstop:zhrt:zhrt-[INFO]:-100.00% of jobs completed -20230117:13:09:29:2010416 gpstop:zhrt:zhrt-[INFO]:----------------------------------------------------- -20230117:13:09:29:2010416 gpstop:zhrt:zhrt-[INFO]:- Segments stopped successfully = 6 -20230117:13:09:29:2010416 gpstop:zhrt:zhrt-[INFO]:- Segments with errors during stop = 0 -20230117:13:09:29:2010416 gpstop:zhrt:zhrt-[INFO]:----------------------------------------------------- -20230117:13:09:29:2010416 gpstop:zhrt:zhrt-[INFO]:-Successfully shutdown 6 of 6 segment instances -20230117:13:09:29:2010416 gpstop:zhrt:zhrt-[INFO]:-Database successfully shutdown with no errors reported -20230117:13:09:29:2010416 gpstop:zhrt:zhrt-[INFO]:-Restarting System... ---end_ignore From ddeb628c0f231e8920cdb0e019c0a102b0a5ea0b Mon Sep 17 00:00:00 2001 From: Chen Mulong Date: Thu, 16 Feb 2023 13:09:41 +0800 Subject: [PATCH 263/330] Fix update test failures caused by segid diff (#305) Due to the GPDB change b80e969844, upgrade_test failed because the segid in the view dump is surrounded by quotes.
Expected failure diff: -WHERE (table_size.segid = (-1)))) AS dbsize; +WHERE (table_size.segid = '-1'::integer))) AS dbsize; --- upgrade_test/expected/2.0_catalog.out | 10 +++++----- upgrade_test/expected/2.1_catalog.out | 10 +++++----- upgrade_test/expected/2.2_catalog.out | 10 +++++----- 3 files changed, 15 insertions(+), 15 deletions(-) diff --git a/upgrade_test/expected/2.0_catalog.out b/upgrade_test/expected/2.0_catalog.out index 7d7aa740bd5..73b0501334f 100644 --- a/upgrade_test/expected/2.0_catalog.out +++ b/upgrade_test/expected/2.0_catalog.out @@ -160,13 +160,13 @@ ORDER by | | FROM pg_class + | | WHERE (pg_class.oid <= (16384)::oid)) + ( SELECT sum(table_size.size) AS sum + | | FROM diskquota.table_size + - | | WHERE (table_size.segid = (-1)))) AS dbsize; + | | WHERE (table_size.segid = '-1'::integer))) AS dbsize; diskquota | show_fast_role_quota_view | WITH quota_usage AS ( + | | SELECT pg_class.relowner, + | | sum(table_size.size) AS total_size + | | FROM diskquota.table_size, + | | pg_class + - | | WHERE ((table_size.tableid = pg_class.oid) AND (table_size.segid = (-1))) + + | | WHERE ((table_size.tableid = pg_class.oid) AND (table_size.segid = '-1'::integer)) + | | GROUP BY pg_class.relowner + | | ) + | | SELECT pg_roles.rolname AS role_name, + @@ -191,7 +191,7 @@ ORDER by | | FROM diskquota.table_size, + | | pg_class, + | | default_tablespace + - | | WHERE ((table_size.tableid = pg_class.oid) AND (table_size.segid = (-1))) + + | | WHERE ((table_size.tableid = pg_class.oid) AND (table_size.segid = '-1'::integer)) + | | GROUP BY pg_class.relowner, pg_class.reltablespace, default_tablespace.dattablespace + | | ), full_quota_config AS ( + | | SELECT target.primaryoid, + @@ -216,7 +216,7 @@ ORDER by | | sum(table_size.size) AS total_size + | | FROM diskquota.table_size, + | | pg_class + - | | WHERE ((table_size.tableid = pg_class.oid) AND (table_size.segid = (-1))) + + | | WHERE ((table_size.tableid = pg_class.oid) AND (table_size.segid = '-1'::integer)) + | | GROUP BY pg_class.relnamespace + | | ) + | | SELECT pg_namespace.nspname AS schema_name, + @@ -241,7 +241,7 @@ ORDER by | | FROM diskquota.table_size, + | | pg_class, + | | default_tablespace + - | | WHERE ((table_size.tableid = pg_class.oid) AND (table_size.segid = (-1))) + + | | WHERE ((table_size.tableid = pg_class.oid) AND (table_size.segid = '-1'::integer)) + | | GROUP BY pg_class.relnamespace, pg_class.reltablespace, default_tablespace.dattablespace + | | ), full_quota_config AS ( + | | SELECT target.primaryoid, + diff --git a/upgrade_test/expected/2.1_catalog.out b/upgrade_test/expected/2.1_catalog.out index 7582b33a2b1..b22cec877a0 100644 --- a/upgrade_test/expected/2.1_catalog.out +++ b/upgrade_test/expected/2.1_catalog.out @@ -191,13 +191,13 @@ ORDER by | | FROM pg_class + | | WHERE (pg_class.oid <= (16384)::oid)) + ( SELECT sum(table_size.size) AS sum + | | FROM diskquota.table_size + - | | WHERE (table_size.segid = (-1)))) AS dbsize; + | | WHERE (table_size.segid = '-1'::integer))) AS dbsize; diskquota | show_fast_role_quota_view | WITH quota_usage AS ( + | | SELECT show_all_relation_view.relowner, + | | sum(table_size.size) AS total_size + | | FROM diskquota.table_size, + | | diskquota.show_all_relation_view + - | | WHERE ((table_size.tableid = show_all_relation_view.oid) AND (table_size.segid = (-1))) + + | | WHERE ((table_size.tableid = show_all_relation_view.oid) AND (table_size.segid = '-1'::integer)) + | | GROUP BY show_all_relation_view.relowner + | | ) + | | SELECT pg_roles.rolname AS role_name, + @@ 
-222,7 +222,7 @@ ORDER by | | FROM diskquota.table_size, + | | diskquota.show_all_relation_view, + | | default_tablespace + - | | WHERE ((table_size.tableid = show_all_relation_view.oid) AND (table_size.segid = (-1))) + + | | WHERE ((table_size.tableid = show_all_relation_view.oid) AND (table_size.segid = '-1'::integer)) + | | GROUP BY show_all_relation_view.relowner, show_all_relation_view.reltablespace, default_tablespace.dattablespace + | | ), full_quota_config AS ( + | | SELECT target.primaryoid, + @@ -247,7 +247,7 @@ ORDER by | | sum(table_size.size) AS total_size + | | FROM diskquota.table_size, + | | diskquota.show_all_relation_view + - | | WHERE ((table_size.tableid = show_all_relation_view.oid) AND (table_size.segid = (-1))) + + | | WHERE ((table_size.tableid = show_all_relation_view.oid) AND (table_size.segid = '-1'::integer)) + | | GROUP BY show_all_relation_view.relnamespace + | | ) + | | SELECT pg_namespace.nspname AS schema_name, + @@ -272,7 +272,7 @@ ORDER by | | FROM diskquota.table_size, + | | diskquota.show_all_relation_view, + | | default_tablespace + - | | WHERE ((table_size.tableid = show_all_relation_view.oid) AND (table_size.segid = (-1))) + + | | WHERE ((table_size.tableid = show_all_relation_view.oid) AND (table_size.segid = '-1'::integer)) + | | GROUP BY show_all_relation_view.relnamespace, show_all_relation_view.reltablespace, default_tablespace.dattablespace + | | ), full_quota_config AS ( + | | SELECT target.primaryoid, + diff --git a/upgrade_test/expected/2.2_catalog.out b/upgrade_test/expected/2.2_catalog.out index 287a353e8d1..5654d0fb781 100644 --- a/upgrade_test/expected/2.2_catalog.out +++ b/upgrade_test/expected/2.2_catalog.out @@ -198,13 +198,13 @@ ORDER by | | FROM pg_class + | | WHERE (pg_class.oid <= (16384)::oid)) + ( SELECT sum(table_size.size) AS sum + | | FROM diskquota.table_size + - | | WHERE (table_size.segid = (-1)))) AS dbsize; + | | WHERE (table_size.segid = '-1'::integer))) AS dbsize; diskquota | show_fast_role_quota_view | WITH quota_usage AS ( + | | SELECT show_all_relation_view.relowner, + | | sum(table_size.size) AS total_size + | | FROM diskquota.table_size, + | | diskquota.show_all_relation_view + - | | WHERE ((table_size.tableid = show_all_relation_view.oid) AND (table_size.segid = (-1))) + + | | WHERE ((table_size.tableid = show_all_relation_view.oid) AND (table_size.segid = '-1'::integer)) + | | GROUP BY show_all_relation_view.relowner + | | ) + | | SELECT pg_roles.rolname AS role_name, + @@ -229,7 +229,7 @@ ORDER by | | FROM diskquota.table_size, + | | diskquota.show_all_relation_view, + | | default_tablespace + - | | WHERE ((table_size.tableid = show_all_relation_view.oid) AND (table_size.segid = (-1))) + + | | WHERE ((table_size.tableid = show_all_relation_view.oid) AND (table_size.segid = '-1'::integer)) + | | GROUP BY show_all_relation_view.relowner, show_all_relation_view.reltablespace, default_tablespace.dattablespace + | | ), full_quota_config AS ( + | | SELECT target.primaryoid, + @@ -254,7 +254,7 @@ ORDER by | | sum(table_size.size) AS total_size + | | FROM diskquota.table_size, + | | diskquota.show_all_relation_view + - | | WHERE ((table_size.tableid = show_all_relation_view.oid) AND (table_size.segid = (-1))) + + | | WHERE ((table_size.tableid = show_all_relation_view.oid) AND (table_size.segid = '-1'::integer)) + | | GROUP BY show_all_relation_view.relnamespace + | | ) + | | SELECT pg_namespace.nspname AS schema_name, + @@ -279,7 +279,7 @@ ORDER by | | FROM diskquota.table_size, + | | diskquota.show_all_relation_view, 
+ | | default_tablespace + - | | WHERE ((table_size.tableid = show_all_relation_view.oid) AND (table_size.segid = (-1))) + + | | WHERE ((table_size.tableid = show_all_relation_view.oid) AND (table_size.segid = '-1'::integer)) + | | GROUP BY show_all_relation_view.relnamespace, show_all_relation_view.reltablespace, default_tablespace.dattablespace + | | ), full_quota_config AS ( + | | SELECT target.primaryoid, + From b2b41a388f640dccf1310681f02dfad07c63f28b Mon Sep 17 00:00:00 2001 From: Zhang Hao Date: Thu, 16 Feb 2023 14:22:06 +0800 Subject: [PATCH 264/330] Replace relation_open/relation_close with RelationIdGetRelation/RelationClose. (#300) This PR replaces relation_open()/relation_close() with RelationIdGetRelation()/RelationClose() to avoid deadlock. We can only call relation_open() with AccessShareLock in object_access_hook(OAT_POST_CREATE) in GPDB7, which may cause deadlock. RelationIdGetRelation()/RelationClose(), by contrast, only locks pg_class instead of the user-defined relation, so we use these functions to get the information of relations. --- diskquota.h | 2 +- diskquota_utility.c | 6 +++--- gp_activetable.c | 26 +++++++++++++------------- relation_cache.c | 29 ++++++++--------------------- 4 files changed, 25 insertions(+), 38 deletions(-) diff --git a/diskquota.h b/diskquota.h index a52037cf72e..0bd5ab30743 100644 --- a/diskquota.h +++ b/diskquota.h @@ -265,7 +265,7 @@ extern int worker_spi_get_extension_version(int *major, int *minor); extern void truncateStringInfo(StringInfo str, int nchars); extern List *get_rel_oid_list(void); extern int64 calculate_relation_size_all_forks(RelFileNodeBackend *rnode, char relstorage, Oid relam); -extern Relation diskquota_relation_open(Oid relid, LOCKMODE mode); +extern Relation diskquota_relation_open(Oid relid); extern bool get_rel_name_namespace(Oid relid, Oid *nsOid, char *relname); extern List *diskquota_get_index_list(Oid relid); extern void diskquota_get_appendonly_aux_oid_list(Oid reloid, Oid *segrelid, Oid *blkdirrelid, Oid *visimaprelid); diff --git a/diskquota_utility.c b/diskquota_utility.c index da9bc080fe2..f406809c875 100644 --- a/diskquota_utility.c +++ b/diskquota_utility.c @@ -1441,7 +1441,7 @@ relation_size_local(PG_FUNCTION_ARGS) } Relation -diskquota_relation_open(Oid relid, LOCKMODE mode) +diskquota_relation_open(Oid relid) { Relation rel; bool success_open = false; @@ -1449,8 +1449,8 @@ diskquota_relation_open(Oid relid, LOCKMODE mode) PG_TRY(); { - rel = relation_open(relid, mode); - success_open = true; + rel = RelationIdGetRelation(relid); + if (rel) success_open = true; } PG_CATCH(); { diff --git a/gp_activetable.c b/gp_activetable.c index 4cf2144fe5b..1b2d6842da1 100644 --- a/gp_activetable.c +++ b/gp_activetable.c @@ -246,6 +246,7 @@ report_relation_cache_helper(Oid relid) { bool found; Relation rel; + char relkind; /* We do not collect the active table in mirror segments */ if (IsRoleMirror()) @@ -265,20 +266,19 @@ report_relation_cache_helper(Oid relid) { return; } -#if GP_VERSION_NUM < 70000 - rel = diskquota_relation_open(relid, NoLock); -#else - rel = diskquota_relation_open(relid, AccessShareLock); -#endif /* GP_VERSION_NUM */ - if (rel->rd_rel->relkind != RELKIND_FOREIGN_TABLE && rel->rd_rel->relkind != RELKIND_COMPOSITE_TYPE && - rel->rd_rel->relkind != RELKIND_VIEW) - update_relation_cache(relid); -#if GP_VERSION_NUM < 70000 - relation_close(rel, NoLock); -#else - relation_close(rel, AccessShareLock); -#endif /* GP_VERSION_NUM */ + rel = diskquota_relation_open(relid); + if (rel == NULL) + { + return; + } + + 
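+	/*
+	 * Copy relkind out before dropping the relcache reference below:
+	 * diskquota_relation_open() now returns a Relation obtained from
+	 * RelationIdGetRelation() without locking the user relation, and
+	 * rel must not be dereferenced after RelationClose().
+	 */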
relkind = rel->rd_rel->relkind; + + RelationClose(rel); + + if (relkind != RELKIND_FOREIGN_TABLE && relkind != RELKIND_COMPOSITE_TYPE && relkind != RELKIND_VIEW) + update_relation_cache(relid); } /* diff --git a/relation_cache.c b/relation_cache.c index 352852e9348..647779deee3 100644 --- a/relation_cache.c +++ b/relation_cache.c @@ -136,11 +136,7 @@ static void update_relation_entry(Oid relid, DiskQuotaRelationCacheEntry *relation_entry, DiskQuotaRelidCacheEntry *relid_entry) { Relation rel; -#if GP_VERSION_NUM < 70000 - rel = diskquota_relation_open(relid, NoLock); -#else - rel = diskquota_relation_open(relid, AccessShareLock); -#endif /* GP_VERSION_NUM */ + rel = diskquota_relation_open(relid); if (rel == NULL) { @@ -166,11 +162,7 @@ update_relation_entry(Oid relid, DiskQuotaRelationCacheEntry *relation_entry, Di relation_entry->primary_table_relid = relid; -#if GP_VERSION_NUM < 70000 - relation_close(rel, NoLock); -#else - relation_close(rel, AccessShareLock); -#endif /* GP_VERSION_NUM */ + RelationClose(rel); } void @@ -228,22 +220,16 @@ parse_primary_table_oid(Oid relid, bool on_bgworker) } else { -#if GP_VERSION_NUM < 70000 - rel = diskquota_relation_open(relid, NoLock); -#else - rel = diskquota_relation_open(relid, AccessShareLock); -#endif /* GP_VERSION_NUM */ + rel = diskquota_relation_open(relid); + if (rel == NULL) { return InvalidOid; } namespace = rel->rd_rel->relnamespace; memcpy(relname, rel->rd_rel->relname.data, NAMEDATALEN); -#if GP_VERSION_NUM < 70000 - relation_close(rel, NoLock); -#else - relation_close(rel, AccessShareLock); -#endif /* GP_VERSION_NUM */ + + RelationClose(rel); } parsed_oid = diskquota_parse_primary_table_oid(namespace, relname); @@ -331,7 +317,8 @@ show_relation_cache(PG_FUNCTION_ARGS) { HASH_SEQ_STATUS iter; HTAB *relation_cache; - } * relation_cache_ctx; + }; + struct RelationCacheCtx *relation_cache_ctx; if (SRF_IS_FIRSTCALL()) { From d86ead2afdcc4a1f0bdc5a972cfd3ee691c9a282 Mon Sep 17 00:00:00 2001 From: Xiaoran Wang Date: Mon, 20 Feb 2023 17:27:37 +0800 Subject: [PATCH 265/330] Fix flaky test of test_rejectmap_mul_db (#301) Add back the pause in the test, to avoid the changes of active table. --- tests/regress/expected/test_rejectmap_mul_db.out | 15 ++++++++++++++- tests/regress/sql/test_rejectmap_mul_db.sql | 3 +++ 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/tests/regress/expected/test_rejectmap_mul_db.out b/tests/regress/expected/test_rejectmap_mul_db.out index 78a10aad70f..8ac4193c4fc 100644 --- a/tests/regress/expected/test_rejectmap_mul_db.out +++ b/tests/regress/expected/test_rejectmap_mul_db.out @@ -22,7 +22,14 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- Trigger hard limit to dispatch rejectmap for tjmu1 INSERT INTO b SELECT generate_series(1, 100000000); -- fail -ERROR: schema's disk space quota exceeded with name: 2200 (seg1 127.0.0.1:8003 pid=43782) +ERROR: schema's disk space quota exceeded with name: 2200 (seg0 127.0.0.1:6002 pid=87165) +-- FIXME: Pause to avoid tjmu1's worker clear the active table. Since there are bugs, this might be flaky. +SELECT diskquota.pause(); + pause +------- + +(1 row) + -- The rejectmap should contain entries with dbnode = 0 and dbnode = tjmu1_oid. 
count = 1 SELECT COUNT(DISTINCT r.dbnode) FROM (SELECT (diskquota.show_rejectmap()).* FROM gp_dist_random('gp_id')) as r where r.dbnode != 0; count @@ -54,6 +61,12 @@ SELECT diskquota.wait_for_worker_new_epoch(); t (1 row) +SELECT diskquota.pause(); + pause +------- + +(1 row) + --\c tjmu1 -- The rejectmap should contain entris with dbnode = 0 and dbnode = tjmu1_oid and tjmu2_oid. count = 2 -- The entries for tjmu1 should not be cleared diff --git a/tests/regress/sql/test_rejectmap_mul_db.sql b/tests/regress/sql/test_rejectmap_mul_db.sql index 57fd16db43a..e59647f3428 100644 --- a/tests/regress/sql/test_rejectmap_mul_db.sql +++ b/tests/regress/sql/test_rejectmap_mul_db.sql @@ -16,6 +16,8 @@ CREATE TABLE b (t TEXT) DISTRIBUTED BY (t); SELECT diskquota.wait_for_worker_new_epoch(); -- Trigger hard limit to dispatch rejectmap for tjmu1 INSERT INTO b SELECT generate_series(1, 100000000); -- fail +-- FIXME: Pause to avoid tjmu1's worker clear the active table. Since there are bugs, this might be flaky. +SELECT diskquota.pause(); -- The rejectmap should contain entries with dbnode = 0 and dbnode = tjmu1_oid. count = 1 SELECT COUNT(DISTINCT r.dbnode) FROM (SELECT (diskquota.show_rejectmap()).* FROM gp_dist_random('gp_id')) as r where r.dbnode != 0; @@ -27,6 +29,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- Trigger hard limit to dispatch rejectmap for tjmu2 INSERT INTO b SELECT generate_series(1, 100000000); -- fail SELECT diskquota.wait_for_worker_new_epoch(); +SELECT diskquota.pause(); --\c tjmu1 -- The rejectmap should contain entris with dbnode = 0 and dbnode = tjmu1_oid and tjmu2_oid. count = 2 From e56694f80d11d6532df885560cb7f41779fd0be5 Mon Sep 17 00:00:00 2001 From: Xiaoran Wang Date: Tue, 21 Feb 2023 15:22:47 +0800 Subject: [PATCH 266/330] Fix test_primary_failure test case (#303) * subprocess.check_output of python3, needs encoding * Add @PLPYTHON_LANG_STR@ in the test_primary_failure.in.sql --- tests/CMakeLists.txt | 2 +- tests/regress/diskquota_schedule7 | 46 ------------------- .../regress/expected/test_primary_failure.out | 14 ++++-- ...ailure.sql => test_primary_failure.in.sql} | 16 +++++-- 4 files changed, 22 insertions(+), 56 deletions(-) delete mode 100644 tests/regress/diskquota_schedule7 rename tests/regress/sql/{test_primary_failure.sql => test_primary_failure.in.sql} (78%) diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 07abaf69569..a2a3c4b7829 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -26,7 +26,7 @@ RegressTarget_Add(regress EXPECTED_DIR ${regress_expected_DIR} RESULTS_DIR ${CMAKE_CURRENT_SOURCE_DIR}/regress/results DATA_DIR ${CMAKE_CURRENT_SOURCE_DIR}/data - SCHEDULE_FILE ${CMAKE_CURRENT_SOURCE_DIR}/regress/diskquota_schedule${EXPECTED_DIR_SUFFIX} + SCHEDULE_FILE ${CMAKE_CURRENT_SOURCE_DIR}/regress/diskquota_schedule EXCLUDE_FAULT_INJECT_TEST ${exclude_fault_injector} REGRESS_OPTS --load-extension=gp_inject_fault diff --git a/tests/regress/diskquota_schedule7 b/tests/regress/diskquota_schedule7 deleted file mode 100644 index 50dc40e3b60..00000000000 --- a/tests/regress/diskquota_schedule7 +++ /dev/null @@ -1,46 +0,0 @@ -test: config -test: test_create_extension -test: test_readiness_logged -test: test_init_table_size_table -test: test_relation_size -test: test_relation_cache -test: test_uncommitted_table_size -test: test_pause_and_resume -test: test_pause_and_resume_multiple_db -test: test_drop_after_pause -test: test_show_status -test: test_update_db_cache -test: test_quota_view_no_table -# disable this test due to GPDB behavior 
change -# test: test_table_size -test: test_fast_disk_check -test: test_worker_not_ready -#test: test_insert_after_drop -test: test_role test_schema test_drop_table test_column test_copy test_update test_toast test_truncate test_reschema test_temp_role test_rename test_delete_quota test_mistake test_tablespace_role test_tablespace_schema test_tablespace_role_perseg test_tablespace_schema_perseg test_index test_recreate -test: test_ctas_no_preload_lib -test: test_ctas_before_set_quota -test: test_truncate -test: test_delete_quota -test: test_partition -test: test_vacuum -# plpython is not avilable in gpdb7, should change it to plpython3 -# test: test_primary_failure -test: test_extension -test: test_activetable_limit -test: test_many_active_tables -test: test_fetch_table_stat -test: test_appendonly -test: test_rejectmap -test: test_clean_rejectmap_after_drop -test: test_rejectmap_mul_db -test: test_ctas_pause -test: test_ctas_role -test: test_ctas_schema -test: test_ctas_tablespace_role -test: test_ctas_tablespace_schema -test: test_default_tablespace -test: test_tablespace_diff_schema -test: test_worker_schedule -test: test_worker_schedule_exception -test: test_drop_extension -test: reset_config diff --git a/tests/regress/expected/test_primary_failure.out b/tests/regress/expected/test_primary_failure.out index 99985501666..5f5c18a2360 100644 --- a/tests/regress/expected/test_primary_failure.out +++ b/tests/regress/expected/test_primary_failure.out @@ -21,15 +21,21 @@ returns text as $$ cmd = 'pg_ctl -l postmaster.log -D %s ' % datadir cmd = cmd + '-W -m %s %s' % (command_mode, command) + if 'plpython2u' == 'plpython2u': + return subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).replace('.', '') + else: + return subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True, encoding='utf8').replace('.', '') - return subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).replace('.', '') -$$ language plpythonu; +$$ language plpython2u; create or replace function pg_recoverseg(datadir text, command text) returns text as $$ import subprocess cmd = 'gprecoverseg -%s -d %s; exit 0; ' % (command, datadir) - return subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).replace('.', '') -$$ language plpythonu; + if 'plpython2u' == 'plpython2u': + return subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).replace('.', '') + else: + return subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True, encoding='utf8').replace('.', '') +$$ language plpython2u; CREATE TABLE a(i int) DISTRIBUTED BY (i); NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
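The plpython branch above exists because Python 3's subprocess.check_output() returns bytes unless a text encoding is requested, so the trailing .replace('.', ''), written against the str result of Python 2, raises a TypeError under plpython3u. A minimal standalone sketch of the difference (illustrative only; the echo command and its 'done...' output are made up for the example):

    import subprocess

    out = subprocess.check_output('echo done...', shell=True)
    # Python 3: out is the bytes object b'done...\n', and mixing bytes
    # with a str argument fails:
    #   out.replace('.', '')  raises
    #   TypeError: a bytes-like object is required, not 'str'

    out = subprocess.check_output('echo done...', shell=True, encoding='utf8')
    # With encoding set (available since Python 3.6), check_output
    # decodes the output to str, so the Python 2 style call works
    # unchanged:
    print(out.replace('.', ''))   # prints 'done'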
diff --git a/tests/regress/sql/test_primary_failure.sql b/tests/regress/sql/test_primary_failure.in.sql similarity index 78% rename from tests/regress/sql/test_primary_failure.sql rename to tests/regress/sql/test_primary_failure.in.sql index 14556741726..cbac6e4cde8 100644 --- a/tests/regress/sql/test_primary_failure.sql +++ b/tests/regress/sql/test_primary_failure.in.sql @@ -1,7 +1,7 @@ CREATE SCHEMA ftsr; SELECT diskquota.set_schema_quota('ftsr', '1 MB'); SET search_path TO ftsr; -create or replace language plpythonu; +create or replace language @PLPYTHON_LANG_STR@; -- -- pg_ctl: -- datadir: data directory of process to target with `pg_ctl` @@ -16,16 +16,22 @@ returns text as $$ cmd = 'pg_ctl -l postmaster.log -D %s ' % datadir cmd = cmd + '-W -m %s %s' % (command_mode, command) + if '@PLPYTHON_LANG_STR@' == 'plpython2u': + return subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).replace('.', '') + else: + return subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True, encoding='utf8').replace('.', '') - return subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).replace('.', '') -$$ language plpythonu; +$$ language @PLPYTHON_LANG_STR@; create or replace function pg_recoverseg(datadir text, command text) returns text as $$ import subprocess cmd = 'gprecoverseg -%s -d %s; exit 0; ' % (command, datadir) - return subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).replace('.', '') -$$ language plpythonu; + if '@PLPYTHON_LANG_STR@' == 'plpython2u': + return subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).replace('.', '') + else: + return subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True, encoding='utf8').replace('.', '') +$$ language @PLPYTHON_LANG_STR@; CREATE TABLE a(i int) DISTRIBUTED BY (i); INSERT INTO a SELECT generate_series(1,100); From 97f1f9b46b0941a86cffdb2120b900b55ce58928 Mon Sep 17 00:00:00 2001 From: Zhang Hao Date: Wed, 22 Feb 2023 13:53:55 +0800 Subject: [PATCH 267/330] Move files to src and control dirs. (#307) - Move .c and .h files to src dir. - Move diskquota ddl file to control/ddl dir. - Move diskquota_test--1.0.sql file to control/test dir. 
--- CMakeLists.txt | 30 ++++--------------- .../ddl/diskquota--1.0--2.0.sql | 0 .../ddl/diskquota--1.0.sql | 0 .../ddl/diskquota--2.0--1.0.sql | 0 .../ddl/diskquota--2.0--2.1.sql | 0 .../ddl/diskquota--2.0.sql | 0 .../ddl/diskquota--2.1--2.0.sql | 0 .../ddl/diskquota--2.1--2.2.sql | 0 .../ddl/diskquota--2.1.sql | 0 .../ddl/diskquota--2.2--2.1.sql | 0 .../ddl/diskquota--2.2.sql | 0 .../ddl/diskquota.control | 0 .../test/diskquota_test--1.0.sql | 0 .../test/diskquota_test.control | 0 diskquota.c => src/diskquota.c | 0 diskquota.h => src/diskquota.h | 0 diskquota_enum.h => src/diskquota_enum.h | 0 .../diskquota_utility.c | 0 enforcement.c => src/enforcement.c | 0 gp_activetable.c => src/gp_activetable.c | 0 gp_activetable.h => src/gp_activetable.h | 0 monitored_db.c => src/monitored_db.c | 0 quotamodel.c => src/quotamodel.c | 0 relation_cache.c => src/relation_cache.c | 0 relation_cache.h => src/relation_cache.h | 0 tests/CMakeLists.txt | 4 +-- 26 files changed, 7 insertions(+), 27 deletions(-) rename diskquota--1.0--2.0.sql => control/ddl/diskquota--1.0--2.0.sql (100%) rename diskquota--1.0.sql => control/ddl/diskquota--1.0.sql (100%) rename diskquota--2.0--1.0.sql => control/ddl/diskquota--2.0--1.0.sql (100%) rename diskquota--2.0--2.1.sql => control/ddl/diskquota--2.0--2.1.sql (100%) rename diskquota--2.0.sql => control/ddl/diskquota--2.0.sql (100%) rename diskquota--2.1--2.0.sql => control/ddl/diskquota--2.1--2.0.sql (100%) rename diskquota--2.1--2.2.sql => control/ddl/diskquota--2.1--2.2.sql (100%) rename diskquota--2.1.sql => control/ddl/diskquota--2.1.sql (100%) rename diskquota--2.2--2.1.sql => control/ddl/diskquota--2.2--2.1.sql (100%) rename diskquota--2.2.sql => control/ddl/diskquota--2.2.sql (100%) rename diskquota.control => control/ddl/diskquota.control (100%) rename diskquota_test--1.0.sql => control/test/diskquota_test--1.0.sql (100%) rename diskquota_test.control => control/test/diskquota_test.control (100%) rename diskquota.c => src/diskquota.c (100%) rename diskquota.h => src/diskquota.h (100%) rename diskquota_enum.h => src/diskquota_enum.h (100%) rename diskquota_utility.c => src/diskquota_utility.c (100%) rename enforcement.c => src/enforcement.c (100%) rename gp_activetable.c => src/gp_activetable.c (100%) rename gp_activetable.h => src/gp_activetable.h (100%) rename monitored_db.c => src/monitored_db.c (100%) rename quotamodel.c => src/quotamodel.c (100%) rename relation_cache.c => src/relation_cache.c (100%) rename relation_cache.h => src/relation_cache.h (100%) diff --git a/CMakeLists.txt b/CMakeLists.txt index 62111646cff..a79be595997 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -64,31 +64,11 @@ add_compile_definitions( DISKQUOTA_PATCH_VERSION=${DISKQUOTA_PATCH_VERSION} DISKQUOTA_BINARY_NAME="${DISKQUOTA_BINARY_NAME}") -list( - APPEND - diskquota_SRC - diskquota.c - diskquota_utility.c - enforcement.c - gp_activetable.c - quotamodel.c - relation_cache.c - monitored_db.c) - -list( - APPEND - diskquota_DDL - diskquota.control - diskquota--1.0.sql - diskquota--1.0--2.0.sql - diskquota--2.0.sql - diskquota--2.0--1.0.sql - diskquota--2.1.sql - diskquota--2.0--2.1.sql - diskquota--2.1--2.0.sql - diskquota--2.2.sql - diskquota--2.1--2.2.sql - diskquota--2.2--2.1.sql) +set(SRC_DIR "${CMAKE_CURRENT_SOURCE_DIR}/src") +file(GLOB diskquota_SRC "${SRC_DIR}/*.c") + +set(DISKQUOTA_DDL_DIR "${CMAKE_CURRENT_SOURCE_DIR}/control/ddl") +file(GLOB diskquota_DDL "${DISKQUOTA_DDL_DIR}/*") add_library(diskquota MODULE ${diskquota_SRC}) diff --git a/diskquota--1.0--2.0.sql 
b/control/ddl/diskquota--1.0--2.0.sql similarity index 100% rename from diskquota--1.0--2.0.sql rename to control/ddl/diskquota--1.0--2.0.sql diff --git a/diskquota--1.0.sql b/control/ddl/diskquota--1.0.sql similarity index 100% rename from diskquota--1.0.sql rename to control/ddl/diskquota--1.0.sql diff --git a/diskquota--2.0--1.0.sql b/control/ddl/diskquota--2.0--1.0.sql similarity index 100% rename from diskquota--2.0--1.0.sql rename to control/ddl/diskquota--2.0--1.0.sql diff --git a/diskquota--2.0--2.1.sql b/control/ddl/diskquota--2.0--2.1.sql similarity index 100% rename from diskquota--2.0--2.1.sql rename to control/ddl/diskquota--2.0--2.1.sql diff --git a/diskquota--2.0.sql b/control/ddl/diskquota--2.0.sql similarity index 100% rename from diskquota--2.0.sql rename to control/ddl/diskquota--2.0.sql diff --git a/diskquota--2.1--2.0.sql b/control/ddl/diskquota--2.1--2.0.sql similarity index 100% rename from diskquota--2.1--2.0.sql rename to control/ddl/diskquota--2.1--2.0.sql diff --git a/diskquota--2.1--2.2.sql b/control/ddl/diskquota--2.1--2.2.sql similarity index 100% rename from diskquota--2.1--2.2.sql rename to control/ddl/diskquota--2.1--2.2.sql diff --git a/diskquota--2.1.sql b/control/ddl/diskquota--2.1.sql similarity index 100% rename from diskquota--2.1.sql rename to control/ddl/diskquota--2.1.sql diff --git a/diskquota--2.2--2.1.sql b/control/ddl/diskquota--2.2--2.1.sql similarity index 100% rename from diskquota--2.2--2.1.sql rename to control/ddl/diskquota--2.2--2.1.sql diff --git a/diskquota--2.2.sql b/control/ddl/diskquota--2.2.sql similarity index 100% rename from diskquota--2.2.sql rename to control/ddl/diskquota--2.2.sql diff --git a/diskquota.control b/control/ddl/diskquota.control similarity index 100% rename from diskquota.control rename to control/ddl/diskquota.control diff --git a/diskquota_test--1.0.sql b/control/test/diskquota_test--1.0.sql similarity index 100% rename from diskquota_test--1.0.sql rename to control/test/diskquota_test--1.0.sql diff --git a/diskquota_test.control b/control/test/diskquota_test.control similarity index 100% rename from diskquota_test.control rename to control/test/diskquota_test.control diff --git a/diskquota.c b/src/diskquota.c similarity index 100% rename from diskquota.c rename to src/diskquota.c diff --git a/diskquota.h b/src/diskquota.h similarity index 100% rename from diskquota.h rename to src/diskquota.h diff --git a/diskquota_enum.h b/src/diskquota_enum.h similarity index 100% rename from diskquota_enum.h rename to src/diskquota_enum.h diff --git a/diskquota_utility.c b/src/diskquota_utility.c similarity index 100% rename from diskquota_utility.c rename to src/diskquota_utility.c diff --git a/enforcement.c b/src/enforcement.c similarity index 100% rename from enforcement.c rename to src/enforcement.c diff --git a/gp_activetable.c b/src/gp_activetable.c similarity index 100% rename from gp_activetable.c rename to src/gp_activetable.c diff --git a/gp_activetable.h b/src/gp_activetable.h similarity index 100% rename from gp_activetable.h rename to src/gp_activetable.h diff --git a/monitored_db.c b/src/monitored_db.c similarity index 100% rename from monitored_db.c rename to src/monitored_db.c diff --git a/quotamodel.c b/src/quotamodel.c similarity index 100% rename from quotamodel.c rename to src/quotamodel.c diff --git a/relation_cache.c b/src/relation_cache.c similarity index 100% rename from relation_cache.c rename to src/relation_cache.c diff --git a/relation_cache.h b/src/relation_cache.h similarity index 100% rename 
from relation_cache.h rename to src/relation_cache.h diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index a2a3c4b7829..ef754a0a0c0 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -50,9 +50,9 @@ RegressTarget_Add(isolation2 add_custom_target(install_test_extension COMMAND - cmake -E copy ${CMAKE_SOURCE_DIR}/diskquota_test.control ${PG_HOME}/share/postgresql/extension + cmake -E copy ${CMAKE_SOURCE_DIR}/control/test/diskquota_test.control ${PG_HOME}/share/postgresql/extension COMMAND - cmake -E copy ${CMAKE_SOURCE_DIR}/diskquota_test--1.0.sql ${PG_HOME}/share/postgresql/extension + cmake -E copy ${CMAKE_SOURCE_DIR}/control/test/diskquota_test--1.0.sql ${PG_HOME}/share/postgresql/extension ) add_custom_target(installcheck) From a15244f5c2e5618de335f0a4722b21c3fb16b768 Mon Sep 17 00:00:00 2001 From: Chen Mulong Date: Tue, 7 Mar 2023 17:32:43 +0800 Subject: [PATCH 268/330] Fix gpdb release binary regex (#311) The release candidate contains the build number now. Change the regex to match the latest release candidate for release pipeline. --- concourse/pipeline/res_def.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/concourse/pipeline/res_def.yml b/concourse/pipeline/res_def.yml index 54d05adf4f9..ff2f666b5b1 100644 --- a/concourse/pipeline/res_def.yml +++ b/concourse/pipeline/res_def.yml @@ -172,31 +172,31 @@ resources: source: bucket: pivotal-gpdb-concourse-resources-prod json_key: ((concourse-gcs-resources-service-account-key)) - regexp: server/release-candidates/gpdb6/greenplum-db-server-6\.((9[0-8])|([1-8]?\d))\.(.*)-centos6.tar.gz + regexp: server/release-candidates/gpdb6/greenplum-db-server-6\.([0-9]|([1-8][0-9])|(9[0-8]))\..*-dev.*-centos6.tar.gz - name: bin_gpdb6_centos7 type: gcs source: bucket: pivotal-gpdb-concourse-resources-prod json_key: ((concourse-gcs-resources-service-account-key)) - regexp: server/release-candidates/gpdb6/greenplum-db-server-6\.((9[0-8])|([1-8]?\d))\.(.*)-centos7.tar.gz + regexp: server/release-candidates/gpdb6/greenplum-db-server-6\.([0-9]|([1-8][0-9])|(9[0-8]))\..*-dev.*-centos7.tar.gz - name: bin_gpdb6_rhel8 type: gcs source: bucket: pivotal-gpdb-concourse-resources-prod json_key: ((concourse-gcs-resources-service-account-key)) - regexp: server/release-candidates/gpdb6/greenplum-db-server-6\.((9[0-8])|([1-8]?\d))\.(.*)-rhel8.tar.gz + regexp: server/release-candidates/gpdb6/greenplum-db-server-6\.([0-9]|([1-8][0-9])|(9[0-8]))\..*-dev.*-rhel8.tar.gz - name: bin_gpdb6_ubuntu18 type: gcs source: bucket: pivotal-gpdb-concourse-resources-prod json_key: ((concourse-gcs-resources-service-account-key)) - regexp: server/release-candidates/gpdb6/greenplum-db-server-6\.((9[0-8])|([1-8]?\d))\.(.*)-ubuntu18.04.tar.gz + regexp: server/release-candidates/gpdb6/greenplum-db-server-6\.([0-9]|([1-8][0-9])|(9[0-8]))\..*-dev.*-ubuntu18.04.tar.gz - name: bin_gpdb7_rhel8 type: gcs source: bucket: pivotal-gpdb-concourse-resources-prod json_key: ((concourse-gcs-resources-service-account-key)) - regexp: server/release-candidates/gpdb7/greenplum-db-server-7\.((9[0-8])|([1-8]?\d))\.(.*)-rhel8.tar.gz + regexp: server/release-candidates/gpdb7/greenplum-db-server-7\.([0-9]|([1-8][0-9])|(9[0-8]))\..*-dev.*-rhel8.tar.gz # Diskquota releases - name: bin_diskquota_gpdb6_rhel6 From 83a1554dec20aba1558eb64ef4283a70426943b2 Mon Sep 17 00:00:00 2001 From: Zhang Hao Date: Tue, 7 Mar 2023 17:50:17 +0800 Subject: [PATCH 269/330] Enable test_postmaster_restart (#309) The isolation2 test `test_postmaster_restart` is disabled because the 
postmaster start command is different between GPDB6 and GPDB7. We should enable this test by passing distinct commands for GPDB6 and GPDB7 to test_postmaster_restart.sql. Meanwhile, we can merge isolation2_schedule7 and isolation2_schedule to one. --- tests/CMakeLists.txt | 5 +- .../expected7/test_postmaster_restart.out | 162 ++++++++++++++++++ tests/isolation2/isolation2_schedule7 | 13 -- ...art.sql => test_postmaster_restart.in.sql} | 2 +- 4 files changed, 166 insertions(+), 16 deletions(-) create mode 100644 tests/isolation2/expected7/test_postmaster_restart.out delete mode 100644 tests/isolation2/isolation2_schedule7 rename tests/isolation2/sql/{test_postmaster_restart.sql => test_postmaster_restart.in.sql} (96%) diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index ef754a0a0c0..ee56ea15ac3 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -3,13 +3,14 @@ include(${CMAKE_SOURCE_DIR}/cmake/Regress.cmake) list(APPEND isolation2_expected_DIR ${CMAKE_CURRENT_SOURCE_DIR}/isolation2/expected) list(APPEND regress_expected_DIR ${CMAKE_CURRENT_SOURCE_DIR}/regress/expected) if (${GP_MAJOR_VERSION} EQUAL 7) - set(EXPECTED_DIR_SUFFIX "7") list(APPEND isolation2_expected_DIR ${CMAKE_CURRENT_SOURCE_DIR}/isolation2/expected7) list(APPEND regress_expected_DIR ${CMAKE_CURRENT_SOURCE_DIR}/regress/expected7) # PLPYTHON_LANG_STR will be replaced by Regress.cmake set(PLPYTHON_LANG_STR "plpython3u") + set(POSTMASTER_START_CMD "pg_ctl -D $MASTER_DATA_DIRECTORY -w -o \"-c gp_role=dispatch\" start") else() set(PLPYTHON_LANG_STR "plpython2u") + set(POSTMASTER_START_CMD "pg_ctl -D $MASTER_DATA_DIRECTORY -w -o \"-E\" start") endif() set(exclude_fault_injector OFF) @@ -42,7 +43,7 @@ RegressTarget_Add(isolation2 EXPECTED_DIR ${isolation2_expected_DIR} RESULTS_DIR ${CMAKE_CURRENT_SOURCE_DIR}/isolation2/results DATA_DIR ${CMAKE_CURRENT_SOURCE_DIR}/data - SCHEDULE_FILE ${CMAKE_CURRENT_SOURCE_DIR}/isolation2/isolation2_schedule${EXPECTED_DIR_SUFFIX} + SCHEDULE_FILE ${CMAKE_CURRENT_SOURCE_DIR}/isolation2/isolation2_schedule EXCLUDE_FAULT_INJECT_TEST ${exclude_fault_injector} REGRESS_OPTS --load-extension=gp_inject_fault diff --git a/tests/isolation2/expected7/test_postmaster_restart.out b/tests/isolation2/expected7/test_postmaster_restart.out new file mode 100644 index 00000000000..bf842f49749 --- /dev/null +++ b/tests/isolation2/expected7/test_postmaster_restart.out @@ -0,0 +1,162 @@ +!\retcode gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null; +-- start_ignore + +-- end_ignore +(exited with code 0) +!\retcode gpstop -u > /dev/null; +-- start_ignore + +-- end_ignore +(exited with code 0) + +1: CREATE SCHEMA postmaster_restart_s; +CREATE +1: SET search_path TO postmaster_restart_s; +SET + +1: SELECT diskquota.set_schema_quota('postmaster_restart_s', '1 MB'); + set_schema_quota +------------------ + +(1 row) +1: SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect fail +1: CREATE TABLE t1 AS SELECT generate_series(1,10000000); +ERROR: schema's disk space quota exceeded with name: 33502 (seg2 127.0.0.1:7004 pid=675047) +1q: ... 
+ +-- launcher should exist +-- [p]ostgres is to filter out the pgrep itself +!\retcode pgrep -f "[p]ostgres.*launcher"; +-- start_ignore +673843 +673846 +673855 +673857 +673872 +673875 +673925 +673943 +673944 + +-- end_ignore +(exited with code 0) +-- bgworker should exist +!\retcode pgrep -f "[p]ostgres.*diskquota.*isolation2test"; +-- start_ignore +674189 + +-- end_ignore +(exited with code 0) + +-- stop postmaster +!\retcode pg_ctl -D $MASTER_DATA_DIRECTORY -w stop; +-- start_ignore +waiting for server to shut down.... done +server stopped + +-- end_ignore +(exited with code 0) + +-- launcher should be terminated +!\retcode pgrep -f "[p]ostgres.*launcher"; +-- start_ignore +673843 +673846 +673855 +673857 +673872 +673875 + +-- end_ignore +(exited with code 0) +-- bgworker should be terminated +!\retcode pgrep -f "[p]ostgres.*diskquota.*isolation2test"; +-- start_ignore + +-- end_ignore +(exited with code 1) + +-- start postmaster +-- -E needs to be changed to "-c gp_role=dispatch" for GPDB7 +-- See https://github.com/greenplum-db/gpdb/pull/9396 +!\retcode pg_ctl -D $MASTER_DATA_DIRECTORY -w -o "-c gp_role=dispatch" start; +-- start_ignore +waiting for server to start....2023-03-06 16:13:41.483928 CST,,,p675192,th987391872,,,,0,,,seg-1,,,,,"LOG","00000","starting PostgreSQL 12.12 (Greenplum Database 7.0.0-beta.1+dev.215.gb9adc4ece5 build dev) on x86_64-pc-linux-gnu, compiled by clang version 15.0.7, 64-bit",,,,,,,,"PostmasterMain","postmaster.c",1237, +2023-03-06 16:13:41.484093 CST,,,p675192,th987391872,,,,0,,,seg-1,,,,,"LOG","00000","listening on IPv4 address ""0.0.0.0"", port 7000",,,,,,,,"StreamServerPort","pqcomm.c",631, +2023-03-06 16:13:41.484153 CST,,,p675192,th987391872,,,,0,,,seg-1,,,,,"LOG","00000","listening on IPv6 address ""::"", port 7000",,,,,,,,"StreamServerPort","pqcomm.c",631, +2023-03-06 16:13:41.484241 CST,,,p675192,th987391872,,,,0,,,seg-1,,,,,"LOG","00000","listening on Unix socket ""/tmp/.s.PGSQL.7000""",,,,,,,,"StreamServerPort","pqcomm.c",625, +2023-03-06 16:13:41.510380 CST,,,p675192,th987391872,,,,0,,,seg-1,,,,,"LOG","00000","redirecting log output to logging collector process",,"Future log output will appear in directory ""log"".",,,,,,"SysLogger_Start","syslogger.c",929, + done +server started + +-- end_ignore +(exited with code 0) +-- Hopefully the bgworker can be started in 5 seconds +!\retcode sleep 5; +-- start_ignore + +-- end_ignore +(exited with code 0) + +-- launcher should be restarted +!\retcode pgrep -f "[p]ostgres.*launcher"; +-- start_ignore +673843 +673846 +673855 +673857 +673872 +673875 +675198 +675213 +675217 + +-- end_ignore +(exited with code 0) +-- bgworker should be restarted +!\retcode pgrep -f "[p]ostgres.*diskquota.*isolation2test"; +-- start_ignore +675239 + +-- end_ignore +(exited with code 0) + +1: SET search_path TO postmaster_restart_s; +SET +1: SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +-- expect fail +1: CREATE TABLE t2 AS SELECT generate_series(1,10000000); +ERROR: schema's disk space quota exceeded with name: 33502 (seg2 127.0.0.1:7004 pid=679604) +-- enlarge the quota limits +1: SELECT diskquota.set_schema_quota('postmaster_restart_s', '100 MB'); + set_schema_quota +------------------ + +(1 row) +1: SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +-- expect succeed +1: CREATE TABLE t3 AS SELECT generate_series(1,1000000); +CREATE 1000000 + +1: DROP SCHEMA postmaster_restart_s CASCADE; 
+DROP +1q: ... +!\retcode gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null; +-- start_ignore + +-- end_ignore +(exited with code 0) diff --git a/tests/isolation2/isolation2_schedule7 b/tests/isolation2/isolation2_schedule7 deleted file mode 100644 index 04e9b5c7aa5..00000000000 --- a/tests/isolation2/isolation2_schedule7 +++ /dev/null @@ -1,13 +0,0 @@ -test: config -test: test_create_extension -test: test_fast_quota_view -test: test_relation_size -test: test_rejectmap -test: test_vacuum -test: test_truncate -# test: test_postmaster_restart -test: test_worker_timeout -test: test_per_segment_config -test: test_relation_cache -test: test_drop_extension -test: reset_config diff --git a/tests/isolation2/sql/test_postmaster_restart.sql b/tests/isolation2/sql/test_postmaster_restart.in.sql similarity index 96% rename from tests/isolation2/sql/test_postmaster_restart.sql rename to tests/isolation2/sql/test_postmaster_restart.in.sql index 245fd91cb55..bd4def38916 100644 --- a/tests/isolation2/sql/test_postmaster_restart.sql +++ b/tests/isolation2/sql/test_postmaster_restart.in.sql @@ -28,7 +28,7 @@ -- start postmaster -- -E needs to be changed to "-c gp_role=dispatch" for GPDB7 -- See https://github.com/greenplum-db/gpdb/pull/9396 -!\retcode pg_ctl -D $MASTER_DATA_DIRECTORY -w -o "-E" start; +!\retcode @POSTMASTER_START_CMD@; -- Hopefully the bgworker can be started in 5 seconds !\retcode sleep 5; From ea99897fca77f5f33f6738384364e34053f0fd51 Mon Sep 17 00:00:00 2001 From: Chen Mulong Date: Thu, 16 Mar 2023 11:10:50 +0800 Subject: [PATCH 270/330] Bump cmake min version to 3.20 (#313) Fix #312 --- CMakeLists.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index a79be595997..38c64a98c3d 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,5 +1,5 @@ -cmake_minimum_required(VERSION 3.18) -# file(ARCHIVE_EXTRACT foo) need 3.18 +cmake_minimum_required(VERSION 3.20) +# cmake_path requires 3.20 project(diskquota) From a939bc078f9b041059ecb223504abb816753a3e4 Mon Sep 17 00:00:00 2001 From: Chen Mulong Date: Mon, 20 Mar 2023 10:22:33 +0800 Subject: [PATCH 271/330] Resource change for gpdb7 binary (#315) """ NOTICE: resource queue required -- using default resource queue "pg_default" """ has been removed in gpdb7 82851a0b85 . Ignore it in the tests. 
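The init_file change relies on the regress framework's matchignore mechanism: any line of expected or actual output that matches one of the listed regexes is dropped before the two results are compared, so the NOTICE can neither show up as a spurious diff on GPDB6 nor be missed on GPDB7. Conceptually the filter behaves like the following sketch (a simplified Python illustration of the idea, not the actual gpdiff implementation):

    import re

    MATCHIGNORE = [
        re.compile(r'^NOTICE: resource queue required -- using default resource queue "pg_default"'),
    ]

    def strip_ignored(lines):
        # Drop every output line matching a matchignore pattern so it
        # cannot contribute to the expected-vs-actual diff.
        return [line for line in lines
                if not any(p.search(line) for p in MATCHIGNORE)]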
--- concourse/pipeline/job_def.lib.yml | 2 +- concourse/pipeline/res_def.yml | 8 ++++---- tests/init_file | 1 + .../regress/expected/test_clean_rejectmap_after_drop.out | 1 - tests/regress/expected/test_ctas_role.out | 1 - tests/regress/expected/test_ctas_tablespace_role.out | 1 - tests/regress/expected/test_mistake.out | 1 - tests/regress/expected/test_rename.out | 1 - tests/regress/expected/test_role.out | 3 --- tests/regress/expected/test_schema.out | 1 - tests/regress/expected/test_tablespace_role.out | 3 --- tests/regress/expected/test_tablespace_role_perseg.out | 3 --- tests/regress/expected/test_temp_role.out | 1 - upgrade_test/expected/1.0_set_quota.out | 1 - upgrade_test/expected/2.0_set_quota.out | 2 -- upgrade_test/expected/2.1_set_quota.out | 2 -- upgrade_test/expected/2.2_set_quota.out | 2 -- upgrade_test/init_file | 1 + 18 files changed, 7 insertions(+), 28 deletions(-) diff --git a/concourse/pipeline/job_def.lib.yml b/concourse/pipeline/job_def.lib.yml index 78119ad5645..c206ca6e071 100644 --- a/concourse/pipeline/job_def.lib.yml +++ b/concourse/pipeline/job_def.lib.yml @@ -66,7 +66,7 @@ build_type: #@ "Release" if release_build else "Debug" #@ def rhel8_gpdb7_conf(release_build=False): res_build_image: rocky8-gpdb7-image-build res_test_images: [rocky8-gpdb7-image-test, rhel8-gpdb7-image-test] -res_gpdb_bin: #@ "bin_gpdb7_rhel8" + ("" if release_build else "_debug") +res_gpdb_bin: #@ "bin_gpdb7_el8" + ("" if release_build else "_debug") res_diskquota_bin: bin_diskquota_gpdb7_rhel8 res_intermediates_bin: #@ inter_bin_name("bin_diskquota_gpdb7_rhel8_intermediates", release_build) release_bin: bin_diskquota_gpdb7_rhel8_release diff --git a/concourse/pipeline/res_def.yml b/concourse/pipeline/res_def.yml index ff2f666b5b1..4e6578d856a 100644 --- a/concourse/pipeline/res_def.yml +++ b/concourse/pipeline/res_def.yml @@ -158,12 +158,12 @@ resources: bucket: pivotal-gpdb-concourse-resources-prod json_key: ((concourse-gcs-resources-service-account-key)) regexp: server/published/gpdb6/server-rc-(.*)-ubuntu18.04_x86_64.debug.tar.gz -- name: bin_gpdb7_rhel8_debug +- name: bin_gpdb7_el8_debug type: gcs source: bucket: pivotal-gpdb-concourse-resources-prod json_key: ((concourse-gcs-resources-service-account-key)) - regexp: server/published/main/server-rc-(.*)-rhel8_x86_64.debug.tar.gz + regexp: server/published/main/server-rc-(.*)-el8_x86_64.debug.tar.gz # Latest release candidates, no fault-injector, no assertion: # --disable-debug-extensions --disable-tap-tests --enable-ic-proxy @@ -191,12 +191,12 @@ resources: bucket: pivotal-gpdb-concourse-resources-prod json_key: ((concourse-gcs-resources-service-account-key)) regexp: server/release-candidates/gpdb6/greenplum-db-server-6\.([0-9]|([1-8][0-9])|(9[0-8]))\..*-dev.*-ubuntu18.04.tar.gz -- name: bin_gpdb7_rhel8 +- name: bin_gpdb7_el8 type: gcs source: bucket: pivotal-gpdb-concourse-resources-prod json_key: ((concourse-gcs-resources-service-account-key)) - regexp: server/release-candidates/gpdb7/greenplum-db-server-7\.([0-9]|([1-8][0-9])|(9[0-8]))\..*-dev.*-rhel8.tar.gz + regexp: server/release-candidates/gpdb7/greenplum-db-server-7\.([0-9]|([1-8][0-9])|(9[0-8]))\..*-dev.*-el8.tar.gz # Diskquota releases - name: bin_diskquota_gpdb6_rhel6 diff --git a/tests/init_file b/tests/init_file index ff2ea6bf197..e1b5928c067 100644 --- a/tests/init_file +++ b/tests/init_file @@ -9,6 +9,7 @@ m/WARNING: \[diskquota\] worker not found for database.*/ m/WARNING: \[diskquota\] database .* not found for getting epoch .*/ m/^NOTICE: CREATE TABLE will create 
partition */ m/^WARNING: skipping .* cannot calculate this foreign table size.*/ +m/^NOTICE: resource queue required -- using default resource queue "pg_default"/ -- end_matchignore -- start_matchsubs diff --git a/tests/regress/expected/test_clean_rejectmap_after_drop.out b/tests/regress/expected/test_clean_rejectmap_after_drop.out index 2c25b6b81fb..4da3507cd1d 100644 --- a/tests/regress/expected/test_clean_rejectmap_after_drop.out +++ b/tests/regress/expected/test_clean_rejectmap_after_drop.out @@ -4,7 +4,6 @@ CREATE EXTENSION diskquota; \! gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null \! gpstop -u > /dev/null CREATE ROLE r; -NOTICE: resource queue required -- using default resource queue "pg_default" SELECT diskquota.set_role_quota('r', '1MB'); set_role_quota ---------------- diff --git a/tests/regress/expected/test_ctas_role.out b/tests/regress/expected/test_ctas_role.out index be01152985c..d6452140003 100644 --- a/tests/regress/expected/test_ctas_role.out +++ b/tests/regress/expected/test_ctas_role.out @@ -4,7 +4,6 @@ \! gpstop -u > /dev/null -- end_ignore CREATE ROLE hardlimit_r; -NOTICE: resource queue required -- using default resource queue "pg_default" SELECT diskquota.set_role_quota('hardlimit_r', '1MB'); set_role_quota ---------------- diff --git a/tests/regress/expected/test_ctas_tablespace_role.out b/tests/regress/expected/test_ctas_tablespace_role.out index 6443c3bd585..adc0d95d584 100644 --- a/tests/regress/expected/test_ctas_tablespace_role.out +++ b/tests/regress/expected/test_ctas_tablespace_role.out @@ -9,7 +9,6 @@ DROP TABLESPACE IF EXISTS ctas_rolespc; NOTICE: tablespace "ctas_rolespc" does not exist, skipping CREATE TABLESPACE ctas_rolespc LOCATION '/tmp/ctas_rolespc'; CREATE ROLE hardlimit_r; -NOTICE: resource queue required -- using default resource queue "pg_default" GRANT USAGE ON SCHEMA diskquota TO hardlimit_r; GRANT ALL ON TABLESPACE ctas_rolespc TO hardlimit_r; SELECT diskquota.set_role_tablespace_quota('hardlimit_r', 'ctas_rolespc', '1 MB'); diff --git a/tests/regress/expected/test_mistake.out b/tests/regress/expected/test_mistake.out index fab4c6eb2f7..bd11eb5f1a5 100644 --- a/tests/regress/expected/test_mistake.out +++ b/tests/regress/expected/test_mistake.out @@ -14,7 +14,6 @@ ERROR: disk quota can not be set to 0 MB DROP ROLE IF EXISTS rmistake; NOTICE: role "rmistake" does not exist, skipping CREATE ROLE rmistake; -NOTICE: resource queue required -- using default resource queue "pg_default" select diskquota.set_role_quota('rmistake', '0 MB'); ERROR: disk quota can not be set to 0 MB -- start_ignore diff --git a/tests/regress/expected/test_rename.out b/tests/regress/expected/test_rename.out index ecd470ea3e8..57573b425e8 100644 --- a/tests/regress/expected/test_rename.out +++ b/tests/regress/expected/test_rename.out @@ -37,7 +37,6 @@ DROP SCHEMA srs2; -- test rename role CREATE SCHEMA srr1; CREATE ROLE srerole NOLOGIN; -NOTICE: resource queue required -- using default resource queue "pg_default" SELECT diskquota.set_role_quota('srerole', '1MB'); set_role_quota ---------------- diff --git a/tests/regress/expected/test_role.out b/tests/regress/expected/test_role.out index f4d6690c736..3f18ab804db 100644 --- a/tests/regress/expected/test_role.out +++ b/tests/regress/expected/test_role.out @@ -2,9 +2,7 @@ CREATE SCHEMA srole; SET search_path TO srole; CREATE ROLE u1 NOLOGIN; -NOTICE: resource queue required -- using default resource queue "pg_default" CREATE ROLE u2 NOLOGIN; -NOTICE: resource queue required -- using default resource queue 
"pg_default" CREATE TABLE b (t TEXT) DISTRIBUTED BY (t); ALTER TABLE b OWNER TO u1; CREATE TABLE b2 (t TEXT) DISTRIBUTED BY (t); @@ -121,7 +119,6 @@ select diskquota.set_role_quota(:'rolname', '-1mb'); (1 row) CREATE ROLE "Tn" NOLOGIN; -NOTICE: resource queue required -- using default resource queue "pg_default" SELECT diskquota.set_role_quota('Tn', '-1 MB'); -- fail ERROR: role "tn" does not exist SELECT diskquota.set_role_quota('"tn"', '-1 MB'); -- fail diff --git a/tests/regress/expected/test_schema.out b/tests/regress/expected/test_schema.out index 866b4b3e127..a85d161571b 100644 --- a/tests/regress/expected/test_schema.out +++ b/tests/regress/expected/test_schema.out @@ -42,7 +42,6 @@ CREATE SCHEMA badquota; DROP ROLE IF EXISTS testbody; NOTICE: role "testbody" does not exist, skipping CREATE ROLE testbody; -NOTICE: resource queue required -- using default resource queue "pg_default" CREATE TABLE badquota.t1(i INT) DISTRIBUTED BY (i); ALTER TABLE badquota.t1 OWNER TO testbody; INSERT INTO badquota.t1 SELECT generate_series(0, 100000); diff --git a/tests/regress/expected/test_tablespace_role.out b/tests/regress/expected/test_tablespace_role.out index beed1ae8a38..1d1d165c503 100644 --- a/tests/regress/expected/test_tablespace_role.out +++ b/tests/regress/expected/test_tablespace_role.out @@ -12,9 +12,7 @@ NOTICE: role "rolespcu1" does not exist, skipping DROP ROLE IF EXISTS rolespcu2; NOTICE: role "rolespcu2" does not exist, skipping CREATE ROLE rolespcu1 NOLOGIN; -NOTICE: resource queue required -- using default resource queue "pg_default" CREATE ROLE rolespcu2 NOLOGIN; -NOTICE: resource queue required -- using default resource queue "pg_default" CREATE TABLE b (t TEXT) TABLESPACE rolespc DISTRIBUTED BY (t); CREATE TABLE b2 (t TEXT) TABLESPACE rolespc DISTRIBUTED BY (t); ALTER TABLE b2 OWNER TO rolespcu1; @@ -163,7 +161,6 @@ ERROR: Can not set disk quota for system owner: sa DROP ROLE IF EXISTS "Rolespcu3"; NOTICE: role "Rolespcu3" does not exist, skipping CREATE ROLE "Rolespcu3" NOLOGIN; -NOTICE: resource queue required -- using default resource queue "pg_default" DROP TABLESPACE IF EXISTS "Rolespc3"; NOTICE: tablespace "Rolespc3" does not exist, skipping CREATE TABLESPACE "Rolespc3" LOCATION '/tmp/rolespc3'; diff --git a/tests/regress/expected/test_tablespace_role_perseg.out b/tests/regress/expected/test_tablespace_role_perseg.out index c30030325d7..eafbb92aef6 100644 --- a/tests/regress/expected/test_tablespace_role_perseg.out +++ b/tests/regress/expected/test_tablespace_role_perseg.out @@ -12,9 +12,7 @@ NOTICE: role "rolespc_persegu1" does not exist, skipping DROP ROLE IF EXISTS rolespc_persegu2; NOTICE: role "rolespc_persegu2" does not exist, skipping CREATE ROLE rolespc_persegu1 NOLOGIN; -NOTICE: resource queue required -- using default resource queue "pg_default" CREATE ROLE rolespc_persegu2 NOLOGIN; -NOTICE: resource queue required -- using default resource queue "pg_default" CREATE TABLE b (t TEXT) TABLESPACE rolespc_perseg DISTRIBUTED BY (t); ALTER TABLE b OWNER TO rolespc_persegu1; SELECT diskquota.set_role_tablespace_quota('rolespc_persegu1', 'rolespc_perseg', '1 MB'); @@ -213,7 +211,6 @@ DROP TABLESPACE IF EXISTS "Rolespc_perseg3"; NOTICE: tablespace "Rolespc_perseg3" does not exist, skipping CREATE TABLESPACE "Rolespc_perseg3" LOCATION '/tmp/rolespc_perseg3'; CREATE ROLE "Rolespc_persegu3" NOLOGIN; -NOTICE: resource queue required -- using default resource queue "pg_default" SELECT diskquota.set_role_tablespace_quota('"Rolespc_persegu3"', '"Rolespc_perseg3"', '-1 MB'); 
set_role_tablespace_quota --------------------------- diff --git a/tests/regress/expected/test_temp_role.out b/tests/regress/expected/test_temp_role.out index 5a2462a596b..7896ec17f3d 100644 --- a/tests/regress/expected/test_temp_role.out +++ b/tests/regress/expected/test_temp_role.out @@ -1,7 +1,6 @@ -- Test temp table restrained by role id CREATE SCHEMA strole; CREATE ROLE u3temp NOLOGIN; -NOTICE: resource queue required -- using default resource queue "pg_default" SET search_path TO strole; SELECT diskquota.set_role_quota('u3temp', '1MB'); set_role_quota diff --git a/upgrade_test/expected/1.0_set_quota.out b/upgrade_test/expected/1.0_set_quota.out index d8d661fc412..32ffd2dafd6 100644 --- a/upgrade_test/expected/1.0_set_quota.out +++ b/upgrade_test/expected/1.0_set_quota.out @@ -18,7 +18,6 @@ insert into s1.a select generate_series(1, 10000000); -- ok, but should fail aft -- role quota create schema srole; create role u1 nologin; -NOTICE: resource queue required -- using default resource queue "pg_default" create table srole.b (t text) distributed by (t); alter table srole.b owner to u1; select diskquota.set_role_quota('u1', '1 MB'); diff --git a/upgrade_test/expected/2.0_set_quota.out b/upgrade_test/expected/2.0_set_quota.out index ef0f2b04c0f..ce97cae5581 100644 --- a/upgrade_test/expected/2.0_set_quota.out +++ b/upgrade_test/expected/2.0_set_quota.out @@ -18,7 +18,6 @@ insert into s1.a select generate_series(1, 10000000); -- ok. -- role quota create schema srole; create role u1 nologin; -NOTICE: resource queue required -- using default resource queue "pg_default" create table srole.b (t text) distributed by (t); alter table srole.b owner to u1; select diskquota.set_role_quota('u1', '1 MB'); @@ -44,7 +43,6 @@ insert into spcs1.a select generate_series(1,100000); -- ok. \! mkdir -p /tmp/rolespc create tablespace rolespc location '/tmp/rolespc'; create role rolespcu1 nologin; -NOTICE: resource queue required -- using default resource queue "pg_default" create schema rolespcrole; create table rolespcrole.b (t text) tablespace rolespc distributed by (t); alter table rolespcrole.b owner to rolespcu1; diff --git a/upgrade_test/expected/2.1_set_quota.out b/upgrade_test/expected/2.1_set_quota.out index 5d34aad003a..b40938d638e 100644 --- a/upgrade_test/expected/2.1_set_quota.out +++ b/upgrade_test/expected/2.1_set_quota.out @@ -18,7 +18,6 @@ insert into s1.a select generate_series(1, 10000000); -- ok. -- role quota create schema srole; create role u1 nologin; -NOTICE: resource queue required -- using default resource queue "pg_default" create table srole.b (t text) distributed by (t); alter table srole.b owner to u1; select diskquota.set_role_quota('u1', '1 MB'); @@ -44,7 +43,6 @@ insert into spcs1.a select generate_series(1,100000); -- ok. \! mkdir -p /tmp/rolespc create tablespace rolespc location '/tmp/rolespc'; create role rolespcu1 nologin; -NOTICE: resource queue required -- using default resource queue "pg_default" create schema rolespcrole; create table rolespcrole.b (t text) tablespace rolespc distributed by (t); alter table rolespcrole.b owner to rolespcu1; diff --git a/upgrade_test/expected/2.2_set_quota.out b/upgrade_test/expected/2.2_set_quota.out index 58d8cc0a69c..400f3e5435e 100644 --- a/upgrade_test/expected/2.2_set_quota.out +++ b/upgrade_test/expected/2.2_set_quota.out @@ -18,7 +18,6 @@ insert into s1.a select generate_series(1, 10000000); -- ok. 
-- role quota create schema srole; create role u1 nologin; -NOTICE: resource queue required -- using default resource queue "pg_default" create table srole.b (t text) distributed by (t); alter table srole.b owner to u1; select diskquota.set_role_quota('u1', '1 MB'); @@ -44,7 +43,6 @@ insert into spcs1.a select generate_series(1,100000); -- ok. \! mkdir -p /tmp/rolespc create tablespace rolespc location '/tmp/rolespc'; create role rolespcu1 nologin; -NOTICE: resource queue required -- using default resource queue "pg_default" create schema rolespcrole; create table rolespcrole.b (t text) tablespace rolespc distributed by (t); alter table rolespcrole.b owner to rolespcu1; diff --git a/upgrade_test/init_file b/upgrade_test/init_file index 5261e4efb5d..a764e9d5254 100644 --- a/upgrade_test/init_file +++ b/upgrade_test/init_file @@ -3,6 +3,7 @@ -- Individual tests can contain additional patterns specific to the test. -- start_matchignore +m/^NOTICE: resource queue required -- using default resource queue "pg_default"/ -- end_matchignore -- start_matchsubs m/diskquota.c:\d+\)/ From 9df0f2eefb225b71bdd44e9889767881bfabd898 Mon Sep 17 00:00:00 2001 From: Zhang Hao Date: Mon, 27 Mar 2023 11:18:23 +0800 Subject: [PATCH 272/330] Fix bug: bgworker enters infinite loop after receiving notices from QE. (#314) We should not initialize MyProcPort in the bgworker; as a result, the database name is no longer automatically added to the log message. Normally, the log printed from the bgworker does not contain the database name but does contain the bgworker's pid. To facilitate debugging, we should print the database name every BGWORKER_LOG_TIME so that we can find the database name by the bgworker's pid in the log file. Co-authored-by: Chen Mulong --- src/diskquota.c | 26 ++++++-- src/gp_activetable.c | 7 +++ .../expected/test_ereport_from_seg.out | 62 +++++++++++++++++++ tests/isolation2/isolation2_schedule | 1 + .../isolation2/sql/test_ereport_from_seg.sql | 26 ++++++++ .../expected/test_readiness_logged.out | 32 ++++++++-- tests/regress/sql/test_readiness_logged.sql | 31 ++++++++-- 7 files changed, 171 insertions(+), 14 deletions(-) create mode 100644 tests/isolation2/expected/test_ereport_from_seg.out create mode 100644 tests/isolation2/sql/test_ereport_from_seg.sql diff --git a/src/diskquota.c b/src/diskquota.c index aa24b6cdff5..0cd4a708dbd 100644 --- a/src/diskquota.c +++ b/src/diskquota.c @@ -90,7 +90,8 @@ pg_atomic_uint32 *diskquota_table_size_entry_num; static DiskquotaLauncherShmemStruct *DiskquotaLauncherShmem; -#define MIN_SLEEPTIME 100 /* milliseconds */ +#define MIN_SLEEPTIME 100 /* milliseconds */ +#define BGWORKER_LOG_TIME 3600000 /* milliseconds */ /* * bgworker handles, in launcher local memory, @@ -322,9 +323,6 @@ disk_quota_worker_main(Datum main_arg) { char *dbname = MyBgworkerEntry->bgw_name; - MyProcPort = (Port *)calloc(1, sizeof(Port)); - MyProcPort->database_name = dbname; /* To show the database in the log */ - /* Disable ORCA to avoid fallback */ optimizer = false; @@ -480,11 +478,29 @@ disk_quota_worker_main(Datum main_arg) } if (!MyWorkerInfo->dbEntry->inited) update_monitordb_status(MyWorkerInfo->dbEntry->dbid, DB_RUNNING); - bool is_gang_destroyed = false; + + bool is_gang_destroyed = false; + TimestampTz log_start_timestamp = GetCurrentTimestamp(); + TimestampTz log_end_timestamp; + ereport(LOG, (errmsg("[diskquota] disk quota worker process is monitoring database:%s", dbname))); + while (!got_sigterm) { int rc; + /* + * The log printed from the bgworker does
not contain the database name + * but contains the bgworker's pid. We should print the database name + * every BGWORKER_LOG_TIME to ensure that we can find the database name + * by the bgworker's pid in the log file. + */ + log_end_timestamp = GetCurrentTimestamp(); + if (TimestampDifferenceExceeds(log_start_timestamp, log_end_timestamp, BGWORKER_LOG_TIME)) + { + ereport(LOG, (errmsg("[diskquota] disk quota worker process is monitoring database:%s", dbname))); + log_start_timestamp = log_end_timestamp; + } + SIMPLE_FAULT_INJECTOR("diskquota_worker_main"); if (!diskquota_is_paused()) { diff --git a/src/gp_activetable.c b/src/gp_activetable.c index 1b2d6842da1..1002dbd1215 100644 --- a/src/gp_activetable.c +++ b/src/gp_activetable.c @@ -430,6 +430,13 @@ diskquota_fetch_table_stat(PG_FUNCTION_ARGS) DiskQuotaSetOFCache *cache = NULL; DiskQuotaActiveTableEntry *results_entry = NULL; +#ifdef FAULT_INJECTOR + if (SIMPLE_FAULT_INJECTOR("ereport_warning_from_segment") == FaultInjectorTypeSkip) + { + ereport(WARNING, (errmsg("[Fault Injector] This is a warning reported from segment"))); + } +#endif + /* Init the container list in the first call and get the results back */ if (SRF_IS_FIRSTCALL()) { diff --git a/tests/isolation2/expected/test_ereport_from_seg.out b/tests/isolation2/expected/test_ereport_from_seg.out new file mode 100644 index 00000000000..776bfac6276 --- /dev/null +++ b/tests/isolation2/expected/test_ereport_from_seg.out @@ -0,0 +1,62 @@ +CREATE SCHEMA efs1; +CREATE +SELECT diskquota.set_schema_quota('efs1', '1MB'); + set_schema_quota +------------------ + +(1 row) +CREATE TABLE efs1.t(i int); +CREATE + +INSERT INTO efs1.t SELECT generate_series(1, 10000); +INSERT 10000 +-- wait for refresh of diskquota and check the quota size +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +SELECT schema_name, quota_in_mb, nspsize_in_bytes FROM diskquota.show_fast_schema_quota_view WHERE schema_name = 'efs1'; + schema_name | quota_in_mb | nspsize_in_bytes +-------------+-------------+------------------ + efs1 | 1 | 688128 +(1 row) + +-- Enable check quota by relfilenode on seg0. +SELECT gp_inject_fault_infinite('ereport_warning_from_segment', 'skip', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +INSERT INTO efs1.t SELECT generate_series(1, 10000); +INSERT 10000 + +-- wait for refresh of diskquota and check whether the quota size changes +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +SELECT schema_name, quota_in_mb, nspsize_in_bytes FROM diskquota.show_fast_schema_quota_view WHERE schema_name = 'efs1'; + schema_name | quota_in_mb | nspsize_in_bytes +-------------+-------------+------------------ + efs1 | 1 | 1081344 +(1 row) + +DROP TABLE efs1.t; +DROP +DROP SCHEMA efs1; +DROP + +-- Reset fault injection points set by us at the top of this test. 
+SELECT gp_inject_fault_infinite('ereport_warning_from_segment', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) diff --git a/tests/isolation2/isolation2_schedule b/tests/isolation2/isolation2_schedule index 090ea9ad073..090c5cc58f2 100644 --- a/tests/isolation2/isolation2_schedule +++ b/tests/isolation2/isolation2_schedule @@ -9,5 +9,6 @@ test: test_postmaster_restart test: test_worker_timeout test: test_per_segment_config test: test_relation_cache +test: test_ereport_from_seg test: test_drop_extension test: reset_config diff --git a/tests/isolation2/sql/test_ereport_from_seg.sql b/tests/isolation2/sql/test_ereport_from_seg.sql new file mode 100644 index 00000000000..79cd25b2956 --- /dev/null +++ b/tests/isolation2/sql/test_ereport_from_seg.sql @@ -0,0 +1,26 @@ +CREATE SCHEMA efs1; +SELECT diskquota.set_schema_quota('efs1', '1MB'); +CREATE TABLE efs1.t(i int); + +INSERT INTO efs1.t SELECT generate_series(1, 10000); +-- wait for refresh of diskquota and check the quota size +SELECT diskquota.wait_for_worker_new_epoch(); +SELECT schema_name, quota_in_mb, nspsize_in_bytes FROM diskquota.show_fast_schema_quota_view WHERE schema_name = 'efs1'; + +-- Enable check quota by relfilenode on seg0. +SELECT gp_inject_fault_infinite('ereport_warning_from_segment', 'skip', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; + +SELECT diskquota.wait_for_worker_new_epoch(); +INSERT INTO efs1.t SELECT generate_series(1, 10000); + +-- wait for refresh of diskquota and check whether the quota size changes +SELECT diskquota.wait_for_worker_new_epoch(); +SELECT schema_name, quota_in_mb, nspsize_in_bytes FROM diskquota.show_fast_schema_quota_view WHERE schema_name = 'efs1'; + +DROP TABLE efs1.t; +DROP SCHEMA efs1; + +-- Reset fault injection points set by us at the top of this test. +SELECT gp_inject_fault_infinite('ereport_warning_from_segment', 'reset', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=0; diff --git a/tests/regress/expected/test_readiness_logged.out b/tests/regress/expected/test_readiness_logged.out index c798f08b0ee..ed303e706a3 100644 --- a/tests/regress/expected/test_readiness_logged.out +++ b/tests/regress/expected/test_readiness_logged.out @@ -1,5 +1,27 @@ CREATE DATABASE test_readiness_logged; \c test_readiness_logged +-- Get bgworker's log by database name. +-- 1. select bgworker pid by database name. +-- 2. select logmessage by bgworker pid. 
+CREATE VIEW logmessage_count_view AS WITH logp AS( + SELECT + MAX(logpid) as max_logpid + FROM + gp_toolkit.__gp_log_master_ext + WHERE + position( + '[diskquota] start disk quota worker process to monitor database' in logmessage + ) > 0 + AND position(current_database() in logmessage) > 0 +) +SELECT + count(*) +FROM + gp_toolkit.__gp_log_master_ext, + logp +WHERE + logmessage = '[diskquota] diskquota is not ready' + and logpid = max_logpid; CREATE TABLE t (i int) DISTRIBUTED BY (i); CREATE EXTENSION diskquota; WARNING: [diskquota] diskquota is not ready because current database is not empty @@ -11,8 +33,8 @@ SELECT diskquota_test.wait('SELECT diskquota_test.check_cur_db_status(''UNREADY' t (1 row) -SELECT count(*) FROM gp_toolkit.gp_log_database -WHERE logmessage = '[diskquota] diskquota is not ready'; +-- logmessage count should be 1 +SELECT * FROM logmessage_count_view; count ------- 1 @@ -26,11 +48,11 @@ SELECT diskquota_test.wait('SELECT diskquota_test.check_cur_db_status(''UNREADY' t (1 row) -SELECT count(*) FROM gp_toolkit.gp_log_database -WHERE logmessage = '[diskquota] diskquota is not ready'; +-- logmessage count should be 1 +SELECT * FROM logmessage_count_view; count ------- - 2 + 1 (1 row) DROP EXTENSION diskquota; diff --git a/tests/regress/sql/test_readiness_logged.sql b/tests/regress/sql/test_readiness_logged.sql index 3151393cb0c..562733270aa 100644 --- a/tests/regress/sql/test_readiness_logged.sql +++ b/tests/regress/sql/test_readiness_logged.sql @@ -1,21 +1,44 @@ CREATE DATABASE test_readiness_logged; \c test_readiness_logged +-- Get bgworker's log by database name. +-- 1. select bgworker pid by database name. +-- 2. select logmessage by bgworker pid. +CREATE VIEW logmessage_count_view AS WITH logp AS( + SELECT + MAX(logpid) as max_logpid + FROM + gp_toolkit.__gp_log_master_ext + WHERE + position( + '[diskquota] start disk quota worker process to monitor database' in logmessage + ) > 0 + AND position(current_database() in logmessage) > 0 +) +SELECT + count(*) +FROM + gp_toolkit.__gp_log_master_ext, + logp +WHERE + logmessage = '[diskquota] diskquota is not ready' + and logpid = max_logpid; + CREATE TABLE t (i int) DISTRIBUTED BY (i); CREATE EXTENSION diskquota; CREATE EXTENSION diskquota_test; SELECT diskquota_test.wait('SELECT diskquota_test.check_cur_db_status(''UNREADY'');'); -SELECT count(*) FROM gp_toolkit.gp_log_database -WHERE logmessage = '[diskquota] diskquota is not ready'; +-- logmessage count should be 1 +SELECT * FROM logmessage_count_view; \! 
gpstop -raf > /dev/null \c SELECT diskquota_test.wait('SELECT diskquota_test.check_cur_db_status(''UNREADY'');'); -SELECT count(*) FROM gp_toolkit.gp_log_database -WHERE logmessage = '[diskquota] diskquota is not ready'; +-- logmessage count should be 1 +SELECT * FROM logmessage_count_view; DROP EXTENSION diskquota; From edbfd5ebc15fccc66965e367c4a885e68e15b744 Mon Sep 17 00:00:00 2001 From: Zhang Hao Date: Thu, 6 Apr 2023 09:46:02 +0800 Subject: [PATCH 273/330] Ignore some hint/notice for GPDB7 (#328) --- tests/init_file | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/init_file b/tests/init_file index e1b5928c067..613ebf85dc0 100644 --- a/tests/init_file +++ b/tests/init_file @@ -10,6 +10,8 @@ m/WARNING: \[diskquota\] database .* not found for getting epoch .*/ m/^NOTICE: CREATE TABLE will create partition */ m/^WARNING: skipping .* cannot calculate this foreign table size.*/ m/^NOTICE: resource queue required -- using default resource queue "pg_default"/ +m/NOTICE: One or more columns in the following table\(s\) do not have statistics: / +m/HINT: For non-partitioned tables, run analyze .+\. For partitioned tables, run analyze rootpartition .+\. See log for columns missing statistics\./ -- end_matchignore -- start_matchsubs From 685b52dcaaf2a4846c351f951bc944c3f8c17ed1 Mon Sep 17 00:00:00 2001 From: Zhang Hao Date: Thu, 6 Apr 2023 11:07:48 +0800 Subject: [PATCH 274/330] fix flaky test: test_appendonly (#327) --- tests/regress/expected/test_appendonly.out | 4 ++-- tests/regress/expected7/test_appendonly.out | 4 ++-- tests/regress/sql/test_appendonly.sql | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/regress/expected/test_appendonly.out b/tests/regress/expected/test_appendonly.out index d0c465afd1b..d7dfc2ee059 100644 --- a/tests/regress/expected/test_appendonly.out +++ b/tests/regress/expected/test_appendonly.out @@ -47,14 +47,14 @@ SELECT pg_table_size('t_aoco'); (1 row) -- 2. Test that we are able to perform quota limit on appendonly tables. -SELECT diskquota.set_schema_quota('s_appendonly', '1 MB'); +SELECT diskquota.set_schema_quota('s_appendonly', '1.2 MB'); set_schema_quota ------------------ (1 row) -- expect success. -INSERT INTO t_ao SELECT generate_series(1, 1000); +INSERT INTO t_ao SELECT generate_series(1, 10000); SELECT diskquota.wait_for_worker_new_epoch(); wait_for_worker_new_epoch --------------------------- diff --git a/tests/regress/expected7/test_appendonly.out b/tests/regress/expected7/test_appendonly.out index d324bd7623e..c2a61cbbb2d 100644 --- a/tests/regress/expected7/test_appendonly.out +++ b/tests/regress/expected7/test_appendonly.out @@ -47,14 +47,14 @@ SELECT pg_table_size('t_aoco'); (1 row) -- 2. Test that we are able to perform quota limit on appendonly tables. -SELECT diskquota.set_schema_quota('s_appendonly', '1 MB'); +SELECT diskquota.set_schema_quota('s_appendonly', '1.2 MB'); set_schema_quota ------------------ (1 row) -- expect success. -INSERT INTO t_ao SELECT generate_series(1, 1000); +INSERT INTO t_ao SELECT generate_series(1, 10000); SELECT diskquota.wait_for_worker_new_epoch(); wait_for_worker_new_epoch --------------------------- diff --git a/tests/regress/sql/test_appendonly.sql b/tests/regress/sql/test_appendonly.sql index 8852934769c..40337ebaa90 100644 --- a/tests/regress/sql/test_appendonly.sql +++ b/tests/regress/sql/test_appendonly.sql @@ -30,9 +30,9 @@ SELECT tableid::regclass, size SELECT pg_table_size('t_aoco'); -- 2. Test that we are able to perform quota limit on appendonly tables. 
-SELECT diskquota.set_schema_quota('s_appendonly', '1 MB'); +SELECT diskquota.set_schema_quota('s_appendonly', '1.2 MB'); -- expect success. -INSERT INTO t_ao SELECT generate_series(1, 1000); +INSERT INTO t_ao SELECT generate_series(1, 10000); SELECT diskquota.wait_for_worker_new_epoch(); From e9cadf2f31db86724219b9300ecc2c679c301567 Mon Sep 17 00:00:00 2001 From: Zhang Hao Date: Thu, 6 Apr 2023 11:10:53 +0800 Subject: [PATCH 275/330] Add dbname to DiskQuotaWorkerEntry (#326) Previously, we passed dbname by bgw_name to bgworker. But, as https://github.com/greenplum-db/gpdb/blob/f182228992b62e2023e2fac5b4971406abd35c9d/src/backend/postmaster/bgworker.c#L385-L386 shows, bgw_name is copied by ascii_safe_strlcpy(), which replaces non-ASCII bytes with '?'. If the database name is non-ASCII, the bgworker cannot connect to the correct database and raises an error (issue: #323). To solve this problem, we should pass dbname by bgw_main_arg to the main function of the bgworker. --- src/diskquota.c | 14 +++++++--- src/diskquota.h | 3 +- tests/regress/diskquota_schedule | 1 + .../regress/expected/test_dbname_encoding.out | 28 +++++++++++++++++++ tests/regress/sql/test_dbname_encoding.sql | 21 ++++++++++++++ 5 files changed, 62 insertions(+), 5 deletions(-) create mode 100644 tests/regress/expected/test_dbname_encoding.out create mode 100644 tests/regress/sql/test_dbname_encoding.sql diff --git a/src/diskquota.c b/src/diskquota.c index 0cd4a708dbd..b46f39f31b0 100644 --- a/src/diskquota.c +++ b/src/diskquota.c @@ -321,7 +321,12 @@ define_guc_variables(void) void disk_quota_worker_main(Datum main_arg) { - char *dbname = MyBgworkerEntry->bgw_name; + char dbname[NAMEDATALEN]; + + MyWorkerInfo = (DiskQuotaWorkerEntry *)DatumGetPointer(MyBgworkerEntry->bgw_main_arg); + Assert(MyWorkerInfo != NULL); + + memcpy(dbname, MyWorkerInfo->dbname.data, NAMEDATALEN); /* Disable ORCA to avoid fallback */ optimizer = false; @@ -332,8 +337,6 @@ disk_quota_worker_main(Datum main_arg) pqsignal(SIGTERM, disk_quota_sigterm); pqsignal(SIGUSR1, disk_quota_sigusr1); - MyWorkerInfo = (DiskQuotaWorkerEntry *)DatumGetPointer(MyBgworkerEntry->bgw_main_arg); - Assert(MyWorkerInfo != NULL); if (!MyWorkerInfo->dbEntry->inited) ereport(LOG, (errmsg("[diskquota] start disk quota worker process to monitor database:%s", dbname))); /* @@ -1381,7 +1384,10 @@ start_worker(DiskquotaDBEntry *dbEntry) result = INVALID_DB; goto Failed; } - snprintf(worker.bgw_name, sizeof(worker.bgw_name), "%s", dbname); + /* We do not need to get lock here, since this entry is not used by other process. */ + namestrcpy(&(dq_worker->dbname), dbname); + + snprintf(worker.bgw_name, sizeof(worker.bgw_name), "diskquota bgworker %d", dbEntry->dbid); pfree(dbname); /* set bgw_notify_pid so that we can use WaitForBackgroundWorkerStartup */ diff --git a/src/diskquota.h b/src/diskquota.h index 0bd5ab30743..f044773bd84 100644 --- a/src/diskquota.h +++ b/src/diskquota.h @@ -185,7 +185,8 @@ struct DiskQuotaWorkerEntry { dlist_node node; // the double linked list header - int id; // starts from 0, -1 means invalid + int id; // starts from 0, -1 means invalid + NameData dbname; // the database name. It does not need to be reset, when dbEntry == NULL, dbname is not valid. DiskquotaDBEntry *dbEntry; // pointer to shared memory.
DiskquotaLauncherShmem->dbArray }; diff --git a/tests/regress/diskquota_schedule b/tests/regress/diskquota_schedule index 7722765d91f..9805a8e4fc2 100644 --- a/tests/regress/diskquota_schedule +++ b/tests/regress/diskquota_schedule @@ -41,5 +41,6 @@ test: test_default_tablespace test: test_tablespace_diff_schema test: test_worker_schedule test: test_worker_schedule_exception +test: test_dbname_encoding test: test_drop_extension test: reset_config diff --git a/tests/regress/expected/test_dbname_encoding.out b/tests/regress/expected/test_dbname_encoding.out new file mode 100644 index 00000000000..d7b31373461 --- /dev/null +++ b/tests/regress/expected/test_dbname_encoding.out @@ -0,0 +1,28 @@ +-- create a database with non-ascii characters +CREATE DATABASE 数据库1; +\c 数据库1 +CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- check whether current database name is logged. +SELECT + count(logpid) > 0 +FROM + gp_toolkit.__gp_log_master_ext +WHERE + position( + '[diskquota] start disk quota worker process to monitor database' in logmessage + ) > 0 + AND position(current_database() in logmessage) > 0; + ?column? +---------- + t +(1 row) + +DROP EXTENSION diskquota; +\c contrib_regression +DROP DATABASE 数据库1; diff --git a/tests/regress/sql/test_dbname_encoding.sql b/tests/regress/sql/test_dbname_encoding.sql new file mode 100644 index 00000000000..408b6a0a5f1 --- /dev/null +++ b/tests/regress/sql/test_dbname_encoding.sql @@ -0,0 +1,21 @@ +-- create a database with non-ascii characters +CREATE DATABASE 数据库1; + +\c 数据库1 + +CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); +-- check whether current database name is logged. +SELECT + count(logpid) > 0 +FROM + gp_toolkit.__gp_log_master_ext +WHERE + position( + '[diskquota] start disk quota worker process to monitor database' in logmessage + ) > 0 + AND position(current_database() in logmessage) > 0; + +DROP EXTENSION diskquota; +\c contrib_regression +DROP DATABASE 数据库1; \ No newline at end of file From e2ab156e0f7b3320245fcdde900eb3b172ed24ba Mon Sep 17 00:00:00 2001 From: Zhang Hao Date: Thu, 6 Apr 2023 14:14:04 +0800 Subject: [PATCH 276/330] Replace SPI_execute with SPI_cursor_fetch (#322) The result buffer of SPI_execute is limited to 1GB. If the number of rows in diskquota.table_size exceeds 3 * 10^7 (500 segments with 60000 tables for example), SPI_execute("select tableid, size, segid from diskquota.table_size") will raise an error for invalid memory alloc request size 1073741824 (context 'SPI TupTable'). We should fetch table-size entries by portions. Replace SPI_execute() with SPI_cursor_fetch() in load_table_size(). 
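To make the pattern concrete before the diff, here is a minimal sketch of a cursor-based batched fetch; the function name, the BATCH constant, and the row consumer are illustrative, while the SPI calls match the ones the new load_table_size() uses below:

```c
#define BATCH 10000

static void
fetch_in_batches(const char *sql)
{
	SPIPlanPtr plan;
	Portal     portal;

	if ((plan = SPI_prepare(sql, 0, NULL)) == NULL)
		elog(ERROR, "SPI_prepare(\"%s\") failed", sql);
	if ((portal = SPI_cursor_open(NULL, plan, NULL, NULL, true)) == NULL)
		elog(ERROR, "SPI_cursor_open(\"%s\") failed", sql);

	/* pull BATCH rows at a time so no single result approaches the 1GB palloc limit */
	SPI_cursor_fetch(portal, true, BATCH);
	while (SPI_processed > 0)
	{
		for (int i = 0; i < SPI_processed; i++)
		{
			/* consume SPI_tuptable->vals[i] with SPI_getbinval() here */
		}
		SPI_freetuptable(SPI_tuptable); /* release each portion before fetching the next */
		SPI_cursor_fetch(portal, true, BATCH);
	}
	SPI_freetuptable(SPI_tuptable);
	SPI_cursor_close(portal);
	SPI_freeplan(plan);
}
```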
--- src/gp_activetable.c | 80 +++++++++++++++++++++++++++----------------- 1 file changed, 49 insertions(+), 31 deletions(-) diff --git a/src/gp_activetable.c b/src/gp_activetable.c index 1002dbd1215..cf3178b3ad4 100644 --- a/src/gp_activetable.c +++ b/src/gp_activetable.c @@ -936,16 +936,26 @@ get_active_tables_oid(void) static void load_table_size(HTAB *local_table_stats_map) { - int ret; TupleDesc tupdesc; int i; bool found; TableEntryKey key; DiskQuotaActiveTableEntry *quota_entry; + SPIPlanPtr plan; + Portal portal; + char *sql = "select tableid, size, segid from diskquota.table_size"; - ret = SPI_execute("select tableid, size, segid from diskquota.table_size", true, 0); - if (ret != SPI_OK_SELECT) - ereport(ERROR, (errmsg("[diskquota] load_table_size SPI_execute failed: return code %d, error: %m", ret))); + if ((plan = SPI_prepare(sql, 0, NULL)) == NULL) + ereport(ERROR, (errmsg("[diskquota] SPI_prepare(\"%s\") failed", sql))); + if ((portal = SPI_cursor_open(NULL, plan, NULL, NULL, true)) == NULL) + ereport(ERROR, (errmsg("[diskquota] SPI_cursor_open(\"%s\") failed", sql))); + + SPI_cursor_fetch(portal, true, 10000); + + if (SPI_tuptable == NULL) + { + ereport(ERROR, (errmsg("[diskquota] load_table_size SPI_cursor_fetch failed"))); + } tupdesc = SPI_tuptable->tupdesc; #if GP_VERSION_NUM < 70000 @@ -975,35 +985,43 @@ load_table_size(HTAB *local_table_stats_map) get_database_name(MyDatabaseId)))); } - /* push the table oid and size into local_table_stats_map */ - for (i = 0; i < SPI_processed; i++) + while (SPI_processed > 0) { - HeapTuple tup = SPI_tuptable->vals[i]; - Datum dat; - Oid reloid; - int64 size; - int16 segid; - bool isnull; - - dat = SPI_getbinval(tup, tupdesc, 1, &isnull); - if (isnull) continue; - reloid = DatumGetObjectId(dat); - - dat = SPI_getbinval(tup, tupdesc, 2, &isnull); - if (isnull) continue; - size = DatumGetInt64(dat); - dat = SPI_getbinval(tup, tupdesc, 3, &isnull); - if (isnull) continue; - segid = DatumGetInt16(dat); - key.reloid = reloid; - key.segid = segid; - - quota_entry = (DiskQuotaActiveTableEntry *)hash_search(local_table_stats_map, &key, HASH_ENTER, &found); - quota_entry->reloid = reloid; - quota_entry->tablesize = size; - quota_entry->segid = segid; + /* push the table oid and size into local_table_stats_map */ + for (i = 0; i < SPI_processed; i++) + { + HeapTuple tup = SPI_tuptable->vals[i]; + Datum dat; + Oid reloid; + int64 size; + int16 segid; + bool isnull; + + dat = SPI_getbinval(tup, tupdesc, 1, &isnull); + if (isnull) continue; + reloid = DatumGetObjectId(dat); + + dat = SPI_getbinval(tup, tupdesc, 2, &isnull); + if (isnull) continue; + size = DatumGetInt64(dat); + dat = SPI_getbinval(tup, tupdesc, 3, &isnull); + if (isnull) continue; + segid = DatumGetInt16(dat); + key.reloid = reloid; + key.segid = segid; + + quota_entry = (DiskQuotaActiveTableEntry *)hash_search(local_table_stats_map, &key, HASH_ENTER, &found); + quota_entry->reloid = reloid; + quota_entry->tablesize = size; + quota_entry->segid = segid; + } + SPI_freetuptable(SPI_tuptable); + SPI_cursor_fetch(portal, true, 10000); } - return; + + SPI_freetuptable(SPI_tuptable); + SPI_cursor_close(portal); + SPI_freeplan(plan); } /* From 4a73b1975b745792d2cf048721e643298d4f3449 Mon Sep 17 00:00:00 2001 From: Zhang Hao Date: Mon, 10 Apr 2023 16:11:05 +0800 Subject: [PATCH 277/330] Split update SQL statement for diskquota.table_size (#325) Fix issue: #318 If the number of entries needed to be updated in diskquota.table_size is too many, the size of StringBuffer will exceed 1GB and 
raise an error `Cannot enlarge string buffer containing 1073741807 bytes by 20 more bytes.`. We should limit the number of entries in the update statement to 1000000 every time to ensure the string buffer size does not exceed 1GB. --- src/quotamodel.c | 109 ++++++++++++++++++++++++++++++----------------- 1 file changed, 69 insertions(+), 40 deletions(-) diff --git a/src/quotamodel.c b/src/quotamodel.c index 8b7a97570b5..c642759bf06 100644 --- a/src/quotamodel.c +++ b/src/quotamodel.c @@ -53,6 +53,8 @@ #define MAX_NUM_KEYS_QUOTA_MAP 8 /* Number of attributes in quota configuration records. */ #define NUM_QUOTA_CONFIG_ATTRS 6 +/* Number of entries for diskquota.table_size update SQL */ +#define SQL_MAX_VALUES_NUMBER 1000000 /* TableSizeEntry macro function */ /* Use the top bit of totalsize as a flush flag. If this bit is set, the size should be flushed into @@ -1122,6 +1124,40 @@ calculate_table_disk_usage(bool is_init, HTAB *local_active_table_stat_map) } } +static void +delete_from_table_size_map(char *str) +{ + StringInfoData delete_statement; + int ret; + + initStringInfo(&delete_statement); + appendStringInfo(&delete_statement, + "WITH deleted_table AS ( VALUES %s ) " + "delete from diskquota.table_size " + "where (tableid, segid) in ( SELECT * FROM deleted_table );", + str); + ret = SPI_execute(delete_statement.data, false, 0); + if (ret != SPI_OK_DELETE) + ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("[diskquota] delete_from_table_size_map SPI_execute failed: error code %d", ret))); + pfree(delete_statement.data); +} + +static void +insert_into_table_size_map(char *str) +{ + StringInfoData insert_statement; + int ret; + + initStringInfo(&insert_statement); + appendStringInfo(&insert_statement, "insert into diskquota.table_size values %s;", str); + ret = SPI_execute(insert_statement.data, false, 0); + if (ret != SPI_OK_INSERT) + ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("[diskquota] insert_into_table_size_map SPI_execute failed: error code %d", ret))); + pfree(insert_statement.data); +} + /* * Flush the table_size_map to user table diskquota.table_size * To improve update performance, we first delete all the need_to_flush @@ -1135,10 +1171,8 @@ flush_to_table_size(void) TableSizeEntry *tsentry = NULL; StringInfoData delete_statement; StringInfoData insert_statement; - StringInfoData deleted_table_expr; - bool delete_statement_flag = false; - bool insert_statement_flag = false; - int ret; + int delete_entries_num = 0; + int insert_entries_num = 0; /* TODO: Add flush_size_interval to avoid flushing size info in every loop */ @@ -1146,12 +1180,7 @@ flush_to_table_size(void) bool old_optimizer = optimizer; optimizer = false; - initStringInfo(&deleted_table_expr); - appendStringInfo(&deleted_table_expr, "WITH deleted_table AS ( VALUES "); - initStringInfo(&insert_statement); - appendStringInfo(&insert_statement, "insert into diskquota.table_size values "); - initStringInfo(&delete_statement); hash_seq_init(&iter, table_size_map); @@ -1164,17 +1193,39 @@ flush_to_table_size(void) /* delete dropped table from both table_size_map and table table_size */ if (!get_table_size_entry_flag(tsentry, TABLE_EXIST)) { - appendStringInfo(&deleted_table_expr, "(%u,%d), ", tsentry->key.reloid, i); - delete_statement_flag = true; + appendStringInfo(&delete_statement, "%s(%u,%d)", (delete_entries_num == 0) ? 
" " : ", ", + tsentry->key.reloid, i); + delete_entries_num++; + if (delete_entries_num > SQL_MAX_VALUES_NUMBER) + { + delete_from_table_size_map(delete_statement.data); + resetStringInfo(&delete_statement); + delete_entries_num = 0; + } } /* update the table size by delete+insert in table table_size */ else if (TableSizeEntryGetFlushFlag(tsentry, i)) { - appendStringInfo(&deleted_table_expr, "(%u,%d), ", tsentry->key.reloid, i); - appendStringInfo(&insert_statement, "(%u,%ld,%d), ", tsentry->key.reloid, - TableSizeEntryGetSize(tsentry, i), i); - delete_statement_flag = true; - insert_statement_flag = true; + appendStringInfo(&delete_statement, "%s(%u,%d)", (delete_entries_num == 0) ? " " : ", ", + tsentry->key.reloid, i); + appendStringInfo(&insert_statement, "%s(%u,%ld,%d)", (insert_entries_num == 0) ? " " : ", ", + tsentry->key.reloid, TableSizeEntryGetSize(tsentry, i), i); + delete_entries_num++; + insert_entries_num++; + + if (delete_entries_num > SQL_MAX_VALUES_NUMBER) + { + delete_from_table_size_map(delete_statement.data); + resetStringInfo(&delete_statement); + delete_entries_num = 0; + } + if (insert_entries_num > SQL_MAX_VALUES_NUMBER) + { + insert_into_table_size_map(insert_statement.data); + resetStringInfo(&insert_statement); + insert_entries_num = 0; + } + TableSizeEntryResetFlushFlag(tsentry, i); } } @@ -1184,36 +1235,14 @@ flush_to_table_size(void) pg_atomic_fetch_sub_u32(diskquota_table_size_entry_num, 1); } } - truncateStringInfo(&deleted_table_expr, deleted_table_expr.len - strlen(", ")); - truncateStringInfo(&insert_statement, insert_statement.len - strlen(", ")); - appendStringInfo(&deleted_table_expr, ")"); - appendStringInfo(&insert_statement, ";"); - if (delete_statement_flag) - { - /* concatenate all the need_to_flush table to SQL string */ - appendStringInfoString(&delete_statement, (const char *)deleted_table_expr.data); - appendStringInfoString( - &delete_statement, - "delete from diskquota.table_size where (tableid, segid) in ( SELECT * FROM deleted_table );"); - ret = SPI_execute(delete_statement.data, false, 0); - if (ret != SPI_OK_DELETE) - ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("[diskquota] flush_to_table_size SPI_execute failed: error code %d", ret))); - } - if (insert_statement_flag) - { - ret = SPI_execute(insert_statement.data, false, 0); - if (ret != SPI_OK_INSERT) - ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("[diskquota] flush_to_table_size SPI_execute failed: error code %d", ret))); - } + if (delete_entries_num) delete_from_table_size_map(delete_statement.data); + if (insert_entries_num) insert_into_table_size_map(insert_statement.data); optimizer = old_optimizer; pfree(delete_statement.data); pfree(insert_statement.data); - pfree(deleted_table_expr.data); } /* From 2431a13a298e8066db0b01eae7ac57c6a909d29f Mon Sep 17 00:00:00 2001 From: Anton Kurochkin <45575813+woblerr@users.noreply.github.com> Date: Fri, 14 Apr 2023 05:16:29 +0300 Subject: [PATCH 278/330] Update cmake version in README. (#331) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index baaf2ba8e8f..5be27dd5121 100644 --- a/README.md +++ b/README.md @@ -84,7 +84,7 @@ and their disk usage is isolated between databases. # Development -[cmake](https://cmake.org) (>= 3.18) needs to be installed. +[cmake](https://cmake.org) (>= 3.20) needs to be installed. 1. 
Build & install disk quota ``` From 51a9038334c155516c475fb17950359c2fa34535 Mon Sep 17 00:00:00 2001 From: Zhang Hao Date: Mon, 17 Apr 2023 14:25:21 +0800 Subject: [PATCH 279/330] Fix flaky test: test_appendonly (#333) --- tests/regress/expected/test_appendonly.out | 10 ++++++++-- tests/regress/expected7/test_appendonly.out | 10 ++++++++-- tests/regress/sql/test_appendonly.sql | 5 +++-- 3 files changed, 19 insertions(+), 6 deletions(-) diff --git a/tests/regress/expected/test_appendonly.out b/tests/regress/expected/test_appendonly.out index d7dfc2ee059..010aff751bd 100644 --- a/tests/regress/expected/test_appendonly.out +++ b/tests/regress/expected/test_appendonly.out @@ -47,14 +47,20 @@ SELECT pg_table_size('t_aoco'); (1 row) -- 2. Test that we are able to perform quota limit on appendonly tables. -SELECT diskquota.set_schema_quota('s_appendonly', '1.2 MB'); +SELECT diskquota.set_schema_quota('s_appendonly', '2 MB'); set_schema_quota ------------------ (1 row) +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + -- expect success. -INSERT INTO t_ao SELECT generate_series(1, 10000); +INSERT INTO t_ao SELECT generate_series(1, 100000); SELECT diskquota.wait_for_worker_new_epoch(); wait_for_worker_new_epoch --------------------------- diff --git a/tests/regress/expected7/test_appendonly.out b/tests/regress/expected7/test_appendonly.out index c2a61cbbb2d..cfa19a46114 100644 --- a/tests/regress/expected7/test_appendonly.out +++ b/tests/regress/expected7/test_appendonly.out @@ -47,14 +47,20 @@ SELECT pg_table_size('t_aoco'); (1 row) -- 2. Test that we are able to perform quota limit on appendonly tables. -SELECT diskquota.set_schema_quota('s_appendonly', '1.2 MB'); +SELECT diskquota.set_schema_quota('s_appendonly', '2 MB'); set_schema_quota ------------------ (1 row) +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + -- expect success. -INSERT INTO t_ao SELECT generate_series(1, 10000); +INSERT INTO t_ao SELECT generate_series(1, 100000); SELECT diskquota.wait_for_worker_new_epoch(); wait_for_worker_new_epoch --------------------------- diff --git a/tests/regress/sql/test_appendonly.sql b/tests/regress/sql/test_appendonly.sql index 40337ebaa90..c1e996bc820 100644 --- a/tests/regress/sql/test_appendonly.sql +++ b/tests/regress/sql/test_appendonly.sql @@ -30,9 +30,10 @@ SELECT tableid::regclass, size SELECT pg_table_size('t_aoco'); -- 2. Test that we are able to perform quota limit on appendonly tables. -SELECT diskquota.set_schema_quota('s_appendonly', '1.2 MB'); +SELECT diskquota.set_schema_quota('s_appendonly', '2 MB'); +SELECT diskquota.wait_for_worker_new_epoch(); -- expect success. -INSERT INTO t_ao SELECT generate_series(1, 10000); +INSERT INTO t_ao SELECT generate_series(1, 100000); SELECT diskquota.wait_for_worker_new_epoch(); From b952190b25c2c5deaf346d87615f83008873a726 Mon Sep 17 00:00:00 2001 From: Zhang Hao Date: Wed, 26 Apr 2023 11:07:45 +0800 Subject: [PATCH 280/330] Skip refresh_disk_quota_model() when receiving a signal (#321) When signals arrive frequently, diskquota may call refresh_disk_quota_model() more often than once per diskquota_naptime. Solution: when a signal is received, the time spent waiting on the latch (sleep_time) will be smaller than diskquota_naptime. We should skip refresh_disk_quota_model() when sleep_time < diskquota_naptime.
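A condensed sketch of the fix follows (the full hunk is below); naptime_ms, got_sigterm, and the refresh call are illustrative stand-ins for the real worker state:

```c
long        sleep_time = naptime_ms; /* forces a refresh on the first pass */
TimestampTz loop_start = 0;

while (!got_sigterm)
{
	long secs;
	int  usecs;

	/* accumulate the time that actually elapsed since the last iteration */
	TimestampDifference(loop_start, GetCurrentTimestamp(), &secs, &usecs);
	sleep_time += secs * 1000 + usecs / 1000;

	if (sleep_time >= naptime_ms)
	{
		refresh_disk_quota_model(false); /* the expensive periodic work */
		sleep_time = 0;
	}
	loop_start = GetCurrentTimestamp();

	/* wait out only the remainder of the naptime */
	DiskquotaWaitLatch(&MyProc->procLatch, WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH,
	                   naptime_ms - sleep_time);
	ResetLatch(&MyProc->procLatch);
}
```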
--- src/diskquota.c | 60 ++++++++++++++++++++++++++++++++----------------- 1 file changed, 40 insertions(+), 20 deletions(-) diff --git a/src/diskquota.c b/src/diskquota.c index b46f39f31b0..d5630700fd4 100644 --- a/src/diskquota.c +++ b/src/diskquota.c @@ -485,6 +485,11 @@ disk_quota_worker_main(Datum main_arg) bool is_gang_destroyed = false; TimestampTz log_start_timestamp = GetCurrentTimestamp(); TimestampTz log_end_timestamp; + TimestampTz loop_start_timestamp = 0; + TimestampTz loop_end_timestamp; + long sleep_time = diskquota_naptime * 1000; + long secs; + int usecs; ereport(LOG, (errmsg("[diskquota] disk quota worker process is monitoring database:%s", dbname))); while (!got_sigterm) @@ -504,27 +509,44 @@ disk_quota_worker_main(Datum main_arg) log_start_timestamp = log_end_timestamp; } - SIMPLE_FAULT_INJECTOR("diskquota_worker_main"); - if (!diskquota_is_paused()) - { - /* Refresh quota model with init mode */ - refresh_disk_quota_model(!MyWorkerInfo->dbEntry->inited); - MyWorkerInfo->dbEntry->inited = true; - is_gang_destroyed = false; - } - else if (!is_gang_destroyed) + /* + * If the bgworker receives a signal, the latch will be set ahead of the diskquota.naptime. + * To avoid too frequent diskquota refresh caused by receiving the signal, we use + * loop_start_timestamp and loop_end_timestamp to maintain the elapsed time since the last + * diskquota refresh. If the latch is set ahead of diskquota.naptime, + * refresh_disk_quota_model() should be skipped. + */ + loop_end_timestamp = GetCurrentTimestamp(); + TimestampDifference(loop_start_timestamp, loop_end_timestamp, &secs, &usecs); + sleep_time += secs * 1000 + usecs / 1000; + if (sleep_time >= diskquota_naptime * 1000) { - DisconnectAndDestroyAllGangs(false); - is_gang_destroyed = true; - } - worker_increase_epoch(MyWorkerInfo->dbEntry->dbid); + SIMPLE_FAULT_INJECTOR("diskquota_worker_main"); + if (!diskquota_is_paused()) + { + /* Refresh quota model with init mode */ + refresh_disk_quota_model(!MyWorkerInfo->dbEntry->inited); + MyWorkerInfo->dbEntry->inited = true; + is_gang_destroyed = false; + } + else if (!is_gang_destroyed) + { + DisconnectAndDestroyAllGangs(false); + is_gang_destroyed = true; + } + worker_increase_epoch(MyWorkerInfo->dbEntry->dbid); - // GPDB6 opend a MemoryAccount for us without asking us. - // and GPDB6 did not release the MemoryAccount after SPI finish. - // Reset the MemoryAccount although we never create it. + // GPDB6 opend a MemoryAccount for us without asking us. + // and GPDB6 did not release the MemoryAccount after SPI finish. + // Reset the MemoryAccount although we never create it. #if GP_VERSION_NUM < 70000 - MemoryAccounting_Reset(); + MemoryAccounting_Reset(); #endif /* GP_VERSION_NUM */ + + sleep_time = 0; + } + loop_start_timestamp = GetCurrentTimestamp(); + if (DiskquotaLauncherShmem->isDynamicWorker) { break; @@ -538,7 +560,7 @@ disk_quota_worker_main(Datum main_arg) * background process goes away immediately in an emergency. 
*/ rc = DiskquotaWaitLatch(&MyProc->procLatch, WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, - diskquota_naptime * 1000L); + diskquota_naptime * 1000 - sleep_time); ResetLatch(&MyProc->procLatch); // be nice to scheduler when naptime == 0 and diskquota_is_paused() == true @@ -563,8 +585,6 @@ disk_quota_worker_main(Datum main_arg) ereport(LOG, (errmsg("[diskquota] stop disk quota worker process to monitor database:%s", dbname))); ereport(DEBUG1, (errmsg("[diskquota] stop disk quota worker process to monitor database:%s", dbname))); #if DISKQUOTA_DEBUG - long secs; - int usecs; TimestampDifference(MyWorkerInfo->dbEntry->last_run_time, GetCurrentTimestamp(), &secs, &usecs); MyWorkerInfo->dbEntry->cost = secs * 1000L + usecs / 1000L; #endif From e589152ffede5452c3dcc50e28d9d2a7dcbb7380 Mon Sep 17 00:00:00 2001 From: Zhang Hao Date: Wed, 26 Apr 2023 17:12:24 +0800 Subject: [PATCH 281/330] Allocate more shared memory to quota_info (#334) Allocate more SHM to quota_info, so that the user can set more quota configurations. --- src/quotamodel.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/src/quotamodel.c b/src/quotamodel.c index c642759bf06..6b8507b3810 100644 --- a/src/quotamodel.c +++ b/src/quotamodel.c @@ -55,6 +55,8 @@ #define NUM_QUOTA_CONFIG_ATTRS 6 /* Number of entries for diskquota.table_size update SQL */ #define SQL_MAX_VALUES_NUMBER 1000000 +/* Number of entries for hash table in quota_info */ +#define MAX_QUOTA_MAP_ENTRIES (128 * 1024L) /* TableSizeEntry macro function */ /* Use the top bit of totalsize as a flush flag. If this bit is set, the size should be flushed into @@ -508,7 +510,7 @@ diskquota_worker_shmem_size() Size size; size = hash_estimate_size(MAX_NUM_TABLE_SIZE_ENTRIES / MAX_NUM_MONITORED_DB + 100, sizeof(TableSizeEntry)); size = add_size(size, hash_estimate_size(MAX_LOCAL_DISK_QUOTA_REJECT_ENTRIES, sizeof(LocalRejectMapEntry))); - size = add_size(size, hash_estimate_size(1024L, sizeof(struct QuotaMapEntry)) * NUM_QUOTA_TYPES); + size = add_size(size, hash_estimate_size(MAX_QUOTA_MAP_ENTRIES * NUM_QUOTA_TYPES, sizeof(struct QuotaMapEntry))); return size; } @@ -575,7 +577,8 @@ init_disk_quota_model(uint32 id) memset(&hash_ctl, 0, sizeof(hash_ctl)); hash_ctl.entrysize = sizeof(struct QuotaMapEntry); hash_ctl.keysize = sizeof(struct QuotaMapEntryKey); - quota_info[type].map = DiskquotaShmemInitHash(str.data, 1024L, 1024L, &hash_ctl, HASH_ELEM, DISKQUOTA_TAG_HASH); + quota_info[type].map = DiskquotaShmemInitHash(str.data, 1024L, MAX_QUOTA_MAP_ENTRIES, &hash_ctl, HASH_ELEM, + DISKQUOTA_TAG_HASH); } pfree(str.data); } @@ -640,7 +643,8 @@ vacuum_disk_quota_model(uint32 id) memset(&hash_ctl, 0, sizeof(hash_ctl)); hash_ctl.entrysize = sizeof(struct QuotaMapEntry); hash_ctl.keysize = sizeof(struct QuotaMapEntryKey); - quota_info[type].map = DiskquotaShmemInitHash(str.data, 1024L, 1024L, &hash_ctl, HASH_ELEM, DISKQUOTA_TAG_HASH); + quota_info[type].map = DiskquotaShmemInitHash(str.data, 1024L, MAX_QUOTA_MAP_ENTRIES, &hash_ctl, HASH_ELEM, + DISKQUOTA_TAG_HASH); hash_seq_init(&iter, quota_info[type].map); while ((qentry = hash_seq_search(&iter)) != NULL) { From f41e75a509adea024298785d220dc96262a51661 Mon Sep 17 00:00:00 2001 From: Zhang Hao Date: Wed, 26 Apr 2023 19:00:30 +0800 Subject: [PATCH 282/330] Enable standby test for 6X_STABLE (#335) --- concourse/scripts/test_diskquota.sh | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/concourse/scripts/test_diskquota.sh b/concourse/scripts/test_diskquota.sh index
87c48663458..d50ae745c4e 100755 --- a/concourse/scripts/test_diskquota.sh +++ b/concourse/scripts/test_diskquota.sh @@ -27,8 +27,12 @@ function _main() { export SHOW_REGRESS_DIFF=1 time cmake --build . --target installcheck # Run test again with standby master - # activate_standby - # time cmake --build . --target installcheck + # FIXME: enable test for GPDB7 + if [[ $PGPORT -eq 6000 ]] + then + activate_standby + time cmake --build . --target installcheck + fi # Run upgrade test (with standby master) time cmake --build . --target upgradecheck popd From 5e4e123ea8e8ed201ec97263249438476dd37bcd Mon Sep 17 00:00:00 2001 From: Chen Mulong Date: Thu, 27 Apr 2023 11:59:58 +0800 Subject: [PATCH 283/330] Bump version to 2.2.1 (#336) --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index ccbccc3dc62..c043eea7767 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.2.0 +2.2.1 From 8d2e825dfd2e01fe88d148a731df312d7cec3bd9 Mon Sep 17 00:00:00 2001 From: Chen Mulong Date: Thu, 27 Apr 2023 14:19:04 +0800 Subject: [PATCH 284/330] Use install_gpdb_component in CI test (#337) So the script can be tested as well. --- concourse/scripts/test_diskquota.sh | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/concourse/scripts/test_diskquota.sh b/concourse/scripts/test_diskquota.sh index d50ae745c4e..85b2bce157d 100755 --- a/concourse/scripts/test_diskquota.sh +++ b/concourse/scripts/test_diskquota.sh @@ -15,7 +15,12 @@ function activate_standby() { } function _main() { - tar -xzf /home/gpadmin/bin_diskquota/diskquota-*-*.tar.gz -C /usr/local/greenplum-db-devel + local tmp_dir="$(mktemp -d)" + tar -xzf /home/gpadmin/bin_diskquota/diskquota-*-*.tar.gz -C "$tmp_dir" + pushd "$tmp_dir" + ./install_gpdb_component + popd + source /home/gpadmin/gpdb_src/gpAux/gpdemo/gpdemo-env.sh pushd /home/gpadmin/gpdb_src From d4ecc282adafe5d1e7651329724af6d35ed0ad20 Mon Sep 17 00:00:00 2001 From: Xing Guo Date: Thu, 25 May 2023 17:30:00 +0800 Subject: [PATCH 285/330] Fix compiler warnings. (#339) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Note: `TextDatumGetCString()` returns a null-terminated CString. 
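The warning quoted below fires because a strncpy bound equal to the destination size can truncate the source without writing a terminating NUL. A minimal sketch of the safe pattern (variable names illustrative), which a later commit in this series adopts via a sizeof(buf) - 1 bound:

```c
char version[64] = {0};                        /* zeroed, so the result is always NUL-terminated */
strncpy(version, source, sizeof(version) - 1); /* the final byte stays a terminator */
```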
``` [ 12%] Building C object CMakeFiles/diskquota.dir/src/diskquota.c.o In file included from /home/v/workspace/diskquota/src/diskquota.h:16, from /home/v/workspace/diskquota/src/diskquota.c:18: /home/v/workspace/diskquota/src/diskquota.c: In function ‘diskquota_status_schema_version’: /home/v/.local/gpdb7/include/postgresql/server/c.h:957:25: warning: ‘strncpy’ specified bound 64 equals destinatio n size [-Wstringop-truncation] 957 | strncpy(_dst, (src), _len); \ | ^~~~~~~~~~~~~~~~~~~~~~~~~~ /home/v/workspace/diskquota/src/diskquota.c:1548:9: note: in expansion of macro ‘StrNCpy’ 1548 | StrNCpy(version, vv, sizeof(version)); | ^~~~~~~ ``` Co-authored-by: Hao Zhang --- src/diskquota.c | 24 +++++++++++------------- 1 file changed, 11 insertions(+), 13 deletions(-) diff --git a/src/diskquota.c b/src/diskquota.c index d5630700fd4..04f27f5ded4 100644 --- a/src/diskquota.c +++ b/src/diskquota.c @@ -1514,9 +1514,6 @@ diskquota_status_binary_version() static const char * diskquota_status_schema_version() { - static char version[64] = {0}; - memset(version, 0, sizeof(version)); - int ret = SPI_connect(); Assert(ret = SPI_OK_CONNECT); @@ -1526,30 +1523,31 @@ diskquota_status_schema_version() { ereport(WARNING, (errmsg("[diskquota] when reading installed version lines %ld code = %d", SPI_processed, ret))); - goto out; + goto fail; } if (SPI_processed == 0) { - goto out; + goto fail; } - bool is_null = false; - Datum v = SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 1, &is_null); + bool is_null = false; + Datum version_datum = SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 1, &is_null); Assert(is_null == false); - char *vv = TextDatumGetCString(v); - if (vv == NULL) + char *version = TextDatumGetCString(version_datum); + if (version == NULL || *version == '\0') { ereport(WARNING, (errmsg("[diskquota] 'extversion' is empty in pg_class.pg_extension. 
may catalog corrupted"))); - goto out; + goto fail; } - StrNCpy(version, vv, sizeof(version)); - -out: SPI_finish(); return version; + +fail: + SPI_finish(); + return ""; } PG_FUNCTION_INFO_V1(diskquota_status); From 1111f71109957d493a903c8ed9b7b7071a6bf6b2 Mon Sep 17 00:00:00 2001 From: Zhang Hao Date: Mon, 26 Jun 2023 13:31:06 +0800 Subject: [PATCH 286/330] Fix bug: diskquota.status() (#344) Fix bug produced by #339 Co-authored-by: Xing Guo admin@higuoxing.com --- src/diskquota.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/diskquota.c b/src/diskquota.c index 04f27f5ded4..0d15d9fc85c 100644 --- a/src/diskquota.c +++ b/src/diskquota.c @@ -1514,7 +1514,8 @@ diskquota_status_binary_version() static const char * diskquota_status_schema_version() { - int ret = SPI_connect(); + static char ret_version[64]; + int ret = SPI_connect(); Assert(ret = SPI_OK_CONNECT); ret = SPI_execute("select extversion from pg_extension where extname = 'diskquota'", true, 0); @@ -1542,8 +1543,10 @@ diskquota_status_schema_version() goto fail; } + StrNCpy(ret_version, version, sizeof(ret_version) - 1); + SPI_finish(); - return version; + return ret_version; fail: SPI_finish(); From 58bb9e25f40695d4faf9e8763db3b20f9e823251 Mon Sep 17 00:00:00 2001 From: Zhang Hao Date: Mon, 26 Jun 2023 14:33:38 +0800 Subject: [PATCH 287/330] fix pipeline (#345) fix pipeline resource --- concourse/pipeline/res_def.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/concourse/pipeline/res_def.yml b/concourse/pipeline/res_def.yml index 4e6578d856a..9920270e8f1 100644 --- a/concourse/pipeline/res_def.yml +++ b/concourse/pipeline/res_def.yml @@ -172,25 +172,25 @@ resources: source: bucket: pivotal-gpdb-concourse-resources-prod json_key: ((concourse-gcs-resources-service-account-key)) - regexp: server/release-candidates/gpdb6/greenplum-db-server-6\.([0-9]|([1-8][0-9])|(9[0-8]))\..*-dev.*-centos6.tar.gz + regexp: server/release-candidates/gpdb6/greenplum-db-server-(6\.([0-9]|([1-8][0-9])|(9[0-8]))\..*-dev\.\d+)-.*-centos6.tar.gz - name: bin_gpdb6_centos7 type: gcs source: bucket: pivotal-gpdb-concourse-resources-prod json_key: ((concourse-gcs-resources-service-account-key)) - regexp: server/release-candidates/gpdb6/greenplum-db-server-6\.([0-9]|([1-8][0-9])|(9[0-8]))\..*-dev.*-centos7.tar.gz + regexp: server/release-candidates/gpdb6/greenplum-db-server-(6\.([0-9]|([1-8][0-9])|(9[0-8]))\..*-dev\.\d+)-.*-centos7.tar.gz - name: bin_gpdb6_rhel8 type: gcs source: bucket: pivotal-gpdb-concourse-resources-prod json_key: ((concourse-gcs-resources-service-account-key)) - regexp: server/release-candidates/gpdb6/greenplum-db-server-6\.([0-9]|([1-8][0-9])|(9[0-8]))\..*-dev.*-rhel8.tar.gz + regexp: server/release-candidates/gpdb6/greenplum-db-server-(6\.([0-9]|([1-8][0-9])|(9[0-8]))\..*-dev\.\d+)-.*-rhel8.tar.gz - name: bin_gpdb6_ubuntu18 type: gcs source: bucket: pivotal-gpdb-concourse-resources-prod json_key: ((concourse-gcs-resources-service-account-key)) - regexp: server/release-candidates/gpdb6/greenplum-db-server-6\.([0-9]|([1-8][0-9])|(9[0-8]))\..*-dev.*-ubuntu18.04.tar.gz + regexp: server/release-candidates/gpdb6/greenplum-db-server-(6\.([0-9]|([1-8][0-9])|(9[0-8]))\..*-dev\.\d+)-.*-ubuntu18.04.tar.gz - name: bin_gpdb7_el8 type: gcs source: From c2686c900367ff84af84cc741b31107812976ee5 Mon Sep 17 00:00:00 2001 From: Zhang Hao Date: Tue, 27 Jun 2023 17:46:14 +0800 Subject: [PATCH 288/330] Reduce the number of the log in bgworker. 
(#346) --- src/diskquota.c | 17 +++++++++++------ src/diskquota.h | 2 ++ 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/src/diskquota.c b/src/diskquota.c index 0d15d9fc85c..5099b713ff4 100644 --- a/src/diskquota.c +++ b/src/diskquota.c @@ -338,7 +338,13 @@ disk_quota_worker_main(Datum main_arg) pqsignal(SIGUSR1, disk_quota_sigusr1); if (!MyWorkerInfo->dbEntry->inited) + { + MyWorkerInfo->dbEntry->last_log_time = GetCurrentTimestamp(); ereport(LOG, (errmsg("[diskquota] start disk quota worker process to monitor database:%s", dbname))); + } + /* To avoid last_log_time from being uninitialized. */ + if (MyWorkerInfo->dbEntry->last_log_time > GetCurrentTimestamp()) + MyWorkerInfo->dbEntry->last_log_time = GetCurrentTimestamp(); /* * The shmem exit hook is registered after registering disk_quota_sigterm. * So if the SIGTERM arrives before this statement, the shmem exit hook @@ -482,11 +488,10 @@ disk_quota_worker_main(Datum main_arg) if (!MyWorkerInfo->dbEntry->inited) update_monitordb_status(MyWorkerInfo->dbEntry->dbid, DB_RUNNING); - bool is_gang_destroyed = false; - TimestampTz log_start_timestamp = GetCurrentTimestamp(); - TimestampTz log_end_timestamp; + bool is_gang_destroyed = false; TimestampTz loop_start_timestamp = 0; TimestampTz loop_end_timestamp; + TimestampTz log_time; long sleep_time = diskquota_naptime * 1000; long secs; int usecs; @@ -502,11 +507,11 @@ disk_quota_worker_main(Datum main_arg) * every BGWORKER_LOG_TIME to ensure that we can find the database name * by the bgworker's pid in the log file. */ - log_end_timestamp = GetCurrentTimestamp(); - if (TimestampDifferenceExceeds(log_start_timestamp, log_end_timestamp, BGWORKER_LOG_TIME)) + log_time = GetCurrentTimestamp(); + if (TimestampDifferenceExceeds(log_time, MyWorkerInfo->dbEntry->last_log_time, BGWORKER_LOG_TIME)) { ereport(LOG, (errmsg("[diskquota] disk quota worker process is monitoring database:%s", dbname))); - log_start_timestamp = log_end_timestamp; + MyWorkerInfo->dbEntry->last_log_time = log_time; } /* diff --git a/src/diskquota.h b/src/diskquota.h index f044773bd84..7baeaf1b3be 100644 --- a/src/diskquota.h +++ b/src/diskquota.h @@ -217,6 +217,8 @@ struct DiskquotaDBEntry bool inited; // this entry is inited, will set to true after the worker finish the frist run. bool in_use; // this slot is in using. AKA dbid != 0 + + TimestampTz last_log_time; // the last time log current database info. }; typedef enum MonitorDBStatus From 0a837c89e131c88a2b05bc626ac35f22f358e577 Mon Sep 17 00:00:00 2001 From: Zhang Hao Date: Tue, 27 Jun 2023 17:54:08 +0800 Subject: [PATCH 289/330] Fix bug: lose monitored_dbid_cache after switching mirror (#342) # Problem Recording active tables and permission checking on segments rely on `monitored_dbid_cache`. But after mirror switching, the data in shared memory is lost, and the above behaviors will be blocked. # Solution Segments update their `monitored_dbid_cache` after receiving pull_active_tables_oid requests every naptime. 
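For reviewers who want to reproduce the failure by hand, the regression test below boils down to the following sketch. It assumes a gpdemo cluster with the extension created and a schema quota already set in some database; the `ftsdb` database name is illustrative and not part of the patch:

```sh
#!/bin/bash
# Sketch only: manually reproduce the mirror-switch scenario fixed here.
set -ex
DB=ftsdb

# Stop the primary of content 0 and let FTS promote its mirror.
PRIMARY_DIR=$(psql -At -d "$DB" -c \
    "select datadir from gp_segment_configuration where role='p' and content=0")
pg_ctl stop -D "$PRIMARY_DIR" -m immediate
psql -d "$DB" -c "select gp_request_fts_probe_scan()"

# Without this fix, the promoted mirror starts with an empty
# monitored_dbid_cache, so active tables stop being recorded and
# diskquota.table_size is never updated again. With the fix, the cache is
# rebuilt on the next pull_active_tables_oid request (once per naptime):
psql -d "$DB" -c "select diskquota.wait_for_worker_new_epoch()"
psql -d "$DB" -c "select tableid::regclass, size, segid from diskquota.table_size order by segid"

# Recover the failed segment and rebalance afterwards.
gprecoverseg -a && gprecoverseg -ar
```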
--- src/diskquota.h | 1 + src/gp_activetable.c | 2 + src/monitored_db.c | 29 +++++++++++ .../regress/expected/test_primary_failure.out | 51 +++++++++++++++---- tests/regress/sql/test_primary_failure.in.sql | 27 +++++++--- 5 files changed, 95 insertions(+), 15 deletions(-) diff --git a/src/diskquota.h b/src/diskquota.h index 7baeaf1b3be..8e46d31732a 100644 --- a/src/diskquota.h +++ b/src/diskquota.h @@ -291,4 +291,5 @@ extern HTAB *diskquota_hash_create(const char *tabname, long nelem, HASHC DiskquotaHashFunction hashFunction); extern HTAB *DiskquotaShmemInitHash(const char *name, long init_size, long max_size, HASHCTL *infoP, int hash_flags, DiskquotaHashFunction hash_function); +extern void refresh_monitored_dbid_cache(void); #endif diff --git a/src/gp_activetable.c b/src/gp_activetable.c index cf3178b3ad4..a0d1c524019 100644 --- a/src/gp_activetable.c +++ b/src/gp_activetable.c @@ -749,6 +749,8 @@ get_active_tables_oid(void) Oid relOid; + refresh_monitored_dbid_cache(); + memset(&ctl, 0, sizeof(ctl)); ctl.keysize = sizeof(DiskQuotaActiveTableFileEntry); ctl.entrysize = sizeof(DiskQuotaActiveTableFileEntry); diff --git a/src/monitored_db.c b/src/monitored_db.c index f23cb9a9464..c2b29a1bad9 100644 --- a/src/monitored_db.c +++ b/src/monitored_db.c @@ -336,3 +336,32 @@ dump_monitored_dbid_cache(long *nitems) Assert(count == 0); return entries; } + +/* + * After primary failure and mirror switching, the monitored_dbid_cache + * is lost on segments. We should refresh the monitored_dbid_cache during + * every diskquota refresh procedure. + */ +void +refresh_monitored_dbid_cache(void) +{ + bool found; + Oid dbid = MyDatabaseId; + LWLockAcquire(diskquota_locks.monitored_dbid_cache_lock, LW_EXCLUSIVE); + MonitorDBEntry entry = hash_search(monitored_dbid_cache, &dbid, HASH_ENTER_NULL, &found); + if (entry == NULL) + { + ereport(WARNING, (errmsg("can't alloc memory on dbid cache, there are too many databases to monitor"))); + } + else if (!found) + { + entry->paused = false; + pg_atomic_init_u32(&(entry->epoch), 0); + pg_atomic_init_u32(&(entry->status), DB_RUNNING); + ereport(LOG, (errmsg("the entry in monitored_dbid_cache is lost due to mirror switching and is added back now, " + "dbid: %d", + dbid))); + } + + LWLockRelease(diskquota_locks.monitored_dbid_cache_lock); +} diff --git a/tests/regress/expected/test_primary_failure.out b/tests/regress/expected/test_primary_failure.out index 5f5c18a2360..48160e24616 100644 --- a/tests/regress/expected/test_primary_failure.out +++ b/tests/regress/expected/test_primary_failure.out @@ -36,19 +36,27 @@ returns text as $$ else: return subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True, encoding='utf8').replace('.', '') $$ language plpython2u; -CREATE TABLE a(i int) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
-INSERT INTO a SELECT generate_series(1,100); -INSERT INTO a SELECT generate_series(1,100000); +CREATE TABLE a(i int, j int) DISTRIBUTED BY (i); +-- the entries will be inserted into seg0 +INSERT INTO a SELECT 2, generate_series(1,100); +INSERT INTO a SELECT 2, generate_series(1,100000); SELECT diskquota.wait_for_worker_new_epoch(); wait_for_worker_new_epoch --------------------------- t (1 row) +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'a'::regclass ORDER BY segid; + tableid | size | segid +---------+---------+------- + a | 3735552 | -1 + a | 3735552 | 0 + a | 0 | 1 + a | 0 | 2 +(4 rows) + -- expect insert fail -INSERT INTO a SELECT generate_series(1,100); +INSERT INTO a SELECT 2, generate_series(1,100); ERROR: schema's disk space quota exceeded with name: ftsr -- now one of primary is down select pg_ctl((select datadir from gp_segment_configuration c where c.role='p' and c.content=0), 'stop'); @@ -74,7 +82,7 @@ select content, preferred_role, role, status, mode from gp_segment_configuration (2 rows) -- expect insert fail -INSERT INTO a SELECT generate_series(1,100); +INSERT INTO a SELECT 2, generate_series(1,100); ERROR: schema's disk space quota exceeded with name: ftsr -- increase quota SELECT diskquota.set_schema_quota('ftsr', '200 MB'); @@ -83,6 +91,31 @@ SELECT diskquota.set_schema_quota('ftsr', '200 MB'); (1 row) +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect insert success +INSERT INTO a SELECT 2, generate_series(1,10000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- check whether monitored_dbid_cache is refreshed in mirror +-- diskquota.table_size should be updated +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'a'::regclass ORDER BY segid; + tableid | size | segid +---------+---------+------- + a | 4096000 | -1 + a | 4096000 | 0 + a | 0 | 1 + a | 0 | 2 +(4 rows) + -- pull up failed primary -- start_ignore select pg_recoverseg((select datadir from gp_segment_configuration c where c.role='p' and c.content=-1), 'a'); @@ -256,10 +289,10 @@ SELECT diskquota.wait_for_worker_new_epoch(); SELECT quota_in_mb, nspsize_in_bytes from diskquota.show_fast_schema_quota_view where schema_name='ftsr'; quota_in_mb | nspsize_in_bytes -------------+------------------ - 200 | 3932160 + 200 | 4096000 (1 row) -INSERT INTO a SELECT generate_series(1,100); +INSERT INTO a SELECT 2, generate_series(1,100); DROP TABLE a; DROP SCHEMA ftsr CASCADE; NOTICE: drop cascades to 2 other objects diff --git a/tests/regress/sql/test_primary_failure.in.sql b/tests/regress/sql/test_primary_failure.in.sql index cbac6e4cde8..2dd2689b6e7 100644 --- a/tests/regress/sql/test_primary_failure.in.sql +++ b/tests/regress/sql/test_primary_failure.in.sql @@ -33,12 +33,16 @@ returns text as $$ return subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True, encoding='utf8').replace('.', '') $$ language @PLPYTHON_LANG_STR@; -CREATE TABLE a(i int) DISTRIBUTED BY (i); -INSERT INTO a SELECT generate_series(1,100); -INSERT INTO a SELECT generate_series(1,100000); +CREATE TABLE a(i int, j int) DISTRIBUTED BY (i); +-- the entries will be inserted into seg0 +INSERT INTO a SELECT 2, generate_series(1,100); +INSERT INTO a SELECT 2, generate_series(1,100000); SELECT diskquota.wait_for_worker_new_epoch(); + +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'a'::regclass 
ORDER BY segid; + -- expect insert fail -INSERT INTO a SELECT generate_series(1,100); +INSERT INTO a SELECT 2, generate_series(1,100); -- now one of primary is down select pg_ctl((select datadir from gp_segment_configuration c where c.role='p' and c.content=0), 'stop'); @@ -50,11 +54,22 @@ select gp_request_fts_probe_scan(); select content, preferred_role, role, status, mode from gp_segment_configuration where content = 0; -- expect insert fail -INSERT INTO a SELECT generate_series(1,100); +INSERT INTO a SELECT 2, generate_series(1,100); -- increase quota SELECT diskquota.set_schema_quota('ftsr', '200 MB'); +SELECT diskquota.wait_for_worker_new_epoch(); + +-- expect insert success +INSERT INTO a SELECT 2, generate_series(1,10000); + +SELECT diskquota.wait_for_worker_new_epoch(); + +-- check whether monitored_dbid_cache is refreshed in mirror +-- diskquota.table_size should be updated +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'a'::regclass ORDER BY segid; + -- pull up failed primary -- start_ignore select pg_recoverseg((select datadir from gp_segment_configuration c where c.role='p' and c.content=-1), 'a'); @@ -67,7 +82,7 @@ select content, preferred_role, role, status, mode from gp_segment_configuration SELECT diskquota.wait_for_worker_new_epoch(); SELECT quota_in_mb, nspsize_in_bytes from diskquota.show_fast_schema_quota_view where schema_name='ftsr'; -INSERT INTO a SELECT generate_series(1,100); +INSERT INTO a SELECT 2, generate_series(1,100); DROP TABLE a; DROP SCHEMA ftsr CASCADE; From 05da9d4dbb78373ab2c38ad576b22f637cc159ec Mon Sep 17 00:00:00 2001 From: Zhang Hao Date: Tue, 27 Jun 2023 17:55:14 +0800 Subject: [PATCH 290/330] Enable continuous upgrade. (#340) To achieve upgrading directly from 2.0 to 2.2, we should do the following things: - cherry-pick this PR in diskquota-2.0, diskquota-2.1 and diskquota-2.2. - set the shared_preload_libraries as `diskquota-2.2`: `gpconfig -c shared_preload_libraries -v 'diskquota-2.2'` - restart cluster: `gpstop -ar` - execute the following SQLs: ``` ALTER extension diskquota update to '2.2'; ``` --- src/diskquota.c | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/src/diskquota.c b/src/diskquota.c index 5099b713ff4..3c4fa634a88 100644 --- a/src/diskquota.c +++ b/src/diskquota.c @@ -42,6 +42,7 @@ #include "utils/snapmgr.h" #include "utils/syscache.h" #include "utils/timestamp.h" +#include "tcop/pquery.h" PG_MODULE_MAGIC; @@ -175,6 +176,28 @@ _PG_init(void) /* diskquota.so must be in shared_preload_libraries to init SHM. */ if (!process_shared_preload_libraries_in_progress) { + /* + * To support the continuous upgrade/downgrade, we should skip the library + * check in _PG_init() during upgrade/downgrade. If the POSTGRES backend + * process is in normal mode and meets one of the following conditions, we + * skip the library check: + * - The backend is not a QD. We only need to check the library on QD. + * - The current command is `ALTER EXTENSION`. 
+ */ + if (IsNormalProcessingMode()) + { + if (Gp_role != GP_ROLE_DISPATCH) + { + ereport(WARNING, (errmsg("[diskquota] booting " DISKQUOTA_VERSION ", but " DISKQUOTA_BINARY_NAME + " not in shared_preload_libraries."))); + return; + } + if (ActivePortal && ActivePortal->sourceTag == T_AlterExtensionStmt) + { + ereport(LOG, (errmsg("[diskquota] altering diskquota version to " DISKQUOTA_VERSION "."))); + return; + } + } ereport(ERROR, (errmsg("[diskquota] booting " DISKQUOTA_VERSION ", but " DISKQUOTA_BINARY_NAME " not in shared_preload_libraries. abort."))); } From c81e69600fa60bad2423e219e0ce7179f6f6edaa Mon Sep 17 00:00:00 2001 From: Zhang Hao Date: Wed, 28 Jun 2023 11:35:21 +0800 Subject: [PATCH 291/330] Fix pipeline. (#349) Get the latest package version for release resource. --- concourse/pipeline/res_def.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/concourse/pipeline/res_def.yml b/concourse/pipeline/res_def.yml index 9920270e8f1..f53bb0a30b6 100644 --- a/concourse/pipeline/res_def.yml +++ b/concourse/pipeline/res_def.yml @@ -196,7 +196,7 @@ resources: source: bucket: pivotal-gpdb-concourse-resources-prod json_key: ((concourse-gcs-resources-service-account-key)) - regexp: server/release-candidates/gpdb7/greenplum-db-server-7\.([0-9]|([1-8][0-9])|(9[0-8]))\..*-dev.*-el8.tar.gz + regexp: server/release-candidates/gpdb7/greenplum-db-server-(7\.([0-9]|([1-8][0-9])|(9[0-8]))\..*-dev\.\d+)-.*-el8.tar.gz # Diskquota releases - name: bin_diskquota_gpdb6_rhel6 From e3622e186c360dcd2b9f9e6ef4a399f3a54f8b7a Mon Sep 17 00:00:00 2001 From: Chen Mulong Date: Wed, 28 Jun 2023 14:39:58 +0800 Subject: [PATCH 292/330] Fix upgrade version check (#347) - Because of 97f1f9b46b, the sql file directory has been changed. The sql version check wouldn't work since it cannot find the sql files anymore. Change it to the correct ddl directory. - 'exec_program' is deprecated, use 'execute_process' instead. - 'git tag | sort' returns the latest version among all branches, but not the closest tag to the current commit. Use 'git describe --tags' instead. So the upgrade version check will work for the 2.1.x patch release. 
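The difference between the two approaches is easy to see on any repository that tags releases on several branches; the tag values below are only an example:

```sh
# On a 2.1.x patch branch, after 2.2.0 was tagged on another branch:
git tag | sort --version-sort -r | head -n 1
# 2.2.0   <- newest tag in the whole repo: the wrong baseline for the DDL diff

git describe --tags --abbrev=0
# 2.1.3   <- nearest tag reachable from HEAD: the intended baseline
```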
--- upgrade_test/CMakeLists.txt | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/upgrade_test/CMakeLists.txt b/upgrade_test/CMakeLists.txt index 1e3a256283a..f151bd66c02 100644 --- a/upgrade_test/CMakeLists.txt +++ b/upgrade_test/CMakeLists.txt @@ -30,21 +30,24 @@ regresstarget_add( REGRESS_OPTS --dbname=contrib_regression) -exec_program( - git ${CMAKE_SOURCE_DIR} ARGS - tag | sort --version-sort -r | head -n 1 +execute_process( + COMMAND git describe --tags --abbrev=0 + WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} OUTPUT_VARIABLE latest_tag + OUTPUT_STRIP_TRAILING_WHITESPACE ) # check whether DDL file (*.sql) is modified -file(GLOB ddl_files ${CMAKE_SOURCE_DIR}/*.sql) +file(GLOB ddl_files ${DISKQUOTA_DDL_DIR}/*.sql) foreach(ddl IN LISTS ddl_files) cmake_path(GET ddl FILENAME ddl) - exec_program( - git ${CMAKE_SOURCE_DIR} ARGS - diff ${latest_tag} --exit-code ${ddl} - OUTPUT_VARIABLE NULL - RETURN_VALUE "${ddl}_modified") + execute_process( + COMMAND + git diff ${latest_tag} --exit-code ${ddl} + OUTPUT_QUIET + WORKING_DIRECTORY ${DISKQUOTA_DDL_DIR} + RESULT_VARIABLE "${ddl}_modified" + ) if("${${ddl}_modified}") message( From 974876dc6e867391d6316aa4981f3e18cda4fca9 Mon Sep 17 00:00:00 2001 From: Zhang Hao Date: Wed, 28 Jun 2023 15:12:10 +0800 Subject: [PATCH 293/330] Remove gp7 from pipeline (#350) After creating the extension, the `diskquota.state` is always `unready` due to the change https://github.com/greenplum-db/gpdb/pull/15239. It makes the test time out. We disable the gp7 job in the release/pr/merge pipelines until the problem is fixed. --- concourse/pipeline/commit.yml | 2 +- concourse/pipeline/pr.yml | 2 +- concourse/pipeline/release.yml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/concourse/pipeline/commit.yml b/concourse/pipeline/commit.yml index 823dfcc2868..1891caeba76 100644 --- a/concourse/pipeline/commit.yml +++ b/concourse/pipeline/commit.yml @@ -19,7 +19,7 @@ #@ centos7_gpdb6_conf(), #@ rhel8_gpdb6_conf(), #@ ubuntu18_gpdb6_conf(), -#@ rhel8_gpdb7_conf(), +#! #@ rhel8_gpdb7_conf(), #@ ] jobs: #@ param = { diff --git a/concourse/pipeline/pr.yml b/concourse/pipeline/pr.yml index 4a715120c24..3578131aa8c 100644 --- a/concourse/pipeline/pr.yml +++ b/concourse/pipeline/pr.yml @@ -22,7 +22,7 @@ #@ centos7_gpdb6_conf(), #@ rhel8_gpdb6_conf(), #@ ubuntu18_gpdb6_conf(), -#@ rhel8_gpdb7_conf(), +#! #@ rhel8_gpdb7_conf(), #@ ] jobs: #@ param = { diff --git a/concourse/pipeline/release.yml b/concourse/pipeline/release.yml index 023e86bd88f..6b12b935f29 100644 --- a/concourse/pipeline/release.yml +++ b/concourse/pipeline/release.yml @@ -20,7 +20,7 @@ #@ centos7_gpdb6_conf(release_build=True), #@ rhel8_gpdb6_conf(release_build=True), #@ ubuntu18_gpdb6_conf(release_build=True), -#@ rhel8_gpdb7_conf(release_build=True) +#! #@ rhel8_gpdb7_conf(release_build=True) #@ ] jobs: #@ param = { From 6001a059054bc0ab8dd91a1a08d2e040623b7a62 Mon Sep 17 00:00:00 2001 From: Chen Mulong Date: Wed, 28 Jun 2023 17:12:15 +0800 Subject: [PATCH 294/330] Add alter extension upgrade test (#348) - By #340, diskquota should be able to be upgraded directly from any previous version. A script is added to test this. - Modify the cmakefile so that, before installing/packaging, only previous .so files will be copied. This would help us to make patch releases for 2.0/2.1.
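One iteration of the new test amounts to the manual flow sketched below; the version numbers are examples, and `alter_test.sh` (added further down) loops the same steps over every released minor version:

```sh
# Sketch of a single from-version -> current-version round trip.
gpconfig -c shared_preload_libraries -v 'diskquota-2.0'   # preload the old .so
gpstop -rai
createdb diskquota_alter_test
psql -d diskquota_alter_test -c "CREATE EXTENSION diskquota VERSION '2.0'"

gpconfig -c shared_preload_libraries -v 'diskquota-2.2'   # switch to the new .so
gpstop -rai
psql -d diskquota_alter_test -c "ALTER EXTENSION diskquota UPDATE TO '2.2'"
psql -d diskquota_alter_test -c "DROP EXTENSION diskquota"
```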
--------- Co-authored-by: Zhang Hao --- CMakeLists.txt | 15 ++++++- concourse/scripts/test_diskquota.sh | 2 + upgrade_test/alter_test.sh | 61 +++++++++++++++++++++++++++++ 3 files changed, 77 insertions(+), 1 deletion(-) create mode 100755 upgrade_test/alter_test.sh diff --git a/CMakeLists.txt b/CMakeLists.txt index 38c64a98c3d..d12fa4480a9 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -91,8 +91,21 @@ include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/Distro.cmake) if(DEFINED DISKQUOTA_LAST_RELEASE_PATH) message(STATUS "Copy pervious installer from ${DISKQUOTA_LAST_RELEASE_PATH}") file(ARCHIVE_EXTRACT INPUT ${DISKQUOTA_LAST_RELEASE_PATH} PATTERNS "*.so") - file(GLOB DISKQUOTA_PREVIOUS_LIBRARY + file(GLOB so_files_list "${CMAKE_BINARY_DIR}/lib/postgresql/*.so") + + foreach(so_path IN LISTS so_files_list) + get_filename_component(so_name ${so_path} NAME_WLE) + # Replace 'diskquota-x.y' with 'x.y'. 'diskquota' won't be replaced, which belongs to 1.x release. + string(REPLACE "diskquota-" "" so_ver ${so_name}) + # Install the previous so files. Those so files have versions less than current version. + # diskqutoa.so doesn't have version string in the file name. It belongs to 1.x release. + if((${so_ver} STREQUAL "diskquota") OR + ${so_ver} VERSION_LESS ${DISKQUOTA_MAJOR_VERSION}.${DISKQUOTA_MINOR_VERSION}) + list(APPEND DISKQUOTA_PREVIOUS_LIBRARY ${so_path}) + endif() + endforeach() + install(PROGRAMS ${DISKQUOTA_PREVIOUS_LIBRARY} DESTINATION "lib/postgresql/") get_filename_component( diff --git a/concourse/scripts/test_diskquota.sh b/concourse/scripts/test_diskquota.sh index 85b2bce157d..6abe35d04d6 100755 --- a/concourse/scripts/test_diskquota.sh +++ b/concourse/scripts/test_diskquota.sh @@ -41,6 +41,8 @@ function _main() { # Run upgrade test (with standby master) time cmake --build . --target upgradecheck popd + + time /home/gpadmin/diskquota_src/upgrade_test/alter_test.sh } _main diff --git a/upgrade_test/alter_test.sh b/upgrade_test/alter_test.sh new file mode 100755 index 00000000000..11c7afd72a2 --- /dev/null +++ b/upgrade_test/alter_test.sh @@ -0,0 +1,61 @@ +#!/bin/bash +# Test if all the previous diskquota minor versions can be directly upgraded +# to the current version. + +set -ex + +SCRIPT_PATH="${BASH_SOURCE[0]}" +SRC_DIR="$(cd "$(dirname "${SCRIPT_PATH}")"/.. >/dev/null 2>&1 && pwd)" + +# Versions like major.minor +CUR_VERSION=$(cut --delimiter="." --fields=1-2 "${SRC_DIR}/VERSION") +ALL_VERSIONS=$(cd "${SRC_DIR}" && git tag | cut --delimiter="." 
--fields=1-2 | sort -V -u) +VERSIONS_TO_TEST=() + +test_alter_from() { + local from_ver=$1 + local to_ver=$CUR_VERSION + + gpconfig -c shared_preload_libraries -v "" + gpstop -rai + dropdb diskquota --if-exists + dropdb diskquota_alter_test --if-exists + createdb diskquota + + local from_so_name="diskquota" + if [ "${from_ver}" != "1.0" ];then + from_so_name="diskquota-${from_ver}" + fi + local to_so_name="diskquota-${to_ver}" + + # Preload the old diskquota so + gpconfig -c shared_preload_libraries -v "${from_so_name}" + gpstop -rai + + createdb diskquota_alter_test + + # Test if the extension and be upgraded directly + psql -d diskquota_alter_test -c "CREATE EXTENSION diskquota version '${from_ver}'" + + # Preload the new diskquota so + gpconfig -c shared_preload_libraries -v "${to_so_name}" + gpstop -rai + + psql -d diskquota_alter_test -c "ALTER EXTENSION diskquota update to '${to_ver}'" + psql -d diskquota_alter_test -c "DROP EXTENSION diskquota" +} + +# Find all minor versions before current one +while IFS= read -r ver; do + if [ "${ver}" = "${CUR_VERSION}" ]; then + break + fi + if [ "${ver}" = "0.8" ]; then + continue + fi + VERSIONS_TO_TEST+=("${ver}") +done <<< "$ALL_VERSIONS" + +for from_ver in "${VERSIONS_TO_TEST[@]}"; do + test_alter_from "${from_ver}" +done From 2aefa8028e13c943ef1674627ed22fb29bfdbf45 Mon Sep 17 00:00:00 2001 From: Zhang Hao Date: Thu, 29 Jun 2023 11:31:52 +0800 Subject: [PATCH 295/330] Add a sleep in alter_test.sh (#351) Co-authored-by: Chen Mulong chenmulong@gmail.com --- upgrade_test/alter_test.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/upgrade_test/alter_test.sh b/upgrade_test/alter_test.sh index 11c7afd72a2..96e29af0684 100755 --- a/upgrade_test/alter_test.sh +++ b/upgrade_test/alter_test.sh @@ -42,6 +42,9 @@ test_alter_from() { gpstop -rai psql -d diskquota_alter_test -c "ALTER EXTENSION diskquota update to '${to_ver}'" + # Sleep wait for bgworker starting, otherwise, we will get a warning + # 'cannot remove the database from db list, dbid not found'. + sleep 5 psql -d diskquota_alter_test -c "DROP EXTENSION diskquota" } From 86ff586b5f7018983e7348db06ad87c8d6f73359 Mon Sep 17 00:00:00 2001 From: Zhang Hao Date: Thu, 29 Jun 2023 16:31:13 +0800 Subject: [PATCH 296/330] Update to 2.2.2 (#352) - bump version to 2.2.2 - Modify the check procedure of alter extension. 
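When verifying the relaxed check by hand, it helps to compare what the cluster actually preloads against what the catalog records. Both commands below are standard GPDB/PostgreSQL tooling; the database name is carried over from the alter test above:

```sh
# The library preloaded by the cluster:
gpconfig -s shared_preload_libraries

# The schema version recorded in the catalog. Between restarting onto the
# new .so and running ALTER EXTENSION, the two may legitimately differ:
psql -d diskquota_alter_test -At \
    -c "select extversion from pg_extension where extname = 'diskquota'"
```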
--- VERSION | 2 +- src/diskquota.c | 57 ++++++++++++++++++++++++++++++++++--------------- 2 files changed, 41 insertions(+), 18 deletions(-) diff --git a/VERSION b/VERSION index c043eea7767..b1b25a5ffae 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.2.1 +2.2.2 diff --git a/src/diskquota.c b/src/diskquota.c index 3c4fa634a88..dd68143775c 100644 --- a/src/diskquota.c +++ b/src/diskquota.c @@ -42,6 +42,7 @@ #include "utils/snapmgr.h" #include "utils/syscache.h" #include "utils/timestamp.h" +#include "utils/formatting.h" #include "tcop/pquery.h" PG_MODULE_MAGIC; @@ -147,6 +148,7 @@ static void free_bgworker_handle(uint32 worker_id); /* WaitForBackgroundWorkerShutdown is copied from gpdb7 */ static BgwHandleStatus WaitForBackgroundWorkerShutdown(BackgroundWorkerHandle *handle); #endif /* GP_VERSION_NUM */ +static bool is_altering_extension(void); /* * diskquota_launcher_shmem_size @@ -163,6 +165,40 @@ diskquota_launcher_shmem_size(void) size = add_size(size, mul_size(MAX_NUM_MONITORED_DB, sizeof(struct DiskquotaDBEntry))); // hidden memory for dbArray return size; } + +static bool +is_altering_extension(void) +{ + if (ActivePortal == NULL) return false; + /* QD: When the sourceTag is T_AlterExtensionStmt, then return true */ + if (ActivePortal->sourceTag == T_AlterExtensionStmt) return true; + + /* + * QE: The sourceTag won't be T_AlterExtensionStmt, we should check the sourceText. + * If the sourceText contains 'alter extension diskquota update', we consider it is + * a alter extension query. + */ + char *query = asc_tolower(ActivePortal->sourceText, strlen(ActivePortal->sourceText)); + char *pos; + bool match = true; + + pos = strstr(query, "alter"); + if (pos) + pos = strstr(pos, "extension"); + else + match = false; + if (pos) + pos = strstr(pos, "diskquota"); + else + match = false; + if (pos) + pos = strstr(pos, "update"); + else + match = false; + pfree(query); + return match; +} + /* * Entrypoint of diskquota module. * @@ -178,25 +214,12 @@ _PG_init(void) { /* * To support the continuous upgrade/downgrade, we should skip the library - * check in _PG_init() during upgrade/downgrade. If the POSTGRES backend - * process is in normal mode and meets one of the following conditions, we - * skip the library check: - * - The backend is not a QD. We only need to check the library on QD. - * - The current command is `ALTER EXTENSION`. + * check in _PG_init() during upgrade/downgrade. */ - if (IsNormalProcessingMode()) + if (IsNormalProcessingMode() && is_altering_extension()) { - if (Gp_role != GP_ROLE_DISPATCH) - { - ereport(WARNING, (errmsg("[diskquota] booting " DISKQUOTA_VERSION ", but " DISKQUOTA_BINARY_NAME - " not in shared_preload_libraries."))); - return; - } - if (ActivePortal && ActivePortal->sourceTag == T_AlterExtensionStmt) - { - ereport(LOG, (errmsg("[diskquota] altering diskquota version to " DISKQUOTA_VERSION "."))); - return; - } + ereport(LOG, (errmsg("[diskquota] altering diskquota version to " DISKQUOTA_VERSION "."))); + return; } ereport(ERROR, (errmsg("[diskquota] booting " DISKQUOTA_VERSION ", but " DISKQUOTA_BINARY_NAME " not in shared_preload_libraries. abort."))); From f1ca0c5ad9680f780622efab4e600d8b5fd13beb Mon Sep 17 00:00:00 2001 From: Zhang Hao Date: Mon, 3 Jul 2023 14:53:52 +0800 Subject: [PATCH 297/330] Reduce the remain logs in bgworker. 
(#354) Fix from #346 --- src/diskquota.c | 1 - 1 file changed, 1 deletion(-) diff --git a/src/diskquota.c b/src/diskquota.c index dd68143775c..7d09f70dfae 100644 --- a/src/diskquota.c +++ b/src/diskquota.c @@ -541,7 +541,6 @@ disk_quota_worker_main(Datum main_arg) long sleep_time = diskquota_naptime * 1000; long secs; int usecs; - ereport(LOG, (errmsg("[diskquota] disk quota worker process is monitoring database:%s", dbname))); while (!got_sigterm) { From 2c086fc82ea2d45f301e63fe9bd7860d012adbd3 Mon Sep 17 00:00:00 2001 From: Zhang Hao Date: Tue, 4 Jul 2023 11:03:50 +0800 Subject: [PATCH 298/330] Fix bug: bgworkers only print log once. (#356) --- src/diskquota.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/diskquota.c b/src/diskquota.c index 7d09f70dfae..84ac5e076ea 100644 --- a/src/diskquota.c +++ b/src/diskquota.c @@ -553,7 +553,7 @@ disk_quota_worker_main(Datum main_arg) * by the bgworker's pid in the log file. */ log_time = GetCurrentTimestamp(); - if (TimestampDifferenceExceeds(log_time, MyWorkerInfo->dbEntry->last_log_time, BGWORKER_LOG_TIME)) + if (TimestampDifferenceExceeds(MyWorkerInfo->dbEntry->last_log_time, log_time, BGWORKER_LOG_TIME)) { ereport(LOG, (errmsg("[diskquota] disk quota worker process is monitoring database:%s", dbname))); MyWorkerInfo->dbEntry->last_log_time = log_time; From 22c35b656b6587d1a411a0d1e3a4532f9ee062d0 Mon Sep 17 00:00:00 2001 From: liuxueyang Date: Tue, 4 Jul 2023 17:54:34 +0800 Subject: [PATCH 299/330] Update resources by using gp-extensions-ci subtree --- concourse/PROJ_NAME | 1 + concourse/README.md | 8 ++ concourse/fly.sh | 168 +----------------------- concourse/pipeline/base.lib.yml | 41 ------ concourse/pipeline/res_def.yml | 172 ------------------------- concourse/pipeline/trigger_def.lib.yml | 22 ++-- 6 files changed, 21 insertions(+), 391 deletions(-) create mode 100644 concourse/PROJ_NAME mode change 100755 => 120000 concourse/fly.sh delete mode 100644 concourse/pipeline/base.lib.yml diff --git a/concourse/PROJ_NAME b/concourse/PROJ_NAME new file mode 100644 index 00000000000..67f1c089995 --- /dev/null +++ b/concourse/PROJ_NAME @@ -0,0 +1 @@ +diskquota diff --git a/concourse/README.md b/concourse/README.md index 259b436bf38..1007f454077 100644 --- a/concourse/README.md +++ b/concourse/README.md @@ -81,6 +81,14 @@ To test if the webhook works, use `curl` to send a `POST` request to the hook UR curl --data-raw "foo" ``` +## Update gp-extensions-ci + +We place some of the resources of concourse in a separate repository https://github.com/pivotal/gp-extensions-ci/. And we use that repo as a subtree with prefix ./concourse/lib. This is how to pull from the repo gp-extensions-ci: + +```sh + git subtree pull --prefix concourse/lib git@github.com:pivotal/gp-extensions-ci.git main --squash +``` + # FAQ ## PR pipeline is not triggered. diff --git a/concourse/fly.sh b/concourse/fly.sh deleted file mode 100755 index 75404b6be6c..00000000000 --- a/concourse/fly.sh +++ /dev/null @@ -1,167 +0,0 @@ -#!/bin/bash - -set -e - -fly=${FLY:-"fly"} -echo "'fly' command: ${fly}" -echo "" -proj_name="diskquota" -concourse_team="main" - -usage() { - if [ -n "$1" ]; then - echo "$1" 1>&2 - echo "" 1>&2 - fi - - echo "Usage: $0 -t -c [-p ] [-b branch] [-T]" - echo "Options:" - echo " '-T' adds '_test' suffix to the pipeline type. Useful for pipeline debugging." - exit 1 -} - -# Hacky way to find out which concourse team is being used. 
-# The team name is needed to generate webhook URL -detect_concourse_team() { - local target="$1" - local fly_rc_file="$HOME/.flyrc" - local found_target=false - while read -r line; - do - line="$(echo -e "${line}" | tr -d '[:space:]')" - if [ ${found_target} != true ] && [ "${line}" = "${target}:" ]; then - found_target=true - fi - if [ ${found_target} = true ] && [[ "${line}" == team:* ]]; then - concourse_team=$(echo "${line}" | cut --delimiter=":" --fields=2) - echo "Use concourse target: ${target}, team: ${concourse_team}" - return - fi - done < "${fly_rc_file}" -} - -# Parse command line options -while getopts ":c:t:p:b:T" o; do - case "${o}" in - c) - # pipeline type/config. pr/merge/dev/rel - pipeline_config=${OPTARG} - ;; - t) - # concourse target - target=${OPTARG} - ;; - p) - # pipeline name - postfix=${OPTARG} - ;; - b) - # branch name - branch=${OPTARG} - ;; - T) - test_suffix="_test" - ;; - *) - usage "" - ;; - esac -done -shift $((OPTIND-1)) - -if [ -z "${target}" ] || [ -z "${pipeline_config}" ]; then - usage "" -fi - -detect_concourse_team "${target}" - -pipeline_type="" -# Decide ytt options to generate pipeline -case ${pipeline_config} in - pr) - pipeline_type="pr" - config_file="pr.yml" - hook_res="${proj_name}_pr" - ;; - merge|commit) - # Default branch is 'gpdb' as it is our main branch - if [ -z "${branch}" ]; then - branch="gpdb" - fi - pipeline_type="merge" - config_file="commit.yml" - hook_res="${proj_name}_commit" - ;; - dev) - if [ -z "${postfix}" ]; then - usage "'-p' needs to be supplied to specify the pipeline name postfix for flying a 'dev' pipeline." - fi - if [ -z "${branch}" ]; then - usage "'-b' needs to be supplied to specify the branch for flying a 'dev' pipeline." - fi - pipeline_type="dev" - config_file="dev.yml" - ;; - release|rel) - # Default branch is 'gpdb' as it is our main branch - if [ -z "${branch}" ]; then - branch="gpdb" - fi - pipeline_type="rel" - config_file="release.yml" - hook_res="${proj_name}_commit" - ;; - *) - usage "" - ;; -esac - -yml_path="/tmp/${proj_name}.yml" -my_path=$(realpath "${BASH_SOURCE[0]}") -ytt_base=$(dirname "${my_path}")/pipeline -# pipeline cannot contain '/' -pipeline_name=${pipeline_name/\//"_"} - -# Generate pipeline name -if [ -n "${test_suffix}" ]; then - pipeline_type="${pipeline_type}_test" -fi -pipeline_name="${pipeline_type}.${proj_name}" -if [ -n "${branch}" ]; then - pipeline_name="${pipeline_name}.${branch}" -fi -if [ -n "${postfix}" ]; then - pipeline_name="${pipeline_name}.${postfix}" -fi -# pipeline cannot contain '/' -pipeline_name=${pipeline_name/\//"_"} - -ytt --data-values-file "${ytt_base}/res_def.yml" \ - -f "${ytt_base}/base.lib.yml" \ - -f "${ytt_base}/job_def.lib.yml" \ - -f "${ytt_base}/trigger_def.lib.yml" \ - -f "${ytt_base}/${config_file}" > "${yml_path}" -echo "Generated pipeline yaml '${yml_path}'." - -echo "" -echo "Fly the pipeline..." -set -v -"${fly}" \ - -t "${target}" \ - sp \ - -p "${pipeline_name}" \ - -c "${yml_path}" \ - -v "${proj_name}-branch=${branch}" -set +v - -if [ "${pipeline_config}" == "dev" ]; then - exit 0 -fi - -concourse_url=$(fly targets | awk "{if (\$1 == \"${target}\") {print \$2}}") -echo "" -echo "================================================================================" -echo "Remeber to set the the webhook URL on GitHub:" -echo "${concourse_url}/api/v1/teams/${concourse_team}/pipelines/${pipeline_name}/resources/${hook_res}/check/webhook?webhook_token=" -echo "You may need to change the base URL if a differnt concourse server is used." 
-echo "================================================================================" diff --git a/concourse/fly.sh b/concourse/fly.sh new file mode 120000 index 00000000000..09b854d19ec --- /dev/null +++ b/concourse/fly.sh @@ -0,0 +1 @@ +lib/fly.sh \ No newline at end of file diff --git a/concourse/pipeline/base.lib.yml b/concourse/pipeline/base.lib.yml deleted file mode 100644 index 1a62132a956..00000000000 --- a/concourse/pipeline/base.lib.yml +++ /dev/null @@ -1,41 +0,0 @@ -#@ load("@ytt:data", "data") -#! add_res_by_xxx is to solve the unused resources error for concourse -#@ def add_res_by_conf(res_map, job_conf): -#@ for key in job_conf: -#@ if key.startswith("res_"): -#@ val = job_conf[key] -#@ if type(val) == "list" or type(val) == "yamlfragment": -#@ for res_name in val: -#@ res_map[res_name] = True -#@ end -#@ else: -#@ res_name = val -#@ res_map[res_name] = True -#@ end -#@ end -#@ end -#@ end -#@ -#@ def add_res_by_name(res_map, res_name): -#@ res_map[res_name] = True -#@ end -#@ -#@ def declare_res(res_type_map, res_map): -#@ for val in data.values.resources: -#@ res_name = val["name"] -#@ res_type = val["type"] -#@ if res_map.get(val["name"]): -#@ res_type_map[res_type] = True - - #@ val -#@ end -#@ end -#@ end -#@ -#@ def declare_res_type(res_type_map): -#@ for val in data.values.resource_types: -#@ type_name = val["name"] -#@ if res_type_map.get(type_name): - - #@ val -#@ end -#@ end -#@ end diff --git a/concourse/pipeline/res_def.yml b/concourse/pipeline/res_def.yml index f53bb0a30b6..1206969afb5 100644 --- a/concourse/pipeline/res_def.yml +++ b/concourse/pipeline/res_def.yml @@ -1,21 +1,3 @@ -resource_types: -- name: gcs - type: docker-image - check_every: 1h - source: - repository: frodenas/gcs-resource - -- name: pull-request - type: docker-image - check_every: 1h - source: - repository: teliaoss/github-pr-resource - -- name: slack-alert - type: docker-image - source: - repository: arbourd/concourse-slack-alert-resource - resources: # Pull Request - name: diskquota_pr @@ -49,155 +31,6 @@ resources: password: x-oauth-basic -# Greenplum sources -- name: gpdb6_src - type: git - source: - branch: 6X_STABLE - uri: https://github.com/greenplum-db/gpdb.git -- name: gpdb7_src - type: git - source: - branch: main - uri: https://github.com/greenplum-db/gpdb.git - -# Image Resources -# centos6 -- name: centos6-gpdb6-image-build - type: registry-image - source: - repository: gcr.io/data-gpdb-public-images/gpdb6-centos6-build - tag: latest -- name: centos6-gpdb6-image-test - type: registry-image - source: - repository: gcr.io/data-gpdb-public-images/gpdb6-centos6-test - tag: latest -# centos7 -- name: centos7-gpdb6-image-build - type: registry-image - source: - repository: gcr.io/data-gpdb-public-images/gpdb6-centos7-build - tag: latest -- name: centos7-gpdb6-image-test - type: registry-image - source: - repository: gcr.io/data-gpdb-public-images/gpdb6-centos7-test - tag: latest -# rhel8 -- name: rhel8-gpdb6-image-build - type: registry-image - source: - repository: gcr.io/data-gpdb-private-images/gpdb6-rhel8-build - tag: latest - username: _json_key - password: ((data-gpdb-private-images-container-registry-readonly-service-account-key)) -- name: rhel8-gpdb6-image-test - type: registry-image - source: - repository: gcr.io/data-gpdb-private-images/gpdb6-rhel8-test - tag: latest - username: _json_key - password: ((data-gpdb-private-images-container-registry-readonly-service-account-key)) -# Ubuntu18 -- name: ubuntu18-gpdb6-image-build - type: registry-image - source: - repository: 
gcr.io/data-gpdb-public-images/gpdb6-ubuntu18.04-build - tag: latest -- name: ubuntu18-gpdb6-image-test - type: registry-image - source: - repository: gcr.io/data-gpdb-public-images/gpdb6-ubuntu18.04-test - tag: latest -# GPDB7 -# build -- name: rocky8-gpdb7-image-build - type: registry-image - source: - repository: gcr.io/data-gpdb-public-images/gpdb7-rocky8-build - tag: latest -# test -- name: rocky8-gpdb7-image-test - type: registry-image - source: - repository: gcr.io/data-gpdb-public-images/gpdb7-rocky8-test - tag: latest -- name: rhel8-gpdb7-image-test - type: registry-image - source: - repository: gcr.io/data-gpdb-private-images/gpdb7-rhel8-test - tag: latest - username: _json_key - password: ((data-gpdb-private-images-container-registry-readonly-service-account-key)) - -# gpdb binary on gcs is located as different folder for different version -# Latest build with assertion enabled: -# --enable-cassert --enable-tap-tests --enable-debug-extensions -- name: bin_gpdb6_centos6_debug - type: gcs - source: - bucket: pivotal-gpdb-concourse-resources-prod - json_key: ((concourse-gcs-resources-service-account-key)) - regexp: server/published/gpdb6/server-rc-(.*)-rhel6_x86_64.debug.tar.gz -- name: bin_gpdb6_centos7_debug - type: gcs - source: - bucket: pivotal-gpdb-concourse-resources-prod - json_key: ((concourse-gcs-resources-service-account-key)) - regexp: server/published/gpdb6/server-rc-(.*)-rhel7_x86_64.debug.tar.gz -- name: bin_gpdb6_rhel8_debug - type: gcs - source: - bucket: pivotal-gpdb-concourse-resources-prod - json_key: ((concourse-gcs-resources-service-account-key)) - regexp: server/published/gpdb6/server-rc-(.*)-rhel8_x86_64.debug.tar.gz -- name: bin_gpdb6_ubuntu18_debug - type: gcs - source: - bucket: pivotal-gpdb-concourse-resources-prod - json_key: ((concourse-gcs-resources-service-account-key)) - regexp: server/published/gpdb6/server-rc-(.*)-ubuntu18.04_x86_64.debug.tar.gz -- name: bin_gpdb7_el8_debug - type: gcs - source: - bucket: pivotal-gpdb-concourse-resources-prod - json_key: ((concourse-gcs-resources-service-account-key)) - regexp: server/published/main/server-rc-(.*)-el8_x86_64.debug.tar.gz - -# Latest release candidates, no fault-injector, no assertion: -# --disable-debug-extensions --disable-tap-tests --enable-ic-proxy -- name: bin_gpdb6_centos6 - type: gcs - source: - bucket: pivotal-gpdb-concourse-resources-prod - json_key: ((concourse-gcs-resources-service-account-key)) - regexp: server/release-candidates/gpdb6/greenplum-db-server-(6\.([0-9]|([1-8][0-9])|(9[0-8]))\..*-dev\.\d+)-.*-centos6.tar.gz -- name: bin_gpdb6_centos7 - type: gcs - source: - bucket: pivotal-gpdb-concourse-resources-prod - json_key: ((concourse-gcs-resources-service-account-key)) - regexp: server/release-candidates/gpdb6/greenplum-db-server-(6\.([0-9]|([1-8][0-9])|(9[0-8]))\..*-dev\.\d+)-.*-centos7.tar.gz -- name: bin_gpdb6_rhel8 - type: gcs - source: - bucket: pivotal-gpdb-concourse-resources-prod - json_key: ((concourse-gcs-resources-service-account-key)) - regexp: server/release-candidates/gpdb6/greenplum-db-server-(6\.([0-9]|([1-8][0-9])|(9[0-8]))\..*-dev\.\d+)-.*-rhel8.tar.gz -- name: bin_gpdb6_ubuntu18 - type: gcs - source: - bucket: pivotal-gpdb-concourse-resources-prod - json_key: ((concourse-gcs-resources-service-account-key)) - regexp: server/release-candidates/gpdb6/greenplum-db-server-(6\.([0-9]|([1-8][0-9])|(9[0-8]))\..*-dev\.\d+)-.*-ubuntu18.04.tar.gz -- name: bin_gpdb7_el8 - type: gcs - source: - bucket: pivotal-gpdb-concourse-resources-prod - json_key: 
((concourse-gcs-resources-service-account-key)) - regexp: server/release-candidates/gpdb7/greenplum-db-server-(7\.([0-9]|([1-8][0-9])|(9[0-8]))\..*-dev\.\d+)-.*-el8.tar.gz - # Diskquota releases - name: bin_diskquota_gpdb6_rhel6 type: gcs @@ -350,8 +183,3 @@ resources: bucket: gp-extensions-ci json_key: ((extension/extensions-gcs-service-account-key-dev2)) regexp: dependencies/cmake-(.*)-linux-x86_64.sh - -- name: slack_notify - type: slack-alert - source: - url: ((extension/extensions-slack-webhook)) diff --git a/concourse/pipeline/trigger_def.lib.yml b/concourse/pipeline/trigger_def.lib.yml index c2854d02712..607cb7a3fa3 100644 --- a/concourse/pipeline/trigger_def.lib.yml +++ b/concourse/pipeline/trigger_def.lib.yml @@ -26,16 +26,16 @@ on_error: path: diskquota_src status: failure on_success: - put: diskquota_pr - params: - path: diskquota_src - status: success + put: diskquota_pr + params: + path: diskquota_src + status: success #@ end #! Commit trigger. For master pipelines #@ def commit_trigger(res_map): #@ add_res_by_name(res_map, "diskquota_commit") -#@ add_res_by_name(res_map, "slack_notify") +#@ add_res_by_name(res_map, "slack_notify_extensions") auto_trigger: true to_get: - get: diskquota_src @@ -46,11 +46,11 @@ to_put_post: #@ [] #! Unfortunately it doesn't work with Concourse 5. on_success: on_failure: - put: slack_notify + put: slack_notify_extensions params: alert_type: failed on_error: - put: slack_notify + put: slack_notify_extensions params: alert_type: errored #@ end @@ -74,7 +74,7 @@ on_error: #! Commit trigger. For release pipelines #@ def release_trigger(res_map): #@ add_res_by_name(res_map, "diskquota_commit") -#@ add_res_by_name(res_map, "slack_notify") +#@ add_res_by_name(res_map, "slack_notify_extensions") auto_trigger: true to_get: - get: diskquota_src @@ -86,16 +86,16 @@ to_put_post: repository: diskquota_src tag: diskquota_src/VERSION on_success: - put: slack_notify + put: slack_notify_extensions params: alert_type: success text: A new diskquota release has been pushed! on_failure: - put: slack_notify + put: slack_notify_extensions params: alert_type: failed on_error: - put: slack_notify + put: slack_notify_extensions params: alert_type: errored #@ end From dbd044f46d7672331e19cbd6b95a6f8df67511af Mon Sep 17 00:00:00 2001 From: liuxueyang Date: Wed, 5 Jul 2023 09:21:28 +0800 Subject: [PATCH 300/330] Squashed 'concourse/lib/' content from commit d51adf5 git-subtree-dir: concourse/lib git-subtree-split: d51adf5d59e4e5caefb678b29c4335881040c7d2 --- README.md | 1 + base.lib.yml | 60 ++++++++++++++++ fly.sh | 179 ++++++++++++++++++++++++++++++++++++++++++++++ res_def_gpdb.yml | 148 ++++++++++++++++++++++++++++++++++++++ res_def_misc.yml | 7 ++ res_types_def.yml | 21 ++++++ 6 files changed, 416 insertions(+) create mode 100644 README.md create mode 100644 base.lib.yml create mode 100755 fly.sh create mode 100644 res_def_gpdb.yml create mode 100644 res_def_misc.yml create mode 100644 res_types_def.yml diff --git a/README.md b/README.md new file mode 100644 index 00000000000..0dd8ca554c9 --- /dev/null +++ b/README.md @@ -0,0 +1 @@ +# Common ytt libs for gp-extensions concourse diff --git a/base.lib.yml b/base.lib.yml new file mode 100644 index 00000000000..78072de58be --- /dev/null +++ b/base.lib.yml @@ -0,0 +1,60 @@ +#@ load("@ytt:data", "data") +#@ load("@ytt:struct", "struct") + +#! resources and resource_types could be declared in different data-value files. +#! We check the key prefix, if they are 'resources' and 'resource_types', just +#! 
merge them into local dictionaries. +#@ resources = [] +#@ resource_types = [] +#@ data_values_dict = struct.decode(data.values) +#@ for key in data_values_dict.keys(): +#@ if key.startswith('resources'): +#@ resources.extend(data_values_dict[key]) +#@ end +#@ end +#@ for key in data_values_dict.keys(): +#@ if key.startswith('resource_type'): +#@ resource_types.extend(data_values_dict[key]) +#@ end +#@ end + +#! add_res_by_xxx is to solve the unused resources error for concourse +#@ def add_res_by_conf(res_map, job_conf): +#@ for key in job_conf: +#@ if key.startswith("res_"): +#@ val = job_conf[key] +#@ if type(val) == "list" or type(val) == "yamlfragment": +#@ for res_name in val: +#@ res_map[res_name] = True +#@ end +#@ else: +#@ res_name = val +#@ res_map[res_name] = True +#@ end +#@ end +#@ end +#@ end +#@ +#@ def add_res_by_name(res_map, res_name): +#@ res_map[res_name] = True +#@ end +#@ +#@ def declare_res(res_type_map, res_map): +#@ for val in resources: +#@ res_name = val["name"] +#@ res_type = val["type"] +#@ if res_map.get(val["name"]): +#@ res_type_map[res_type] = True + - #@ val +#@ end +#@ end +#@ end +#@ +#@ def declare_res_type(res_type_map): +#@ for val in resource_types: +#@ type_name = val["name"] +#@ if res_type_map.get(type_name): + - #@ val +#@ end +#@ end +#@ end diff --git a/fly.sh b/fly.sh new file mode 100755 index 00000000000..d68c5c5c94f --- /dev/null +++ b/fly.sh @@ -0,0 +1,179 @@ +#!/bin/bash + +set -e + +fly=${FLY:-"fly"} +echo "'fly' command: ${fly}" +echo "" + +my_path=$(realpath -s "${BASH_SOURCE[0]}") +my_dir=$(dirname "${my_path}") +proj_name_file="${my_dir}/PROJ_NAME" +if [ ! -f "${proj_name_file}" ]; then + echo "A 'PROJ_NAME' file is needed in '${my_dir}'" + exit 1 +fi +proj_name=$(cat "${proj_name_file}") +concourse_team="main" + +usage() { + if [ -n "$1" ]; then + echo "$1" 1>&2 + echo "" 1>&2 + fi + + echo "Usage: $0 -t -c [-p ] [-b branch] [-T]" + echo "Options:" + echo " '-T' adds '_test' suffix to the pipeline type. Useful for pipeline debugging." + exit 1 +} + +# Hacky way to find out which concourse team is being used. +# The team name is needed to generate webhook URL +detect_concourse_team() { + local target="$1" + local fly_rc_file="$HOME/.flyrc" + local found_target=false + while read -r line; + do + line="$(echo -e "${line}" | tr -d '[:space:]')" + if [ ${found_target} != true ] && [ "${line}" = "${target}:" ]; then + found_target=true + fi + if [ ${found_target} = true ] && [[ "${line}" == team:* ]]; then + concourse_team=$(echo "${line}" | cut --delimiter=":" --fields=2) + echo "Use concourse target: ${target}, team: ${concourse_team}" + return + fi + done < "${fly_rc_file}" +} + +# Parse command line options +while getopts ":c:t:p:b:T" o; do + case "${o}" in + c) + # pipeline type/config. 
pr/merge/dev/rel + pipeline_config=${OPTARG} + ;; + t) + # concourse target + target=${OPTARG} + ;; + p) + # pipeline name + postfix=${OPTARG} + ;; + b) + # branch name + branch=${OPTARG} + ;; + T) + test_suffix="_test" + ;; + *) + usage "" + ;; + esac +done +shift $((OPTIND-1)) + +if [ -z "${target}" ] || [ -z "${pipeline_config}" ]; then + usage "" +fi + +detect_concourse_team "${target}" + +pipeline_type="" +# Decide ytt options to generate pipeline +case ${pipeline_config} in + pr) + pipeline_type="pr" + config_file="pr.yml" + hook_res="${proj_name}_pr" + ;; + merge|commit) + # Default branch is 'gpdb' as it is our main branch + if [ -z "${branch}" ]; then + branch="gpdb" + fi + pipeline_type="merge" + config_file="commit.yml" + hook_res="${proj_name}_commit" + ;; + dev) + if [ -z "${postfix}" ]; then + usage "'-p' needs to be supplied to specify the pipeline name postfix for flying a 'dev' pipeline." + fi + if [ -z "${branch}" ]; then + usage "'-b' needs to be supplied to specify the branch for flying a 'dev' pipeline." + fi + pipeline_type="dev" + config_file="dev.yml" + ;; + release|rel) + # Default branch is 'gpdb' as it is our main branch + if [ -z "${branch}" ]; then + branch="gpdb" + fi + pipeline_type="rel" + config_file="release.yml" + hook_res="${proj_name}_commit" + ;; + *) + usage "" + ;; +esac + +yml_path="/tmp/${proj_name}.yml" +pipeline_dir="${my_dir}/pipeline" +lib_dir="${my_dir}/lib" +# pipeline cannot contain '/' +pipeline_name=${pipeline_name/\//"_"} + +# Generate pipeline name +if [ -n "${test_suffix}" ]; then + pipeline_type="${pipeline_type}_test" +fi +pipeline_name="${pipeline_type}.${proj_name}" +if [ -n "${branch}" ]; then + pipeline_name="${pipeline_name}.${branch}" +fi +if [ -n "${postfix}" ]; then + pipeline_name="${pipeline_name}.${postfix}" +fi +# pipeline cannot contain '/' +pipeline_name=${pipeline_name/\//"_"} + +ytt \ + --data-values-file "${pipeline_dir}/res_def.yml" \ + --data-values-file "${lib_dir}/res_def_gpdb.yml" \ + --data-values-file "${lib_dir}/res_def_misc.yml" \ + --data-values-file "${lib_dir}/res_types_def.yml" \ + -f "${lib_dir}/base.lib.yml" \ + -f "${pipeline_dir}/job_def.lib.yml" \ + -f "${pipeline_dir}/trigger_def.lib.yml" \ + -f "${pipeline_dir}/${config_file}" > "${yml_path}" +echo "Generated pipeline yaml '${yml_path}'." + +echo "" +echo "Fly the pipeline..." +set -v +"${fly}" \ + -t "${target}" \ + sp \ + -p "${pipeline_name}" \ + -c "${yml_path}" \ + -v "${proj_name}-branch=${branch}" +set +v + +if [ "${pipeline_config}" == "dev" ]; then + exit 0 +fi + +concourse_url=$(fly targets | awk "{if (\$1 == \"${target}\") {print \$2}}") +echo "" +echo "================================================================================" +echo "Remeber to set the the webhook URL on GitHub:" +echo "${concourse_url}/api/v1/teams/${concourse_team}/pipelines/${pipeline_name}/resources/${hook_res}/check/webhook?webhook_token=" +echo "You may need to change the base URL if a different concourse server is used." 
+echo "================================================================================" diff --git a/res_def_gpdb.yml b/res_def_gpdb.yml new file mode 100644 index 00000000000..b4384480b1c --- /dev/null +++ b/res_def_gpdb.yml @@ -0,0 +1,148 @@ +resources_gpdb: +# Image Resources +# centos6 +- name: centos6-gpdb6-image-build + type: registry-image + source: + repository: gcr.io/data-gpdb-public-images/gpdb6-centos6-build + tag: latest +- name: centos6-gpdb6-image-test + type: registry-image + source: + repository: gcr.io/data-gpdb-public-images/gpdb6-centos6-test + tag: latest +# centos7 +- name: centos7-gpdb6-image-build + type: registry-image + source: + repository: gcr.io/data-gpdb-public-images/gpdb6-centos7-build + tag: latest +- name: centos7-gpdb6-image-test + type: registry-image + source: + repository: gcr.io/data-gpdb-public-images/gpdb6-centos7-test + tag: latest +# rhel8 +- name: rhel8-gpdb6-image-build + type: registry-image + source: + repository: gcr.io/data-gpdb-private-images/gpdb6-rhel8-build + tag: latest + username: _json_key + password: ((data-gpdb-private-images-container-registry-readonly-service-account-key)) +- name: rhel8-gpdb6-image-test + type: registry-image + source: + repository: gcr.io/data-gpdb-private-images/gpdb6-rhel8-test + tag: latest + username: _json_key + password: ((data-gpdb-private-images-container-registry-readonly-service-account-key)) +# Ubuntu18 +- name: ubuntu18-gpdb6-image-build + type: registry-image + source: + repository: gcr.io/data-gpdb-public-images/gpdb6-ubuntu18.04-build + tag: latest +- name: ubuntu18-gpdb6-image-test + type: registry-image + source: + repository: gcr.io/data-gpdb-public-images/gpdb6-ubuntu18.04-test + tag: latest +# GPDB7 +# build +- name: rocky8-gpdb7-image-build + type: registry-image + source: + repository: gcr.io/data-gpdb-public-images/gpdb7-rocky8-build + tag: latest +# test +- name: rocky8-gpdb7-image-test + type: registry-image + source: + repository: gcr.io/data-gpdb-public-images/gpdb7-rocky8-test + tag: latest +- name: rhel8-gpdb7-image-test + type: registry-image + source: + repository: gcr.io/data-gpdb-private-images/gpdb7-rhel8-test + tag: latest + username: _json_key + password: ((data-gpdb-private-images-container-registry-readonly-service-account-key)) + +# gpdb binary on gcs is located as different folder for different version +# Latest build with assertion enabled: +# --enable-cassert --enable-tap-tests --enable-debug-extensions +- name: bin_gpdb6_centos6_debug + type: gcs + source: + bucket: pivotal-gpdb-concourse-resources-prod + json_key: ((concourse-gcs-resources-service-account-key)) + regexp: server/published/gpdb6/server-rc-(.*\+dev\.\d+.*)-rhel6_x86_64.debug.tar.gz +- name: bin_gpdb6_centos7_debug + type: gcs + source: + bucket: pivotal-gpdb-concourse-resources-prod + json_key: ((concourse-gcs-resources-service-account-key)) + regexp: server/published/gpdb6/server-rc-(.*\+dev\.\d+.*)-rhel7_x86_64.debug.tar.gz +- name: bin_gpdb6_rhel8_debug + type: gcs + source: + bucket: pivotal-gpdb-concourse-resources-prod + json_key: ((concourse-gcs-resources-service-account-key)) + regexp: server/published/gpdb6/server-rc-(.*\+dev\.\d+.*)-rhel8_x86_64.debug.tar.gz +- name: bin_gpdb6_ubuntu18_debug + type: gcs + source: + bucket: pivotal-gpdb-concourse-resources-prod + json_key: ((concourse-gcs-resources-service-account-key)) + regexp: server/published/gpdb6/server-rc-(.*\+dev\.\d+.*)-ubuntu18.04_x86_64.debug.tar.gz +- name: bin_gpdb7_el8_debug + type: gcs + source: + bucket: 
pivotal-gpdb-concourse-resources-prod + json_key: ((concourse-gcs-resources-service-account-key)) + regexp: server/published/main/server-rc-(.*\+dev\.\d+.*)-el8_x86_64.debug.tar.gz +# Latest release candidates, no fault-injector, no assertion: +# --disable-debug-extensions --disable-tap-tests --enable-ic-proxy +- name: bin_gpdb6_centos6 + type: gcs + source: + bucket: pivotal-gpdb-concourse-resources-prod + json_key: ((concourse-gcs-resources-service-account-key)) + regexp: server/release-candidates/gpdb6/greenplum-db-server-(6\.([0-9]|([1-8][0-9])|(9[0-8]))\..*\+dev\.\d+)-.*-centos6.tar.gz +- name: bin_gpdb6_centos7 + type: gcs + source: + bucket: pivotal-gpdb-concourse-resources-prod + json_key: ((concourse-gcs-resources-service-account-key)) + regexp: server/release-candidates/gpdb6/greenplum-db-server-(6\.([0-9]|([1-8][0-9])|(9[0-8]))\..*\+dev\.\d+)-.*-centos7.tar.gz +- name: bin_gpdb6_rhel8 + type: gcs + source: + bucket: pivotal-gpdb-concourse-resources-prod + json_key: ((concourse-gcs-resources-service-account-key)) + regexp: server/release-candidates/gpdb6/greenplum-db-server-(6\.([0-9]|([1-8][0-9])|(9[0-8]))\..*\+dev\.\d+)-.*-rhel8.tar.gz +- name: bin_gpdb6_ubuntu18 + type: gcs + source: + bucket: pivotal-gpdb-concourse-resources-prod + json_key: ((concourse-gcs-resources-service-account-key)) + regexp: server/release-candidates/gpdb6/greenplum-db-server-(6\.([0-9]|([1-8][0-9])|(9[0-8]))\..*\+dev\.\d+)-.*-ubuntu18.04.tar.gz +- name: bin_gpdb7_el8 + type: gcs + source: + bucket: pivotal-gpdb-concourse-resources-prod + json_key: ((concourse-gcs-resources-service-account-key)) + regexp: server/release-candidates/gpdb7/greenplum-db-server-(7\.([0-9]|([1-8][0-9])|(9[0-8]))\..*\+dev\.\d+)-.*-el8.tar.gz + +# Greenplum sources +- name: gpdb6_src + type: git + source: + branch: 6X_STABLE + uri: https://github.com/greenplum-db/gpdb.git +- name: gpdb7_src + type: git + source: + branch: main + uri: https://github.com/greenplum-db/gpdb.git diff --git a/res_def_misc.yml b/res_def_misc.yml new file mode 100644 index 00000000000..3105dd38d66 --- /dev/null +++ b/res_def_misc.yml @@ -0,0 +1,7 @@ +resources_misc: + +# Other dependencies +- name: slack_notify_extensions + type: slack-alert + source: + url: ((extension/extensions-slack-webhook)) diff --git a/res_types_def.yml b/res_types_def.yml new file mode 100644 index 00000000000..07eebf6ea94 --- /dev/null +++ b/res_types_def.yml @@ -0,0 +1,21 @@ +resource_types_common: +- name: gcs + type: registry-image + check_every: 1h + source: + repository: frodenas/gcs-resource + tag: latest + +- name: pull-request + type: registry-image + check_every: 1h + source: + repository: teliaoss/github-pr-resource + tag: latest + +- name: slack-alert + type: registry-image + source: + repository: arbourd/concourse-slack-alert-resource + tag: latest + From f9e940fbaf3dbec7f156b5c7f2c3b3a384c4dd0b Mon Sep 17 00:00:00 2001 From: Chen Mulong Date: Wed, 12 Jul 2023 09:47:01 +0800 Subject: [PATCH 301/330] Disable forks for PR resource (#358) --- concourse/pipeline/res_def.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/concourse/pipeline/res_def.yml b/concourse/pipeline/res_def.yml index 1206969afb5..89382604596 100644 --- a/concourse/pipeline/res_def.yml +++ b/concourse/pipeline/res_def.yml @@ -6,7 +6,7 @@ resources: webhook_token: ((extension/diskquota-webhook-token)) check_every: 24h source: - disable_forks: false + disable_forks: true repository: greenplum-db/diskquota access_token: ((extension/github-access-token)) # Commit trigger From 
93b32c9526564f8277649d6d9b3a2aac51d40a88 Mon Sep 17 00:00:00 2001 From: Zhang Hao Date: Fri, 28 Jul 2023 13:35:05 +0800 Subject: [PATCH 302/330] Release/gp7 2.2.2 (#361) Change the CI pipeline to release diskquota for GP7. TODO: - enable regress test on CI. - enable activatestandby test for GP7 on CI. - fix regress test for GP7. The view in GP7 will be treated as a relation by diskquota. After `CREATE EXTENSION`, the test case should execute `SELECT diskquota.init_table_size_table()` to make the diskquota.state ready on GP7. --- concourse/scripts/test_diskquota.sh | 6 ++++++ upgrade_test/alter_test.sh | 29 +++++++++++++++++++++++++++++ 2 files changed, 35 insertions(+) diff --git a/concourse/scripts/test_diskquota.sh b/concourse/scripts/test_diskquota.sh index 6abe35d04d6..f24180cc1a4 100755 --- a/concourse/scripts/test_diskquota.sh +++ b/concourse/scripts/test_diskquota.sh @@ -23,6 +23,12 @@ function _main() { source /home/gpadmin/gpdb_src/gpAux/gpdemo/gpdemo-env.sh + # FIXME: remove this line after 2.2.2 released. + if [[ $PGPORT -eq 7000 ]] + then + exit + fi + pushd /home/gpadmin/gpdb_src make -C src/test/isolation2 install popd diff --git a/upgrade_test/alter_test.sh b/upgrade_test/alter_test.sh index 96e29af0684..ba6268c2b28 100755 --- a/upgrade_test/alter_test.sh +++ b/upgrade_test/alter_test.sh @@ -48,6 +48,25 @@ test_alter_from() { psql -d diskquota_alter_test -c "DROP EXTENSION diskquota" } +_determine_gp_major_version() { + local includedir="$(pg_config --includedir)" + GP_MAJORVERSION=$(grep -oP '.*GP_MAJORVERSION.*"\K[^"]+' "${includedir}/pg_config.h") +} +_determine_gp_major_version + +compare_versions() { + # implementing string manipulation + local a=${1%%.*} b=${2%%.*} + [[ "10#${a:-0}" -gt "10#${b:-0}" ]] && return 1 + [[ "10#${a:-0}" -lt "10#${b:-0}" ]] && return 2 + # re-assigning a and b with greatest of 1 and 2 after manipulation + a=${1:${#a} + 1} + b=${2:${#b} + 1} + # terminal condition for recursion + [[ -z $a && -z $b ]] || compare_versions "$a" "$b" +} + + # Find all minor versions before current one while IFS= read -r ver; do if [ "${ver}" = "${CUR_VERSION}" ]; then @@ -56,6 +75,16 @@ while IFS= read -r ver; do if [ "${ver}" = "0.8" ]; then continue fi + # The first version of diskquota for GP7 is 2.2 + if [ "$GP_MAJORVERSION" -eq "7" ]; then + set +e + compare_versions $ver "2.2" + cmp_res=$? + set -e + if [ $cmp_res -eq "2" ]; then + continue + fi + fi VERSIONS_TO_TEST+=("${ver}") done <<< "$ALL_VERSIONS" From 5aab83edcdf72214aed5bf61b736b3d41ac08bc2 Mon Sep 17 00:00:00 2001 From: Zhang Hao Date: Thu, 24 Aug 2023 13:59:03 +0800 Subject: [PATCH 303/330] Update to 2.3.0 (#362) Fix the following issues: diskquota cannot correctly print a warning when calling `create extension` in a non-empty database. The diskquota.state shows whether the current database is empty. Previously, we updated the status to diskquota.state during `create extension` and queried the status in UDF `diskquota.diskquota_start_worker`. When querying the database status, the relations whose `relkind in ('v', 'c', 'f')` are skipped, while all relations are filtered when updating the database status. This patch merges the two SQL statements to `INSERT RETURNING` to solve this problem. Benefit: we won't need to upgrade the minor version of diskquota when changing the statement. Remove useless test `test_update_db_cache`. The behavior of the bgworker scheduler is changed, so this test is no longer helpful. Update the upgrade test script `2.2_set_quota`. 
Since the above issue, there should be `select diskquota.init_table_size_table()` in `2.2_set_quota`. Add upgrade test for gp7. Fix upgrade test for gp6. --- VERSION | 2 +- concourse/scripts/test_diskquota.sh | 6 - control/ddl/diskquota--2.2--2.3.sql | 45 ++ control/ddl/diskquota--2.3--2.2.sql | 45 ++ control/ddl/diskquota--2.3.sql | 322 ++++++++ control/ddl/diskquota.control | 2 +- control/test/diskquota_test--1.0.sql | 2 +- src/diskquota_utility.c | 14 +- .../expected7/test_create_extension.out | 15 + .../expected7/test_drop_extension.out | 12 + .../expected7/test_ereport_from_seg.out | 62 ++ .../expected7/test_fast_quota_view.out | 182 +++++ .../expected7/test_per_segment_config.out | 34 +- .../expected7/test_postmaster_restart.out | 72 +- tests/isolation2/expected7/test_rejectmap.out | 738 ++++++++++++++++++ .../expected7/test_relation_cache.out | 70 ++ .../expected7/test_relation_size.out | 87 +++ tests/isolation2/expected7/test_truncate.out | 86 ++ tests/isolation2/expected7/test_vacuum.out | 99 +++ tests/regress/diskquota_schedule | 1 - .../regress/expected/test_update_db_cache.out | 64 -- tests/regress/sql/config.sql | 1 + tests/regress/sql/test_update_db_cache.sql | 43 - upgrade_test/CMakeLists.txt | 9 +- upgrade_test/expected/2.2_set_quota.out | 13 +- .../2.2_test_in_2.3_quota_create_in_2.2.out | 16 + upgrade_test/expected/2.3_catalog.out | 310 ++++++++ upgrade_test/expected/2.3_cleanup_quota.out | 1 + upgrade_test/expected/2.3_install.out | 13 + .../expected/2.3_migrate_to_version_2.3.out | 10 + upgrade_test/expected/2.3_set_quota.out | 68 ++ .../2.3_test_in_2.2_quota_create_in_2.3.out | 16 + upgrade_test/expected7/2.2_catalog.out | 308 ++++++++ upgrade_test/expected7/2.2_cleanup_quota.out | 1 + upgrade_test/expected7/2.2_install.out | 13 + .../expected7/2.2_migrate_to_version_2.2.out | 10 + upgrade_test/expected7/2.2_set_quota.out | 72 ++ .../2.2_test_in_2.3_quota_create_in_2.2.out | 16 + upgrade_test/expected7/2.3_catalog.out | 308 ++++++++ upgrade_test/expected7/2.3_cleanup_quota.out | 1 + upgrade_test/expected7/2.3_install.out | 13 + .../expected7/2.3_migrate_to_version_2.3.out | 10 + upgrade_test/expected7/2.3_set_quota.out | 66 ++ .../2.3_test_in_2.2_quota_create_in_2.3.out | 16 + upgrade_test/schedule_2.2--2.3 | 8 + upgrade_test/schedule_2.3--2.2 | 8 + upgrade_test/sql/2.2_set_quota.sql | 3 +- .../2.2_test_in_2.3_quota_create_in_2.2.sql | 16 + upgrade_test/sql/2.3_catalog.sql | 81 ++ upgrade_test/sql/2.3_cleanup_quota.sql | 1 + upgrade_test/sql/2.3_install.sql | 17 + .../sql/2.3_migrate_to_version_2.3.sql | 8 + upgrade_test/sql/2.3_set_quota.sql | 44 ++ .../2.3_test_in_2.2_quota_create_in_2.3.sql | 16 + 54 files changed, 3317 insertions(+), 179 deletions(-) create mode 100644 control/ddl/diskquota--2.2--2.3.sql create mode 100644 control/ddl/diskquota--2.3--2.2.sql create mode 100644 control/ddl/diskquota--2.3.sql create mode 100644 tests/isolation2/expected7/test_create_extension.out create mode 100644 tests/isolation2/expected7/test_drop_extension.out create mode 100644 tests/isolation2/expected7/test_ereport_from_seg.out create mode 100644 tests/isolation2/expected7/test_fast_quota_view.out create mode 100644 tests/isolation2/expected7/test_rejectmap.out create mode 100644 tests/isolation2/expected7/test_relation_cache.out create mode 100644 tests/isolation2/expected7/test_relation_size.out create mode 100644 tests/isolation2/expected7/test_truncate.out create mode 100644 tests/isolation2/expected7/test_vacuum.out delete mode 100644 
tests/regress/expected/test_update_db_cache.out delete mode 100644 tests/regress/sql/test_update_db_cache.sql create mode 100644 upgrade_test/expected/2.2_test_in_2.3_quota_create_in_2.2.out create mode 100644 upgrade_test/expected/2.3_catalog.out create mode 100644 upgrade_test/expected/2.3_cleanup_quota.out create mode 100644 upgrade_test/expected/2.3_install.out create mode 100644 upgrade_test/expected/2.3_migrate_to_version_2.3.out create mode 100644 upgrade_test/expected/2.3_set_quota.out create mode 100644 upgrade_test/expected/2.3_test_in_2.2_quota_create_in_2.3.out create mode 100644 upgrade_test/expected7/2.2_catalog.out create mode 100644 upgrade_test/expected7/2.2_cleanup_quota.out create mode 100644 upgrade_test/expected7/2.2_install.out create mode 100644 upgrade_test/expected7/2.2_migrate_to_version_2.2.out create mode 100644 upgrade_test/expected7/2.2_set_quota.out create mode 100644 upgrade_test/expected7/2.2_test_in_2.3_quota_create_in_2.2.out create mode 100644 upgrade_test/expected7/2.3_catalog.out create mode 100644 upgrade_test/expected7/2.3_cleanup_quota.out create mode 100644 upgrade_test/expected7/2.3_install.out create mode 100644 upgrade_test/expected7/2.3_migrate_to_version_2.3.out create mode 100644 upgrade_test/expected7/2.3_set_quota.out create mode 100644 upgrade_test/expected7/2.3_test_in_2.2_quota_create_in_2.3.out create mode 100644 upgrade_test/schedule_2.2--2.3 create mode 100644 upgrade_test/schedule_2.3--2.2 create mode 100644 upgrade_test/sql/2.2_test_in_2.3_quota_create_in_2.2.sql create mode 100644 upgrade_test/sql/2.3_catalog.sql create mode 100644 upgrade_test/sql/2.3_cleanup_quota.sql create mode 100644 upgrade_test/sql/2.3_install.sql create mode 100644 upgrade_test/sql/2.3_migrate_to_version_2.3.sql create mode 100644 upgrade_test/sql/2.3_set_quota.sql create mode 100644 upgrade_test/sql/2.3_test_in_2.2_quota_create_in_2.3.sql diff --git a/VERSION b/VERSION index b1b25a5ffae..276cbf9e285 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.2.2 +2.3.0 diff --git a/concourse/scripts/test_diskquota.sh b/concourse/scripts/test_diskquota.sh index f24180cc1a4..6abe35d04d6 100755 --- a/concourse/scripts/test_diskquota.sh +++ b/concourse/scripts/test_diskquota.sh @@ -23,12 +23,6 @@ function _main() { source /home/gpadmin/gpdb_src/gpAux/gpdemo/gpdemo-env.sh - # FIXME: remove this line after 2.2.2 released. 
- if [[ $PGPORT -eq 7000 ]] - then - exit - fi - pushd /home/gpadmin/gpdb_src make -C src/test/isolation2 install popd diff --git a/control/ddl/diskquota--2.2--2.3.sql b/control/ddl/diskquota--2.2--2.3.sql new file mode 100644 index 00000000000..4669f79a9eb --- /dev/null +++ b/control/ddl/diskquota--2.2--2.3.sql @@ -0,0 +1,45 @@ +-- TODO check if worker should not refresh, current lib should be diskquota-2.3.so + +-- UDF +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_schema_quota(text, text) RETURNS void STRICT AS '$libdir/diskquota-2.3.so' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_role_quota(text, text) RETURNS void STRICT AS '$libdir/diskquota-2.3.so' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.init_table_size_table() RETURNS void STRICT AS '$libdir/diskquota-2.3.so' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.diskquota_fetch_table_stat(int4, oid[]) RETURNS setof diskquota.diskquota_active_table_type AS '$libdir/diskquota-2.3.so', 'diskquota_fetch_table_stat' LANGUAGE C VOLATILE; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_schema_tablespace_quota(text, text, text) RETURNS void STRICT AS '$libdir/diskquota-2.3.so' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_role_tablespace_quota(text, text, text) RETURNS void STRICT AS '$libdir/diskquota-2.3.so' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_per_segment_quota(text, float4) RETURNS void STRICT AS '$libdir/diskquota-2.3.so' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.refresh_rejectmap(diskquota.rejectmap_entry[], oid[]) RETURNS void STRICT AS '$libdir/diskquota-2.3.so' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.show_rejectmap() RETURNS setof diskquota.rejectmap_entry_detail AS '$libdir/diskquota-2.3.so', 'show_rejectmap' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.pause() RETURNS void STRICT AS '$libdir/diskquota-2.3.so', 'diskquota_pause' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.resume() RETURNS void STRICT AS '$libdir/diskquota-2.3.so', 'diskquota_resume' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.show_worker_epoch() RETURNS bigint STRICT AS '$libdir/diskquota-2.3.so', 'show_worker_epoch' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.wait_for_worker_new_epoch() RETURNS boolean STRICT AS '$libdir/diskquota-2.3.so', 'wait_for_worker_new_epoch' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.status() RETURNS TABLE ("name" text, "status" text) STRICT AS '$libdir/diskquota-2.3.so', 'diskquota_status' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.show_relation_cache() RETURNS setof diskquota.relation_cache_detail AS '$libdir/diskquota-2.3.so', 'show_relation_cache' LANGUAGE C; + +DROP FUNCTION IF EXISTS diskquota.relation_size(relation regclass); +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.relation_size_local(reltablespace oid, relfilenode oid, relpersistence "char", relstorage "char", relam oid) RETURNS bigint STRICT AS '$libdir/diskquota-2.3.so', 'relation_size_local' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.pull_all_table_size(OUT tableid oid, OUT size bigint, OUT segid smallint) RETURNS SETOF RECORD AS '$libdir/diskquota-2.3.so', 'pull_all_table_size' LANGUAGE C; + +CREATE FUNCTION diskquota.relation_size(relation regclass) RETURNS bigint STRICT AS $$ + SELECT SUM(size)::bigint FROM ( + SELECT diskquota.relation_size_local(reltablespace, relfilenode, 
relpersistence, + CASE WHEN EXISTS + (SELECT FROM pg_catalog.pg_attribute WHERE attrelid = 'pg_class'::regclass AND attname = 'relstorage') THEN relstorage::"char" ELSE ''::"char" END, + relam) AS size + FROM gp_dist_random('pg_class') as relstorage WHERE oid = relation + UNION ALL + SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, + CASE WHEN EXISTS + (SELECT FROM pg_catalog.pg_attribute WHERE attrelid = 'pg_class'::regclass AND attname = 'relstorage') THEN relstorage::"char" ELSE ''::"char" END, + relam) AS size + FROM pg_class as relstorage WHERE oid = relation + ) AS t $$ LANGUAGE SQL; + +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.show_relation_cache_all_seg() RETURNS setof diskquota.relation_cache_detail AS $$ + WITH relation_cache AS ( + SELECT diskquota.show_relation_cache() AS a + FROM gp_dist_random('gp_id') + ) + SELECT (a).* FROM relation_cache; $$ LANGUAGE SQL; +-- UDF end diff --git a/control/ddl/diskquota--2.3--2.2.sql b/control/ddl/diskquota--2.3--2.2.sql new file mode 100644 index 00000000000..35dd1b29b76 --- /dev/null +++ b/control/ddl/diskquota--2.3--2.2.sql @@ -0,0 +1,45 @@ +-- TODO check if worker should not refresh, current lib should be diskquota-2.2.so + +-- UDF +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_schema_quota(text, text) RETURNS void STRICT AS '$libdir/diskquota-2.2.so' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_role_quota(text, text) RETURNS void STRICT AS '$libdir/diskquota-2.2.so' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.init_table_size_table() RETURNS void STRICT AS '$libdir/diskquota-2.2.so' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.diskquota_fetch_table_stat(int4, oid[]) RETURNS setof diskquota.diskquota_active_table_type AS '$libdir/diskquota-2.2.so', 'diskquota_fetch_table_stat' LANGUAGE C VOLATILE; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_schema_tablespace_quota(text, text, text) RETURNS void STRICT AS '$libdir/diskquota-2.2.so' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_role_tablespace_quota(text, text, text) RETURNS void STRICT AS '$libdir/diskquota-2.2.so' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_per_segment_quota(text, float4) RETURNS void STRICT AS '$libdir/diskquota-2.2.so' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.refresh_rejectmap(diskquota.rejectmap_entry[], oid[]) RETURNS void STRICT AS '$libdir/diskquota-2.2.so' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.show_rejectmap() RETURNS setof diskquota.rejectmap_entry_detail AS '$libdir/diskquota-2.2.so', 'show_rejectmap' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.pause() RETURNS void STRICT AS '$libdir/diskquota-2.2.so', 'diskquota_pause' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.resume() RETURNS void STRICT AS '$libdir/diskquota-2.2.so', 'diskquota_resume' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.show_worker_epoch() RETURNS bigint STRICT AS '$libdir/diskquota-2.2.so', 'show_worker_epoch' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.wait_for_worker_new_epoch() RETURNS boolean STRICT AS '$libdir/diskquota-2.2.so', 'wait_for_worker_new_epoch' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.status() RETURNS TABLE ("name" text, "status" text) STRICT AS '$libdir/diskquota-2.2.so', 'diskquota_status' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.show_relation_cache() RETURNS setof 
diskquota.relation_cache_detail AS '$libdir/diskquota-2.2.so', 'show_relation_cache' LANGUAGE C; + +DROP FUNCTION IF EXISTS diskquota.relation_size(relation regclass); +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.relation_size_local(reltablespace oid, relfilenode oid, relpersistence "char", relstorage "char", relam oid) RETURNS bigint STRICT AS '$libdir/diskquota-2.2.so', 'relation_size_local' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.pull_all_table_size(OUT tableid oid, OUT size bigint, OUT segid smallint) RETURNS SETOF RECORD AS '$libdir/diskquota-2.2.so', 'pull_all_table_size' LANGUAGE C; + +CREATE FUNCTION diskquota.relation_size(relation regclass) RETURNS bigint STRICT AS $$ + SELECT SUM(size)::bigint FROM ( + SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, + CASE WHEN EXISTS + (SELECT FROM pg_catalog.pg_attribute WHERE attrelid = 'pg_class'::regclass AND attname = 'relstorage') THEN relstorage::"char" ELSE ''::"char" END, + relam) AS size + FROM gp_dist_random('pg_class') as relstorage WHERE oid = relation + UNION ALL + SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, + CASE WHEN EXISTS + (SELECT FROM pg_catalog.pg_attribute WHERE attrelid = 'pg_class'::regclass AND attname = 'relstorage') THEN relstorage::"char" ELSE ''::"char" END, + relam) AS size + FROM pg_class as relstorage WHERE oid = relation + ) AS t $$ LANGUAGE SQL; + +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.show_relation_cache_all_seg() RETURNS setof diskquota.relation_cache_detail AS $$ + WITH relation_cache AS ( + SELECT diskquota.show_relation_cache() AS a + FROM gp_dist_random('gp_id') + ) + SELECT (a).* FROM relation_cache; $$ LANGUAGE SQL; +-- UDF end diff --git a/control/ddl/diskquota--2.3.sql b/control/ddl/diskquota--2.3.sql new file mode 100644 index 00000000000..8be7749f1aa --- /dev/null +++ b/control/ddl/diskquota--2.3.sql @@ -0,0 +1,322 @@ +-- complain if script is sourced in psql, rather than via CREATE EXTENSION +\echo Use "CREATE EXTENSION diskquota" to load this file. 
\quit + +CREATE SCHEMA diskquota; + +-- when (quotatype == NAMESPACE_QUOTA/ROLE_QUOTA) then targetOid = schema_oid/role_oid; +-- when (quotatype == NAMESPACE_TABLESPACE_QUOTA/ROLE_TABLESPACE_QUOTA) then targetOid = diskquota.target.rowId; +CREATE TABLE diskquota.quota_config( + targetOid oid, + quotatype int, + quotalimitMB int8, + segratio float4 DEFAULT 0, + PRIMARY KEY(targetOid, quotatype) +) DISTRIBUTED BY (targetOid, quotatype); + +CREATE TABLE diskquota.target ( + rowId serial, + quotatype int, --REFERENCES diskquota.quota_config.quotatype, + primaryOid oid, + tablespaceOid oid, --REFERENCES pg_tablespace.oid, + PRIMARY KEY (primaryOid, tablespaceOid, quotatype) +); + +CREATE TABLE diskquota.table_size( + tableid oid, + size bigint, + segid smallint, + PRIMARY KEY(tableid, segid) +) DISTRIBUTED BY (tableid, segid); + +CREATE TABLE diskquota.state( + state int, + PRIMARY KEY(state) +) DISTRIBUTED BY (state); + +-- diskquota.quota_config AND diskquota.target are dump-able; the other tables can be generated on the fly +SELECT pg_catalog.pg_extension_config_dump('diskquota.quota_config', ''); +SELECT gp_segment_id, pg_catalog.pg_extension_config_dump('diskquota.quota_config', '') FROM gp_dist_random('gp_id'); +SELECT pg_catalog.pg_extension_config_dump('diskquota.target', ''); +SELECT gp_segment_id, pg_catalog.pg_extension_config_dump('diskquota.target', '') FROM gp_dist_random('gp_id'); + +CREATE TYPE diskquota.diskquota_active_table_type AS ( + "TABLE_OID" oid, + "TABLE_SIZE" int8, + "GP_SEGMENT_ID" smallint +); + +CREATE TYPE diskquota.rejectmap_entry AS ( + target_oid oid, + database_oid oid, + tablespace_oid oid, + target_type integer, + seg_exceeded boolean +); + +CREATE TYPE diskquota.rejectmap_entry_detail AS ( + target_type text, + target_oid oid, + database_oid oid, + tablespace_oid oid, + seg_exceeded boolean, + dbnode oid, + spcnode oid, + relnode oid, + segid int +); + +CREATE TYPE diskquota.relation_cache_detail AS ( + RELID oid, + PRIMARY_TABLE_OID oid, + AUXREL_NUM int, + OWNEROID oid, + NAMESPACEOID oid, + BACKENDID int, + SPCNODE oid, + DBNODE oid, + RELNODE oid, + RELSTORAGE "char", + AUXREL_OID oid[], + RELAM oid +); + +CREATE FUNCTION diskquota.set_schema_quota(text, text) RETURNS void STRICT AS '$libdir/diskquota-2.3.so' LANGUAGE C; +CREATE FUNCTION diskquota.set_role_quota(text, text) RETURNS void STRICT AS '$libdir/diskquota-2.3.so' LANGUAGE C; +CREATE FUNCTION diskquota.init_table_size_table() RETURNS void STRICT AS '$libdir/diskquota-2.3.so' LANGUAGE C; +CREATE FUNCTION diskquota.diskquota_fetch_table_stat(int4, oid[]) RETURNS setof diskquota.diskquota_active_table_type AS '$libdir/diskquota-2.3.so', 'diskquota_fetch_table_stat' LANGUAGE C VOLATILE; +CREATE FUNCTION diskquota.set_schema_tablespace_quota(text, text, text) RETURNS void STRICT AS '$libdir/diskquota-2.3.so' LANGUAGE C; +CREATE FUNCTION diskquota.set_role_tablespace_quota(text, text, text) RETURNS void STRICT AS '$libdir/diskquota-2.3.so' LANGUAGE C; +CREATE FUNCTION diskquota.set_per_segment_quota(text, float4) RETURNS void STRICT AS '$libdir/diskquota-2.3.so' LANGUAGE C; +CREATE FUNCTION diskquota.refresh_rejectmap(diskquota.rejectmap_entry[], oid[]) RETURNS void STRICT AS '$libdir/diskquota-2.3.so' LANGUAGE C; +CREATE FUNCTION diskquota.show_rejectmap() RETURNS setof diskquota.rejectmap_entry_detail AS '$libdir/diskquota-2.3.so', 'show_rejectmap' LANGUAGE C; +CREATE FUNCTION diskquota.pause() RETURNS void STRICT AS '$libdir/diskquota-2.3.so', 'diskquota_pause' LANGUAGE C; +CREATE FUNCTION diskquota.resume() 
RETURNS void STRICT AS '$libdir/diskquota-2.3.so', 'diskquota_resume' LANGUAGE C; +CREATE FUNCTION diskquota.show_worker_epoch() RETURNS bigint STRICT AS '$libdir/diskquota-2.3.so', 'show_worker_epoch' LANGUAGE C; +CREATE FUNCTION diskquota.wait_for_worker_new_epoch() RETURNS boolean STRICT AS '$libdir/diskquota-2.3.so', 'wait_for_worker_new_epoch' LANGUAGE C; +CREATE FUNCTION diskquota.status() RETURNS TABLE ("name" text, "status" text) STRICT AS '$libdir/diskquota-2.3.so', 'diskquota_status' LANGUAGE C; +CREATE FUNCTION diskquota.show_relation_cache() RETURNS setof diskquota.relation_cache_detail AS '$libdir/diskquota-2.3.so', 'show_relation_cache' LANGUAGE C; +CREATE FUNCTION diskquota.relation_size_local(reltablespace oid, relfilenode oid, relpersistence "char", relstorage "char", relam oid) RETURNS bigint STRICT AS '$libdir/diskquota-2.3.so', 'relation_size_local' LANGUAGE C; +CREATE FUNCTION diskquota.pull_all_table_size(OUT tableid oid, OUT size bigint, OUT segid smallint) RETURNS SETOF RECORD AS '$libdir/diskquota-2.3.so', 'pull_all_table_size' LANGUAGE C; + +CREATE FUNCTION diskquota.relation_size(relation regclass) RETURNS bigint STRICT AS $$ + SELECT SUM(size)::bigint FROM ( + SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, + CASE WHEN EXISTS + (SELECT FROM pg_catalog.pg_attribute WHERE attrelid = 'pg_class'::regclass AND attname = 'relstorage') THEN relstorage::"char" ELSE ''::"char" END, + relam) AS size + FROM gp_dist_random('pg_class') as relstorage WHERE oid = relation + UNION ALL + SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, + CASE WHEN EXISTS + (SELECT FROM pg_catalog.pg_attribute WHERE attrelid = 'pg_class'::regclass AND attname = 'relstorage') THEN relstorage::"char" ELSE ''::"char" END, + relam) AS size + FROM pg_class as relstorage WHERE oid = relation + ) AS t $$ LANGUAGE SQL; + +CREATE FUNCTION diskquota.show_relation_cache_all_seg() RETURNS setof diskquota.relation_cache_detail AS $$ + WITH relation_cache AS ( + SELECT diskquota.show_relation_cache() AS a + FROM gp_dist_random('gp_id') + ) + SELECT (a).* FROM relation_cache; $$ LANGUAGE SQL; + +-- view part +CREATE VIEW diskquota.show_all_relation_view AS +WITH + relation_cache AS ( + SELECT (f).* FROM diskquota.show_relation_cache() as f + ) +SELECT DISTINCT(oid), relowner, relnamespace, reltablespace from ( + SELECT relid as oid, owneroid as relowner, namespaceoid as relnamespace, spcnode as reltablespace FROM relation_cache + UNION + SELECT oid, relowner, relnamespace, reltablespace from pg_class +) as union_relation; + +CREATE VIEW diskquota.show_fast_schema_quota_view AS +WITH + quota_usage AS ( + SELECT + relnamespace, + SUM(size) AS total_size + FROM + diskquota.table_size, + diskquota.show_all_relation_view + WHERE + tableid = diskquota.show_all_relation_view.oid AND + segid = -1 + GROUP BY + relnamespace + ) +SELECT + nspname AS schema_name, + targetoid AS schema_oid, + quotalimitMB AS quota_in_mb, + COALESCE(total_size, 0) AS nspsize_in_bytes +FROM + diskquota.quota_config JOIN + pg_namespace ON targetoid = pg_namespace.oid LEFT OUTER JOIN + quota_usage ON pg_namespace.oid = relnamespace +WHERE + quotaType = 0; -- NAMESPACE_QUOTA + +CREATE VIEW diskquota.show_fast_role_quota_view AS +WITH + quota_usage AS ( + SELECT + relowner, + SUM(size) AS total_size + FROM + diskquota.table_size, + diskquota.show_all_relation_view + WHERE + tableid = diskquota.show_all_relation_view.oid AND + segid = -1 + GROUP BY + relowner + ) +SELECT + rolname AS 
role_name, + targetoid AS role_oid, + quotalimitMB AS quota_in_mb, + COALESCE(total_size, 0) AS rolsize_in_bytes +FROM + diskquota.quota_config JOIN + pg_roles ON targetoid = pg_roles.oid LEFT OUTER JOIN + quota_usage ON pg_roles.oid = relowner +WHERE + quotaType = 1; -- ROLE_QUOTA + +CREATE VIEW diskquota.show_fast_database_size_view AS +SELECT ( + (SELECT SUM(pg_relation_size(oid)) FROM pg_class WHERE oid <= 16384) + + + (SELECT SUM(size) FROM diskquota.table_size WHERE segid = -1) +) AS dbsize; + +CREATE VIEW diskquota.rejectmap AS SELECT * FROM diskquota.show_rejectmap() AS BM; + +CREATE VIEW diskquota.show_fast_schema_tablespace_quota_view AS +WITH + default_tablespace AS ( + SELECT dattablespace FROM pg_database + WHERE datname = current_database() + ), + quota_usage AS ( + SELECT + relnamespace, + CASE + WHEN reltablespace = 0 THEN dattablespace + ELSE reltablespace + END AS reltablespace, + SUM(size) AS total_size + FROM + diskquota.table_size, + diskquota.show_all_relation_view, + default_tablespace + WHERE + tableid = diskquota.show_all_relation_view.oid AND + segid = -1 + GROUP BY + relnamespace, + reltablespace, + dattablespace + ), + full_quota_config AS ( + SELECT + primaryOid, + tablespaceoid, + quotalimitMB + FROM + diskquota.quota_config AS config, + diskquota.target AS target + WHERE + config.targetOid = target.rowId AND + config.quotaType = target.quotaType AND + config.quotaType = 2 -- NAMESPACE_TABLESPACE_QUOTA + ) +SELECT + nspname AS schema_name, + primaryoid AS schema_oid, + spcname AS tablespace_name, + tablespaceoid AS tablespace_oid, + quotalimitMB AS quota_in_mb, + COALESCE(total_size, 0) AS nspsize_tablespace_in_bytes +FROM + full_quota_config JOIN + pg_namespace ON primaryOid = pg_namespace.oid JOIN + pg_tablespace ON tablespaceoid = pg_tablespace.oid LEFT OUTER JOIN + quota_usage ON pg_namespace.oid = relnamespace AND pg_tablespace.oid = reltablespace; + +CREATE VIEW diskquota.show_fast_role_tablespace_quota_view AS +WITH + default_tablespace AS ( + SELECT dattablespace FROM pg_database + WHERE datname = current_database() + ), + quota_usage AS ( + SELECT + relowner, + CASE + WHEN reltablespace = 0 THEN dattablespace + ELSE reltablespace + END AS reltablespace, + SUM(size) AS total_size + FROM + diskquota.table_size, + diskquota.show_all_relation_view, + default_tablespace + WHERE + tableid = diskquota.show_all_relation_view.oid AND + segid = -1 + GROUP BY + relowner, + reltablespace, + dattablespace + ), + full_quota_config AS ( + SELECT + primaryOid, + tablespaceoid, + quotalimitMB + FROM + diskquota.quota_config AS config, + diskquota.target AS target + WHERE + config.targetOid = target.rowId AND + config.quotaType = target.quotaType AND + config.quotaType = 3 -- ROLE_TABLESPACE_QUOTA + ) +SELECT + rolname AS role_name, + primaryoid AS role_oid, + spcname AS tablespace_name, + tablespaceoid AS tablespace_oid, + quotalimitMB AS quota_in_mb, + COALESCE(total_size, 0) AS rolsize_tablespace_in_bytes +FROM + full_quota_config JOIN + pg_roles ON primaryoid = pg_roles.oid JOIN + pg_tablespace ON tablespaceoid = pg_tablespace.oid LEFT OUTER JOIN + quota_usage ON pg_roles.oid = relowner AND pg_tablespace.oid = reltablespace; + +CREATE VIEW diskquota.show_segment_ratio_quota_view AS +SELECT + spcname as tablespace_name, + pg_tablespace.oid as tablespace_oid, + segratio as per_seg_quota_ratio +FROM + diskquota.quota_config JOIN + pg_tablespace ON targetOid = pg_tablespace.oid + AND quotatype = 4; + +-- view end + +-- re-dispatch pause status to false. 
This covers the case where the user does pause-drop-recreate; +-- see test case 'test_drop_after_pause'. +SELECT FROM diskquota.resume(); + + +--- Starting the worker has to be the last step. +CREATE FUNCTION diskquota.diskquota_start_worker() RETURNS void STRICT AS '$libdir/diskquota-2.3.so' LANGUAGE C; +SELECT diskquota.diskquota_start_worker(); +DROP FUNCTION diskquota.diskquota_start_worker(); diff --git a/control/ddl/diskquota.control b/control/ddl/diskquota.control index 32b4809fde1..4e5f6e5e6cf 100644 --- a/control/ddl/diskquota.control +++ b/control/ddl/diskquota.control @@ -1,5 +1,5 @@ # diskquota extension comment = 'Disk Quota Main Program' -default_version = '2.2' +default_version = '2.3' module_pathname = 'do-not-use-this' relocatable = true diff --git a/control/test/diskquota_test--1.0.sql b/control/test/diskquota_test--1.0.sql index f5e39444aa9..2a86e965417 100644 --- a/control/test/diskquota_test--1.0.sql +++ b/control/test/diskquota_test--1.0.sql @@ -27,7 +27,7 @@ CREATE TYPE diskquota_test.db_status AS ( "epoch" int8, "paused" bool ); -CREATE FUNCTION diskquota_test.db_status() RETURNS setof diskquota_test.db_status AS '$libdir/diskquota-2.2.so', 'db_status' LANGUAGE C VOLATILE; +CREATE FUNCTION diskquota_test.db_status() RETURNS setof diskquota_test.db_status AS '$libdir/diskquota-2.3.so', 'db_status' LANGUAGE C VOLATILE; CREATE FUNCTION diskquota_test.cur_db_status() RETURNS diskquota_test.db_status AS $$ SELECT * from diskquota_test.db_status() where datname = current_database(); $$ LANGUAGE SQL; diff --git a/src/diskquota_utility.c b/src/diskquota_utility.c index f406809c875..00dab97b520 100644 --- a/src/diskquota_utility.c +++ b/src/diskquota_utility.c @@ -499,17 +499,19 @@ is_database_empty(void) SPI_connect(); ret = SPI_execute( - "SELECT (count(relname) = 0) " + "INSERT INTO diskquota.state SELECT (count(relname) = 0)::int " "FROM " " pg_class AS c, " " pg_namespace AS n " - "WHERE c.oid > 16384 and relnamespace = n.oid and nspname != 'diskquota'" - " and relkind not in ('v', 'c', 'f')", - true, 0); - if (ret != SPI_OK_SELECT) + "WHERE c.oid > 16384 and relnamespace = n.oid and nspname != 'diskquota' " + "and relkind not in ('v', 'c', 'f') " + "returning state", + false, 0); + if (ret != SPI_OK_INSERT_RETURNING) { int saved_errno = errno; - elog(ERROR, "cannot select pg_class and pg_namespace table, reason: %s.", strerror(saved_errno)); + elog(ERROR, "cannot select pg_class and pg_namespace table and update diskquota.state, reason: %s.", + strerror(saved_errno)); } tupdesc = SPI_tuptable->tupdesc; diff --git a/tests/isolation2/expected7/test_create_extension.out b/tests/isolation2/expected7/test_create_extension.out new file mode 100644 index 00000000000..f34d591a4da --- /dev/null +++ b/tests/isolation2/expected7/test_create_extension.out @@ -0,0 +1,15 @@ +CREATE EXTENSION diskquota; +CREATE EXTENSION + +SELECT diskquota.init_table_size_table(); + init_table_size_table +----------------------- + +(1 row) + +-- Wait after init so that diskquota.state is clean +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) diff --git a/tests/isolation2/expected7/test_drop_extension.out b/tests/isolation2/expected7/test_drop_extension.out new file mode 100644 index 00000000000..7e2997004dd --- /dev/null +++ b/tests/isolation2/expected7/test_drop_extension.out @@ -0,0 +1,12 @@ +SELECT diskquota.pause(); + pause +------- + +(1 row) +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + 
t +(1 row) +DROP EXTENSION diskquota; +DROP EXTENSION diff --git a/tests/isolation2/expected7/test_ereport_from_seg.out b/tests/isolation2/expected7/test_ereport_from_seg.out new file mode 100644 index 00000000000..67ae6925df9 --- /dev/null +++ b/tests/isolation2/expected7/test_ereport_from_seg.out @@ -0,0 +1,62 @@ +CREATE SCHEMA efs1; +CREATE SCHEMA +SELECT diskquota.set_schema_quota('efs1', '1MB'); + set_schema_quota +------------------ + +(1 row) +CREATE TABLE efs1.t(i int); +CREATE TABLE + +INSERT INTO efs1.t SELECT generate_series(1, 10000); +INSERT 0 10000 +-- wait for refresh of diskquota and check the quota size +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +SELECT schema_name, quota_in_mb, nspsize_in_bytes FROM diskquota.show_fast_schema_quota_view WHERE schema_name = 'efs1'; + schema_name | quota_in_mb | nspsize_in_bytes +-------------+-------------+------------------ + efs1 | 1 | 688128 +(1 row) + +-- Enable check quota by relfilenode on seg0. +SELECT gp_inject_fault_infinite('ereport_warning_from_segment', 'skip', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +INSERT INTO efs1.t SELECT generate_series(1, 10000); +INSERT 0 10000 + +-- wait for refresh of diskquota and check whether the quota size changes +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +SELECT schema_name, quota_in_mb, nspsize_in_bytes FROM diskquota.show_fast_schema_quota_view WHERE schema_name = 'efs1'; + schema_name | quota_in_mb | nspsize_in_bytes +-------------+-------------+------------------ + efs1 | 1 | 1081344 +(1 row) + +DROP TABLE efs1.t; +DROP TABLE +DROP SCHEMA efs1; +DROP SCHEMA + +-- Reset fault injection points set by us at the top of this test. 
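+-- (The faults in this suite follow a set/wait/reset lifecycle; a minimal sketch of the pattern, with 'some_fault' as a placeholder name: +-- SELECT gp_inject_fault_infinite('some_fault', 'skip', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; +-- SELECT gp_wait_until_triggered_fault('some_fault', 1, dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; +-- SELECT gp_inject_fault_infinite('some_fault', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0;)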
+SELECT gp_inject_fault_infinite('ereport_warning_from_segment', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) diff --git a/tests/isolation2/expected7/test_fast_quota_view.out b/tests/isolation2/expected7/test_fast_quota_view.out new file mode 100644 index 00000000000..75ee06e7da9 --- /dev/null +++ b/tests/isolation2/expected7/test_fast_quota_view.out @@ -0,0 +1,182 @@ +CREATE SCHEMA s1; +CREATE SCHEMA +CREATE SCHEMA s2; +CREATE SCHEMA + +CREATE ROLE r LOGIN SUPERUSER; +CREATE ROLE + +!\retcode mkdir -p /tmp/spc1; +-- start_ignore + +-- end_ignore +(exited with code 0) +!\retcode mkdir -p /tmp/spc2; +-- start_ignore + +-- end_ignore +(exited with code 0) + +DROP TABLESPACE IF EXISTS spc1; +DROP TABLESPACE +CREATE TABLESPACE spc1 LOCATION '/tmp/spc1'; +CREATE TABLESPACE +DROP TABLESPACE IF EXISTS spc2; +DROP TABLESPACE +CREATE TABLESPACE spc2 LOCATION '/tmp/spc2'; +CREATE TABLESPACE + +SELECT diskquota.set_schema_quota('s1', '100 MB'); + set_schema_quota +------------------ + +(1 row) +SELECT diskquota.set_schema_tablespace_quota('s2', 'spc1','100 MB'); + set_schema_tablespace_quota +----------------------------- + +(1 row) +SELECT diskquota.set_role_quota('r', '100 MB'); + set_role_quota +---------------- + +(1 row) +SELECT diskquota.set_role_tablespace_quota('r', 'spc2', '100 MB'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +-- test show_fast_schema_quota_view and show_fast_schema_tablespace_quota_view +1: BEGIN; +BEGIN +1: CREATE TABLE s1.t(i int) DISTRIBUTED BY (i); +CREATE TABLE +1: INSERT INTO s1.t SELECT generate_series(1, 100000); +INSERT 0 100000 + +1: CREATE TABLE s2.t(i int) TABLESPACE spc1 DISTRIBUTED BY (i); +CREATE TABLE +1: INSERT INTO s2.t SELECT generate_series(1, 100000); +INSERT 0 100000 + +1: SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- check schema quota view before transaction commits +2: SELECT schema_name, quota_in_mb, nspsize_in_bytes FROM diskquota.show_fast_schema_quota_view; + schema_name | quota_in_mb | nspsize_in_bytes +-------------+-------------+------------------ + s1 | 100 | 3932160 +(1 row) +2: SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view; + schema_name | tablespace_name | quota_in_mb | nspsize_tablespace_in_bytes +-------------+-----------------+-------------+----------------------------- + s2 | spc1 | 100 | 3932160 +(1 row) + +1: COMMIT; +COMMIT +2: SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +2: SELECT schema_name, quota_in_mb, nspsize_in_bytes FROM diskquota.show_fast_schema_quota_view; + schema_name | quota_in_mb | nspsize_in_bytes +-------------+-------------+------------------ + s1 | 100 | 3932160 +(1 row) +2: SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view; + schema_name | tablespace_name | quota_in_mb | nspsize_tablespace_in_bytes +-------------+-----------------+-------------+----------------------------- + s2 | spc1 | 100 | 3932160 +(1 row) + +-- login r to test role quota view +1: SET ROLE r; +SET + +-- test show_fast_role_quota_view and show_fast_role_tablespace_quota_view +1: BEGIN; +BEGIN +1: CREATE TABLE t1(i int) DISTRIBUTED BY (i); +CREATE TABLE +1: INSERT INTO t1 SELECT generate_series(1, 
100000); +INSERT 0 100000 + +1: CREATE TABLE t2(i int) TABLESPACE spc2 DISTRIBUTED BY (i); +CREATE TABLE +1: INSERT INTO t2 SELECT generate_series(1, 100000); +INSERT 0 100000 + +1: SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- check role quota view before transaction commits +2: SELECT role_name, quota_in_mb, rolsize_in_bytes FROM diskquota.show_fast_role_quota_view; + role_name | quota_in_mb | rolsize_in_bytes +-----------+-------------+------------------ + r | 100 | 7864320 +(1 row) +2: SELECT role_name, tablespace_name, quota_in_mb, rolsize_tablespace_in_bytes FROM diskquota.show_fast_role_tablespace_quota_view; + role_name | tablespace_name | quota_in_mb | rolsize_tablespace_in_bytes +-----------+-----------------+-------------+----------------------------- + r | spc2 | 100 | 3932160 +(1 row) + +1: COMMIT; +COMMIT +2: SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +2: SELECT role_name, quota_in_mb, rolsize_in_bytes FROM diskquota.show_fast_role_quota_view; + role_name | quota_in_mb | rolsize_in_bytes +-----------+-------------+------------------ + r | 100 | 7864320 +(1 row) +2: SELECT role_name, tablespace_name, quota_in_mb, rolsize_tablespace_in_bytes FROM diskquota.show_fast_role_tablespace_quota_view; + role_name | tablespace_name | quota_in_mb | rolsize_tablespace_in_bytes +-----------+-----------------+-------------+----------------------------- + r | spc2 | 100 | 3932160 +(1 row) + +DROP TABLE IF EXISTS s1.t; +DROP TABLE +DROP TABLE IF EXISTS s2.t; +DROP TABLE +DROP TABLE IF EXISTS t1; +DROP TABLE +DROP TABLE IF EXISTS t2; +DROP TABLE + +DROP SCHEMA IF EXISTS s1; +DROP SCHEMA +DROP SCHEMA IF EXISTS s2; +DROP SCHEMA +DROP ROLE IF EXISTS r; +DROP ROLE + +DROP TABLESPACE IF EXISTS spc1; +DROP TABLESPACE +DROP TABLESPACE IF EXISTS spc2; +DROP TABLESPACE + +!\retcode rm -rf /tmp/spc1; +-- start_ignore + +-- end_ignore +(exited with code 0) +!\retcode rm -rf /tmp/spc2; +-- start_ignore + +-- end_ignore +(exited with code 0) diff --git a/tests/isolation2/expected7/test_per_segment_config.out b/tests/isolation2/expected7/test_per_segment_config.out index 79b4a8ffcdc..1a6deb8baed 100644 --- a/tests/isolation2/expected7/test_per_segment_config.out +++ b/tests/isolation2/expected7/test_per_segment_config.out @@ -10,11 +10,11 @@ (exited with code 0) -- end_ignore CREATE SCHEMA s101; -CREATE +CREATE SCHEMA DROP TABLESPACE IF EXISTS spc101; -DROP +DROP TABLESPACE CREATE TABLESPACE spc101 LOCATION '/tmp/spc101'; -CREATE +CREATE TABLESPACE -- -- There is no tablesapce per segment quota configed yet @@ -53,9 +53,9 @@ SELECT segratio from diskquota.quota_config, pg_tablespace where targetoid = oid (1 row) -- cleanup truncate table diskquota.quota_config; -TRUNCATE +TRUNCATE TABLE truncate table diskquota.target; -TRUNCATE +TRUNCATE TABLE -- Read commited, first set_schema_tablespace_quota, then set_per_segment_quota, 1: BEGIN; @@ -90,9 +90,9 @@ SELECT segratio from diskquota.quota_config, pg_tablespace where targetoid = oid (1 row) -- cleanup truncate table diskquota.quota_config; -TRUNCATE +TRUNCATE TABLE truncate table diskquota.target; -TRUNCATE +TRUNCATE TABLE -- -- There is already a tablesapce per segment quota configed @@ -136,9 +136,9 @@ SELECT segratio from diskquota.quota_config, pg_tablespace where targetoid = oid (1 row) -- cleanup truncate table diskquota.quota_config; -TRUNCATE +TRUNCATE TABLE truncate table diskquota.target; -TRUNCATE 
+TRUNCATE TABLE -- Read commited, first set_schema_tablespace_quota, then set_per_segment_quota, SELECT diskquota.set_per_segment_quota('spc101', 2); @@ -178,9 +178,9 @@ SELECT segratio from diskquota.quota_config, pg_tablespace where targetoid = oid (1 row) -- cleanup truncate table diskquota.quota_config; -TRUNCATE +TRUNCATE TABLE truncate table diskquota.target; -TRUNCATE +TRUNCATE TABLE -- Read commited, first delete per_segment_quota, then set_schema_tablespace_quota SELECT diskquota.set_per_segment_quota('spc101', 2); @@ -219,9 +219,9 @@ SELECT segratio from diskquota.quota_config, pg_tablespace where targetoid = oid (0 rows) -- cleanup truncate table diskquota.quota_config; -TRUNCATE +TRUNCATE TABLE truncate table diskquota.target; -TRUNCATE +TRUNCATE TABLE -- Read commited, first set_schema_tablespace_quota, then delete tablespace per segment ratio SELECT diskquota.set_per_segment_quota('spc101', 2); @@ -260,10 +260,10 @@ SELECT segratio from diskquota.quota_config, pg_tablespace where targetoid = oid (0 rows) -- cleanup truncate table diskquota.quota_config; -TRUNCATE +TRUNCATE TABLE truncate table diskquota.target; -TRUNCATE +TRUNCATE TABLE DROP SCHEMA s101; -DROP +DROP SCHEMA DROP TABLESPACE spc101; -DROP +DROP TABLESPACE diff --git a/tests/isolation2/expected7/test_postmaster_restart.out b/tests/isolation2/expected7/test_postmaster_restart.out index bf842f49749..ccc9c53a43f 100644 --- a/tests/isolation2/expected7/test_postmaster_restart.out +++ b/tests/isolation2/expected7/test_postmaster_restart.out @@ -10,7 +10,7 @@ (exited with code 0) 1: CREATE SCHEMA postmaster_restart_s; -CREATE +CREATE SCHEMA 1: SET search_path TO postmaster_restart_s; SET @@ -27,29 +27,29 @@ SET -- expect fail 1: CREATE TABLE t1 AS SELECT generate_series(1,10000000); -ERROR: schema's disk space quota exceeded with name: 33502 (seg2 127.0.0.1:7004 pid=675047) +ERROR: schema's disk space quota exceeded with name: 17614 (seg0 127.0.0.1:7002 pid=854097) 1q: ... 
-- launcher should exist -- [p]ostgres is to filter out the pgrep itself !\retcode pgrep -f "[p]ostgres.*launcher"; -- start_ignore -673843 -673846 -673855 -673857 -673872 -673875 -673925 -673943 -673944 +839083 +839087 +839094 +839097 +839109 +839112 +839139 +839157 +839160 -- end_ignore (exited with code 0) -- bgworker should exist !\retcode pgrep -f "[p]ostgres.*diskquota.*isolation2test"; -- start_ignore -674189 +839377 -- end_ignore (exited with code 0) @@ -66,12 +66,12 @@ server stopped -- launcher should be terminated !\retcode pgrep -f "[p]ostgres.*launcher"; -- start_ignore -673843 -673846 -673855 -673857 -673872 -673875 +839083 +839087 +839094 +839097 +839109 +839112 -- end_ignore (exited with code 0) @@ -87,11 +87,11 @@ server stopped -- See https://github.com/greenplum-db/gpdb/pull/9396 !\retcode pg_ctl -D $MASTER_DATA_DIRECTORY -w -o "-c gp_role=dispatch" start; -- start_ignore -waiting for server to start....2023-03-06 16:13:41.483928 CST,,,p675192,th987391872,,,,0,,,seg-1,,,,,"LOG","00000","starting PostgreSQL 12.12 (Greenplum Database 7.0.0-beta.1+dev.215.gb9adc4ece5 build dev) on x86_64-pc-linux-gnu, compiled by clang version 15.0.7, 64-bit",,,,,,,,"PostmasterMain","postmaster.c",1237, -2023-03-06 16:13:41.484093 CST,,,p675192,th987391872,,,,0,,,seg-1,,,,,"LOG","00000","listening on IPv4 address ""0.0.0.0"", port 7000",,,,,,,,"StreamServerPort","pqcomm.c",631, -2023-03-06 16:13:41.484153 CST,,,p675192,th987391872,,,,0,,,seg-1,,,,,"LOG","00000","listening on IPv6 address ""::"", port 7000",,,,,,,,"StreamServerPort","pqcomm.c",631, -2023-03-06 16:13:41.484241 CST,,,p675192,th987391872,,,,0,,,seg-1,,,,,"LOG","00000","listening on Unix socket ""/tmp/.s.PGSQL.7000""",,,,,,,,"StreamServerPort","pqcomm.c",625, -2023-03-06 16:13:41.510380 CST,,,p675192,th987391872,,,,0,,,seg-1,,,,,"LOG","00000","redirecting log output to logging collector process",,"Future log output will appear in directory ""log"".",,,,,,"SysLogger_Start","syslogger.c",929, +waiting for server to start....2023-07-31 15:59:31.043830 CST,,,p854265,th579172224,,,,0,,,seg-1,,,,,"LOG","00000","starting PostgreSQL 12.12 (Greenplum Database 7.0.0-beta.4+dev.218.g9ec0a0a842 build dev) on x86_64-pc-linux-gnu, compiled by clang version 15.0.7, 64-bit",,,,,,,,"PostmasterMain","postmaster.c",1243, +2023-07-31 15:59:31.044012 CST,,,p854265,th579172224,,,,0,,,seg-1,,,,,"LOG","00000","listening on IPv4 address ""0.0.0.0"", port 7000",,,,,,,,"StreamServerPort","pqcomm.c",631, +2023-07-31 15:59:31.044060 CST,,,p854265,th579172224,,,,0,,,seg-1,,,,,"LOG","00000","listening on IPv6 address ""::"", port 7000",,,,,,,,"StreamServerPort","pqcomm.c",631, +2023-07-31 15:59:31.044140 CST,,,p854265,th579172224,,,,0,,,seg-1,,,,,"LOG","00000","listening on Unix socket ""/tmp/.s.PGSQL.7000""",,,,,,,,"StreamServerPort","pqcomm.c",625, +2023-07-31 15:59:31.076319 CST,,,p854265,th579172224,,,,0,,,seg-1,,,,,"LOG","00000","redirecting log output to logging collector process",,"Future log output will appear in directory ""log"".",,,,,,"SysLogger_Start","syslogger.c",929, done server started @@ -107,22 +107,22 @@ server started -- launcher should be restarted !\retcode pgrep -f "[p]ostgres.*launcher"; -- start_ignore -673843 -673846 -673855 -673857 -673872 -673875 -675198 -675213 -675217 +839083 +839087 +839094 +839097 +839109 +839112 +854271 +854289 +854293 -- end_ignore (exited with code 0) -- bgworker should be restarted !\retcode pgrep -f "[p]ostgres.*diskquota.*isolation2test"; -- start_ignore -675239 +854311 -- end_ignore (exited with code 0) 
@@ -136,7 +136,7 @@ SET (1 row) -- expect fail 1: CREATE TABLE t2 AS SELECT generate_series(1,10000000); -ERROR: schema's disk space quota exceeded with name: 33502 (seg2 127.0.0.1:7004 pid=679604) +ERROR: schema's disk space quota exceeded with name: 17614 (seg0 127.0.0.1:7002 pid=858309) -- enlarge the quota limits 1: SELECT diskquota.set_schema_quota('postmaster_restart_s', '100 MB'); set_schema_quota @@ -150,10 +150,10 @@ ERROR: schema's disk space quota exceeded with name: 33502 (seg2 127.0.0.1:700 (1 row) -- expect succeed 1: CREATE TABLE t3 AS SELECT generate_series(1,1000000); -CREATE 1000000 +SELECT 1000000 1: DROP SCHEMA postmaster_restart_s CASCADE; -DROP +DROP SCHEMA 1q: ... !\retcode gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null; -- start_ignore diff --git a/tests/isolation2/expected7/test_rejectmap.out b/tests/isolation2/expected7/test_rejectmap.out new file mode 100644 index 00000000000..385889fae66 --- /dev/null +++ b/tests/isolation2/expected7/test_rejectmap.out @@ -0,0 +1,738 @@ +-- +-- This file contains tests for dispatching rejectmap and canceling +-- queries in smgrextend hook by relation's relfilenode. +-- + +-- Enable check quota by relfilenode on seg0. +SELECT gp_inject_fault_infinite('enable_check_quota_by_relfilenode', 'skip', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) + +-- this function return valid tablespaceoid. +-- For role/namespace quota, return as it is. +-- For namespace_tablespace/role_tablespace quota, return non-zero tablespaceoid. +CREATE OR REPLACE FUNCTION get_real_tablespace_oid(block_type text, tablespaceoid oid) /*in func*/ RETURNS oid AS /*in func*/ $$ /*in func*/ BEGIN /*in func*/ /*in func*/ CASE /*in func*/ WHEN (block_type = 'NAMESPACE') OR (block_type = 'ROLE') THEN RETURN tablespaceoid; /*in func*/ ELSE RETURN ( /*in func*/ CASE tablespaceoid /*in func*/ WHEN 0 THEN (SELECT dattablespace FROM pg_database WHERE datname = CURRENT_DATABASE()) /*in func*/ ELSE /*in func*/ tablespaceoid /*in func*/ END /*in func*/ ); /*in func*/ END CASE; /*in func*/ END; /*in func*/ $$ LANGUAGE plpgsql; /*in func*/ +CREATE OR REPLACE FUNCTION block_relation_on_seg0(rel regclass, block_type text, segexceeded boolean) RETURNS void AS $$ /*in func*/ DECLARE /*in func*/ bt int; /*in func*/ targetoid oid; /*in func*/ BEGIN /*in func*/ CASE block_type /*in func*/ WHEN 'NAMESPACE' THEN /*in func*/ bt = 0; /*in func*/ SELECT relnamespace INTO targetoid /*in func*/ FROM pg_class WHERE relname=rel::text; /*in func*/ WHEN 'ROLE' THEN /*in func*/ bt = 1; /*in func*/ SELECT relowner INTO targetoid /*in func*/ FROM pg_class WHERE relname=rel::text; /*in func*/ WHEN 'NAMESPACE_TABLESPACE' THEN /*in func*/ bt = 2; /*in func*/ SELECT relnamespace INTO targetoid /*in func*/ FROM pg_class WHERE relname=rel::text; /*in func*/ WHEN 'ROLE_TABLESPACE' THEN /*in func*/ bt = 3; /*in func*/ SELECT relowner INTO targetoid /*in func*/ FROM pg_class WHERE relname=rel::text; /*in func*/ END CASE; /*in func*/ PERFORM diskquota.refresh_rejectmap( /*in func*/ ARRAY[ /*in func*/ ROW (targetoid, /*in func*/ (SELECT oid FROM pg_database WHERE datname = CURRENT_DATABASE()), /*in func*/ (SELECT get_real_tablespace_oid( /*in func*/ block_type, /*in func*/ (SELECT pg_class.reltablespace FROM pg_class WHERE relname = rel::TEXT) /*in func*/ )), /*in func*/ bt, /*in func*/ segexceeded) /*in func*/ ]::diskquota.rejectmap_entry[], /*in func*/ ARRAY[rel]::oid[]) /*in func*/ FROM 
gp_dist_random('gp_id') WHERE gp_segment_id=0; /*in func*/ END; $$ /*in func*/ LANGUAGE 'plpgsql'; +CREATE FUNCTION + +-- 1. Test canceling the extending of an ordinary table. +CREATE TABLE blocked_t1(i int) DISTRIBUTED BY (i); +CREATE TABLE +INSERT INTO blocked_t1 SELECT generate_series(1, 100); +INSERT 0 100 +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) + +-- Insert a small amount of data into blocked_t1. It will hang up at check_rejectmap_by_relfilenode(). +1&: INSERT INTO blocked_t1 SELECT generate_series(1, 10000); + +SELECT gp_wait_until_triggered_fault('check_rejectmap_by_relfilenode', 1, dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_wait_until_triggered_fault +------------------------------- + Success: +(1 row) + +-- Dispatch rejectmap to seg0. +SELECT block_relation_on_seg0('blocked_t1'::regclass, 'NAMESPACE'::text, false); + block_relation_on_seg0 +------------------------ + +(1 row) + +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) + +-- Session 1 will return and emit an error message saying that the quota limit is exceeded on seg0. +1<: <... completed> +ERROR: schema's disk space quota exceeded with name: 2200 (seg0 127.0.0.1:7002 pid=841032) + +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_rejectmap +------------------- + +(1 row) + +-- 2. Test canceling the extending of a toast relation. +CREATE TABLE blocked_t2(i text) DISTRIBUTED BY (i); +CREATE TABLE +INSERT INTO blocked_t2 SELECT generate_series(1, 100); +INSERT 0 100 +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) + +-- Insert a small amount of data into blocked_t2. It will hang up at check_rejectmap_by_relfilenode(). +1&: INSERT INTO blocked_t2 SELECT generate_series(1, 10000); + +SELECT gp_wait_until_triggered_fault('check_rejectmap_by_relfilenode', 1, dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_wait_until_triggered_fault +------------------------------- + Success: +(1 row) + +-- Dispatch rejectmap to seg0. +SELECT block_relation_on_seg0('blocked_t2'::regclass, 'NAMESPACE'::text, false); + block_relation_on_seg0 +------------------------ + +(1 row) + +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) + +-- Session 1 will return and emit an error message saying that the quota limit is exceeded on seg0. +1<: <... completed> +ERROR: schema's disk space quota exceeded with name: 2200 (seg0 127.0.0.1:7002 pid=841032) + +-- Clean up the rejectmap on seg0. 
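+-- (Before clearing, the entries dispatched to seg0 could be inspected by reusing the dispatch idiom from above; an illustrative sketch only, output omitted: +-- SELECT diskquota.show_rejectmap() FROM gp_dist_random('gp_id') WHERE gp_segment_id=0;)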
+SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_rejectmap +------------------- + +(1 row) + +-- 3. Test canceling the extending of an appendonly relation. +CREATE TABLE blocked_t3(i int) WITH (appendonly=true) DISTRIBUTED BY (i); +CREATE TABLE +INSERT INTO blocked_t3 SELECT generate_series(1, 100); +INSERT 0 100 +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) + +-- Insert a small amount of data into blocked_t3. It will hang up at check_rejectmap_by_relfilenode(). +1&: INSERT INTO blocked_t3 SELECT generate_series(1, 10000); + +SELECT gp_wait_until_triggered_fault('check_rejectmap_by_relfilenode', 1, dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_wait_until_triggered_fault +------------------------------- + Success: +(1 row) + +-- Dispatch rejectmap to seg0. +SELECT block_relation_on_seg0('blocked_t3'::regclass, 'NAMESPACE'::text, false); + block_relation_on_seg0 +------------------------ + +(1 row) + +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) + +-- Session 1 will return and emit an error message saying that the quota limit is exceeded on seg0. +1<: <... completed> +ERROR: schema's disk space quota exceeded with name: 2200 (seg0 127.0.0.1:7002 pid=841032) + +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_rejectmap +------------------- + +(1 row) + +-- 4. Test canceling the extending of an index relation. +CREATE TABLE blocked_t4(i int) DISTRIBUTED BY (i); +CREATE TABLE +CREATE INDEX blocked_t4_index ON blocked_t4(i); +CREATE INDEX +INSERT INTO blocked_t4 SELECT generate_series(1, 100); +INSERT 0 100 +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) + +-- Insert a small amount of data into blocked_t4. It will hang up at check_rejectmap_by_relfilenode(). +1&: INSERT INTO blocked_t4 SELECT generate_series(1, 10000); + +SELECT gp_wait_until_triggered_fault('check_rejectmap_by_relfilenode', 1, dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_wait_until_triggered_fault +------------------------------- + Success: +(1 row) + +-- Dispatch rejectmap to seg0. +SELECT block_relation_on_seg0('blocked_t4_index'::regclass, 'NAMESPACE'::text, false); + block_relation_on_seg0 +------------------------ + +(1 row) + +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) + +-- Session 1 will return and emit an error message saying that the quota limit is exceeded on seg0. +1<: <... completed> +ERROR: schema's disk space quota exceeded with name: 2200 (seg0 127.0.0.1:7002 pid=841032) + +-- Clean up the rejectmap on seg0. 
+SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_rejectmap +------------------- + +(1 row) + +-- 5. Test error message for NAMESPACE_TABLESPACE_QUOTA when the quota limit is exceeded on segments. +CREATE TABLE blocked_t5(i int) DISTRIBUTED BY (i); +CREATE TABLE +INSERT INTO blocked_t5 SELECT generate_series(1, 100); +INSERT 0 100 +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +1&: INSERT INTO blocked_t5 SELECT generate_series(1, 10000); + +SELECT gp_wait_until_triggered_fault('check_rejectmap_by_relfilenode', 1, dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_wait_until_triggered_fault +------------------------------- + Success: +(1 row) + +SELECT block_relation_on_seg0('blocked_t5'::regclass, 'NAMESPACE_TABLESPACE'::text, true); + block_relation_on_seg0 +------------------------ + +(1 row) +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +1<: <... completed> +ERROR: tablespace: 1663, schema: 2200 diskquota exceeded per segment quota (seg0 127.0.0.1:7002 pid=841032) +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_rejectmap +------------------- + +(1 row) + +-- 6. Test error message for ROLE_TABLESPACE_QUOTA when the quota limit is exceeded on segments. +CREATE TABLE blocked_t6(i int) DISTRIBUTED BY (i); +CREATE TABLE +INSERT INTO blocked_t6 SELECT generate_series(1, 100); +INSERT 0 100 +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) + +1&: INSERT INTO blocked_t6 SELECT generate_series(1, 10000); + +SELECT gp_wait_until_triggered_fault('check_rejectmap_by_relfilenode', 1, dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_wait_until_triggered_fault +------------------------------- + Success: +(1 row) + +SELECT block_relation_on_seg0('blocked_t6'::regclass, 'ROLE_TABLESPACE'::text, true); + block_relation_on_seg0 +------------------------ + +(1 row) +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +1<: <... completed> +ERROR: tablespace: 1663, role: 10 diskquota exceeded per segment quota (seg0 127.0.0.1:7002 pid=841032) +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_rejectmap +------------------- + +(1 row) + +-- Do some clean-ups. 
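+-- (The 'enable_check_quota_by_relfilenode' fault enabled at the top of this file can be reset the same way as the other faults; a sketch: +-- SELECT gp_inject_fault_infinite('enable_check_quota_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0;)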
+DROP TABLE blocked_t1;
+DROP TABLE
+DROP TABLE blocked_t2;
+DROP TABLE
+DROP TABLE blocked_t3;
+DROP TABLE
+DROP TABLE blocked_t4;
+DROP TABLE
+DROP TABLE blocked_t5;
+DROP TABLE
+DROP TABLE blocked_t6;
+DROP TABLE
+
+--
+-- Below are helper functions for testing the addition of uncommitted relations to the rejectmap.
+--
+-- start_ignore
+CREATE OR REPLACE LANGUAGE plpython3u;
+CREATE LANGUAGE
+-- end_ignore
+CREATE TYPE cached_relation_entry AS ( reloid oid, relname text, relowner oid, relnamespace oid, reltablespace oid, relfilenode oid, segid int);
+CREATE TYPE
+
+-- This function dumps the given relation_cache entries to the given file.
+CREATE OR REPLACE FUNCTION dump_relation_cache_to_file(filename text) RETURNS void AS $$
+    rv = plpy.execute("""
+        SELECT (oid, relname, relowner, relnamespace, reltablespace, relfilenode, gp_segment_id)::cached_relation_entry
+        FROM gp_dist_random('pg_class')
+    """)
+    with open(filename, 'wt') as f:
+        for v in rv:
+            row = v['row']
+            # The composite type results are different between GP6 & GP7
+            if isinstance(row, dict):
+                r = "{0},{1},{2},{3},{4},{5},{6}".format(
+                    row['reloid'], row['relname'], row['relowner'],
+                    row['relnamespace'], row['reltablespace'],
+                    row['relfilenode'], row['segid'])
+            else:
+                r = row[1:-1]
+            f.write(r + '\n')
+$$ LANGUAGE plpython3u;
+CREATE FUNCTION
+
+-- This function reads relation_cache entries from the given file.
+CREATE OR REPLACE FUNCTION read_relation_cache_from_file(filename text) RETURNS SETOF cached_relation_entry AS $$
+    with open(filename) as f:
+        for l in f:
+            r = l.split(',')
+            yield (r[0], r[1], r[2], r[3], r[4], r[5], r[6])
+$$ LANGUAGE plpython3u;
+CREATE FUNCTION
+
+-- This function replaces the oid that appears in the auxiliary relation's name
+-- with the corresponding relname of that oid.
+CREATE OR REPLACE FUNCTION replace_oid_with_relname(given_name text, filename text) RETURNS text AS $$ /*in func*/ BEGIN /*in func*/ RETURN COALESCE( /*in func*/ REGEXP_REPLACE(given_name, /*in func*/ '^(pg_toast_|pg_aoseg_|pg_aovisimap_|pg_aoblkdir_|pg_aocsseg_)\d+', /*in func*/ '\1' || /*in func*/ (SELECT DISTINCT relname FROM read_relation_cache_from_file(filename) /*in func*/ WHERE REGEXP_REPLACE(given_name, '\D', '', 'g') <> '' /*in func*/ AND reloid=REGEXP_REPLACE(given_name, '\D', '', 'g')::oid), 'g'), given_name); /*in func*/ END; /*in func*/ $$ LANGUAGE plpgsql;
+CREATE FUNCTION
+
+-- This function helps dispatch rejectmap for the given relation to seg0.
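+-- (A sketch of what it dispatches, assuming the field order visible in the
+-- function body below: each diskquota.rejectmap_entry is built as
+--   ROW(target_oid, database_oid, tablespace_oid, target_type, segexceeded)
+-- where target_type 0/1/2/3 stands for NAMESPACE/ROLE/NAMESPACE_TABLESPACE/
+-- ROLE_TABLESPACE, matching the bt codes assigned in the CASE branches.)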
+CREATE OR REPLACE FUNCTION block_uncommitted_relation_on_seg0(rel text, block_type text, segexceeded boolean, filename text) RETURNS void AS $$ /*in func*/ DECLARE /*in func*/ bt int; /*in func*/ targetoid oid; /*in func*/ BEGIN /*in func*/ CASE block_type /*in func*/ WHEN 'NAMESPACE' THEN /*in func*/ bt = 0; /*in func*/ SELECT relnamespace INTO targetoid /*in func*/ FROM read_relation_cache_from_file(filename) /*in func*/ WHERE relname=rel::text AND segid=0; /*in func*/ WHEN 'ROLE' THEN /*in func*/ bt = 1; /*in func*/ SELECT relowner INTO targetoid /*in func*/ FROM read_relation_cache_from_file(filename) /*in func*/ WHERE relname=rel::text AND segid=0; /*in func*/ WHEN 'NAMESPACE_TABLESPACE' THEN /*in func*/ bt = 2; /*in func*/ SELECT relnamespace INTO targetoid /*in func*/ FROM read_relation_cache_from_file(filename) /*in func*/ WHERE relname=rel::text AND segid=0; /*in func*/ WHEN 'ROLE_TABLESPACE' THEN /*in func*/ bt = 3; /*in func*/ SELECT relowner INTO targetoid /*in func*/ FROM read_relation_cache_from_file(filename) /*in func*/ WHERE relname=rel::text AND segid=0; /*in func*/ END CASE; /*in func*/ PERFORM diskquota.refresh_rejectmap( /*in func*/ ARRAY[ /*in func*/ ROW (targetoid, /*in func*/ (SELECT oid FROM pg_database WHERE datname = CURRENT_DATABASE()), /*in func*/ (SELECT get_real_tablespace_oid( /*in func*/ block_type, /*in func*/ (SELECT reltablespace /*in func*/ FROM read_relation_cache_from_file(filename) /*in func*/ WHERE relname = rel::text /*in func*/ AND segid = 0) /*in func*/ )), /*in func*/ bt, /*in func*/ segexceeded) /*in func*/ ]::diskquota.rejectmap_entry[], /*in func*/ ARRAY[(SELECT reloid FROM read_relation_cache_from_file(filename) /*in func*/ WHERE relname=rel::text AND segid=0)::regclass]::oid[]) /*in func*/ FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; /*in func*/ END; $$ /*in func*/ LANGUAGE 'plpgsql'; +CREATE FUNCTION + +-- 7. Test that we are able to block an ordinary relation on seg0 by its relnamespace. +1: BEGIN; +BEGIN +1: CREATE TABLE blocked_t7(i int) DISTRIBUTED BY (i); +CREATE TABLE +1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); + dump_relation_cache_to_file +----------------------------- + +(1 row) +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +-- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). +1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE'::text, false, '/tmp/test_rejectmap.csv'::text); + block_uncommitted_relation_on_seg0 +------------------------------------ + +(1 row) +-- Show that blocked_t7 is blocked on seg0. 
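+-- (The verification query below, run from session 2, joins the per-segment
+-- rejectmap fetched via gp_dist_random('diskquota.rejectmap') with the
+-- relation-cache dump on (segid, relfilenode); this works even though
+-- blocked_t7 is uncommitted and thus invisible in pg_class from other sessions.)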
+2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text), be.target_type, be.target_oid FROM gp_dist_random('diskquota.rejectmap') AS be, read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0; + segid | relnamespace | reltablespace | relowner | replace_oid_with_relname | target_type | target_oid +-------+--------------+---------------+----------+--------------------------+-----------------+------------ + 0 | 2200 | 0 | 10 | blocked_t7 | NAMESPACE_QUOTA | 2200 +(1 row) +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +1<: <... completed> +ERROR: schema's disk space quota exceeded with name: 2200 (seg0 127.0.0.1:7002 pid=841032) +1: ABORT; +ROLLBACK +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_rejectmap +------------------- + +(1 row) + +-- 8. Test that we are able to block an ordinary relation on seg0 by its relowner. +1: BEGIN; +BEGIN +1: CREATE TABLE blocked_t7(i int) DISTRIBUTED BY (i); +CREATE TABLE +1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); + dump_relation_cache_to_file +----------------------------- + +(1 row) +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +-- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). +1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'ROLE'::text, false, '/tmp/test_rejectmap.csv'::text); + block_uncommitted_relation_on_seg0 +------------------------------------ + +(1 row) +-- Show that blocked_t7 is blocked on seg0. +2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text), be.target_type, be.target_oid FROM gp_dist_random('diskquota.rejectmap') AS be, read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0; + segid | relnamespace | reltablespace | relowner | replace_oid_with_relname | target_type | target_oid +-------+--------------+---------------+----------+--------------------------+-------------+------------ + 0 | 2200 | 0 | 10 | blocked_t7 | ROLE_QUOTA | 10 +(1 row) +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +1<: <... completed> +ERROR: role's disk space quota exceeded with name: 10 (seg0 127.0.0.1:7002 pid=841032) +1: ABORT; +ROLLBACK +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_rejectmap +------------------- + +(1 row) + +-- 9. 
Test that we are able to block an ordinary relation on seg0 by its relnamespace and reltablespace. +1: BEGIN; +BEGIN +1: CREATE TABLE blocked_t7(i int) DISTRIBUTED BY (i); +CREATE TABLE +1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); + dump_relation_cache_to_file +----------------------------- + +(1 row) +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +-- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). +1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE_TABLESPACE'::text, false, '/tmp/test_rejectmap.csv'::text); + block_uncommitted_relation_on_seg0 +------------------------------------ + +(1 row) +-- Show that blocked_t7 is blocked on seg0. +2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text), be.target_type, be.target_oid FROM gp_dist_random('diskquota.rejectmap') AS be, read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0; + segid | relnamespace | reltablespace | relowner | replace_oid_with_relname | target_type | target_oid +-------+--------------+---------------+----------+--------------------------+----------------------------+------------ + 0 | 2200 | 0 | 10 | blocked_t7 | NAMESPACE_TABLESPACE_QUOTA | 2200 +(1 row) +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +1<: <... completed> +ERROR: tablespace: 1663, schema: 2200 diskquota exceeded (seg0 127.0.0.1:7002 pid=841032) +1: ABORT; +ROLLBACK +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_rejectmap +------------------- + +(1 row) + +-- 10. Test that we are able to block an ordinary relation on seg0 by its relowner and reltablespace. +1: BEGIN; +BEGIN +1: CREATE TABLE blocked_t7(i int) DISTRIBUTED BY (i); +CREATE TABLE +1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); + dump_relation_cache_to_file +----------------------------- + +(1 row) +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +-- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). +1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'ROLE_TABLESPACE'::text, false, '/tmp/test_rejectmap.csv'::text); + block_uncommitted_relation_on_seg0 +------------------------------------ + +(1 row) +-- Show that blocked_t7 is blocked on seg0. 
+2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text), be.target_type, be.target_oid FROM gp_dist_random('diskquota.rejectmap') AS be, read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0; + segid | relnamespace | reltablespace | relowner | replace_oid_with_relname | target_type | target_oid +-------+--------------+---------------+----------+--------------------------+-----------------------+------------ + 0 | 2200 | 0 | 10 | blocked_t7 | ROLE_TABLESPACE_QUOTA | 10 +(1 row) +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +1<: <... completed> +ERROR: tablespace: 1663, role: 10 diskquota exceeded (seg0 127.0.0.1:7002 pid=841032) +1: ABORT; +ROLLBACK +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_rejectmap +------------------- + +(1 row) + +-- 11. Test that we are able to block an ordinary relation on seg0 by its relnamespace and reltablespace (segexceeded=true). +1: BEGIN; +BEGIN +1: CREATE TABLE blocked_t7(i int) DISTRIBUTED BY (i); +CREATE TABLE +1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); + dump_relation_cache_to_file +----------------------------- + +(1 row) +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +-- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). +1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE_TABLESPACE'::text, true, '/tmp/test_rejectmap.csv'::text); + block_uncommitted_relation_on_seg0 +------------------------------------ + +(1 row) +-- Show that blocked_t7 is blocked on seg0. +2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text), be.target_type, be.target_oid FROM gp_dist_random('diskquota.rejectmap') AS be, read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0; + segid | relnamespace | reltablespace | relowner | replace_oid_with_relname | target_type | target_oid +-------+--------------+---------------+----------+--------------------------+----------------------------+------------ + 0 | 2200 | 0 | 10 | blocked_t7 | NAMESPACE_TABLESPACE_QUOTA | 2200 +(1 row) +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +1<: <... completed> +ERROR: tablespace: 1663, schema: 2200 diskquota exceeded per segment quota (seg0 127.0.0.1:7002 pid=841032) +1: ABORT; +ROLLBACK +-- Clean up the rejectmap on seg0. 
+SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_rejectmap +------------------- + +(1 row) + +-- 12. Test that we are able to block an ordinary relation on seg0 by its relowner and reltablespace (segexceeded=true). +1: BEGIN; +BEGIN +1: CREATE TABLE blocked_t7(i int) DISTRIBUTED BY (i); +CREATE TABLE +1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); + dump_relation_cache_to_file +----------------------------- + +(1 row) +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +-- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). +1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'ROLE_TABLESPACE'::text, true, '/tmp/test_rejectmap.csv'::text); + block_uncommitted_relation_on_seg0 +------------------------------------ + +(1 row) +-- Show that blocked_t7 is blocked on seg0. +2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text), be.target_type, be.target_oid FROM gp_dist_random('diskquota.rejectmap') AS be, read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0; + segid | relnamespace | reltablespace | relowner | replace_oid_with_relname | target_type | target_oid +-------+--------------+---------------+----------+--------------------------+-----------------------+------------ + 0 | 2200 | 0 | 10 | blocked_t7 | ROLE_TABLESPACE_QUOTA | 10 +(1 row) +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +1<: <... completed> +ERROR: tablespace: 1663, role: 10 diskquota exceeded per segment quota (seg0 127.0.0.1:7002 pid=841032) +1: ABORT; +ROLLBACK +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_rejectmap +------------------- + +(1 row) + +-- 13. Test that we are able to block a toast relation on seg0 by its namespace. +1: BEGIN; +BEGIN +1: CREATE TABLE blocked_t7(i text) DISTRIBUTED BY (i); +CREATE TABLE +1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); + dump_relation_cache_to_file +----------------------------- + +(1 row) +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +-- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). +1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE'::text, true, '/tmp/test_rejectmap.csv'::text); + block_uncommitted_relation_on_seg0 +------------------------------------ + +(1 row) +-- Show that blocked_t7 is blocked on seg0. 
+2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text) AS relname, be.target_type, be.target_oid FROM gp_dist_random('diskquota.rejectmap') AS be, read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0 ORDER BY relname DESC; + segid | relnamespace | reltablespace | relowner | relname | target_type | target_oid +-------+--------------+---------------+----------+---------------------------+-----------------+------------ + 0 | 99 | 0 | 10 | pg_toast_blocked_t7_index | NAMESPACE_QUOTA | 2200 + 0 | 99 | 0 | 10 | pg_toast_blocked_t7 | NAMESPACE_QUOTA | 2200 + 0 | 2200 | 0 | 10 | blocked_t7 | NAMESPACE_QUOTA | 2200 +(3 rows) +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +1<: <... completed> +ERROR: schema's disk space quota exceeded with name: 2200 (seg0 127.0.0.1:7002 pid=841032) +1: ABORT; +ROLLBACK +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_rejectmap +------------------- + +(1 row) + +-- 14. Test that we are able to block an appendonly relation on seg0 by its namespace. +1: BEGIN; +BEGIN +1: CREATE TABLE blocked_t7(i int) WITH (appendonly=true) DISTRIBUTED BY (i); +CREATE TABLE +1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); + dump_relation_cache_to_file +----------------------------- + +(1 row) +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +-- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). +1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE'::text, true, '/tmp/test_rejectmap.csv'::text); + block_uncommitted_relation_on_seg0 +------------------------------------ + +(1 row) +-- Show that blocked_t7 is blocked on seg0. +2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text) AS relname, be.target_type, be.target_oid FROM gp_dist_random('diskquota.rejectmap') AS be, read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0 ORDER BY relname DESC; + segid | relnamespace | reltablespace | relowner | relname | target_type | target_oid +-------+--------------+---------------+----------+-------------------------------+-----------------+------------ + 0 | 6104 | 0 | 10 | pg_aovisimap_blocked_t7_index | NAMESPACE_QUOTA | 2200 + 0 | 6104 | 0 | 10 | pg_aovisimap_blocked_t7 | NAMESPACE_QUOTA | 2200 + 0 | 6104 | 0 | 10 | pg_aoseg_blocked_t7 | NAMESPACE_QUOTA | 2200 + 0 | 2200 | 0 | 10 | blocked_t7 | NAMESPACE_QUOTA | 2200 +(4 rows) +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +1<: <... 
completed> +ERROR: schema's disk space quota exceeded with name: 2200 (seg0 127.0.0.1:7002 pid=841032) +1: ABORT; +ROLLBACK +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_rejectmap +------------------- + +(1 row) + +-- 15. Test that we are able to block an appendonly (column oriented) relation on seg0 by its namespace. +1: BEGIN; +BEGIN +1: CREATE TABLE blocked_t7(i int) WITH (appendonly=true, orientation=column) DISTRIBUTED BY (i); +CREATE TABLE +1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); + dump_relation_cache_to_file +----------------------------- + +(1 row) +-- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +-- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). +1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); +SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE'::text, true, '/tmp/test_rejectmap.csv'::text); + block_uncommitted_relation_on_seg0 +------------------------------------ + +(1 row) +-- Show that blocked_t7 is blocked on seg0. +2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text) AS relname, be.target_type, be.target_oid FROM gp_dist_random('diskquota.rejectmap') AS be, read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0 ORDER BY relname DESC; + segid | relnamespace | reltablespace | relowner | relname | target_type | target_oid +-------+--------------+---------------+----------+-------------------------------+-----------------+------------ + 0 | 6104 | 0 | 10 | pg_aovisimap_blocked_t7_index | NAMESPACE_QUOTA | 2200 + 0 | 6104 | 0 | 10 | pg_aovisimap_blocked_t7 | NAMESPACE_QUOTA | 2200 + 0 | 6104 | 0 | 10 | pg_aocsseg_blocked_t7 | NAMESPACE_QUOTA | 2200 + 0 | 2200 | 0 | 10 | blocked_t7 | NAMESPACE_QUOTA | 2200 +(4 rows) +SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; + gp_inject_fault_infinite +-------------------------- + Success: +(1 row) +1<: <... completed> +ERROR: schema's disk space quota exceeded with name: 2200 (seg0 127.0.0.1:7002 pid=841032) +1: ABORT; +ROLLBACK +-- Clean up the rejectmap on seg0. +SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; + refresh_rejectmap +------------------- + +(1 row) + +-- Reset fault injection points set by us at the top of this test. 
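+-- (All of the cases above follow the same fault-injection pattern:
+--   1. suspend check_rejectmap_by_relfilenode on seg0;
+--   2. kick off the INSERT in a background session (1&:);
+--   3. gp_wait_until_triggered_fault() to make sure the INSERT is parked;
+--   4. dispatch a rejectmap entry for the target to seg0;
+--   5. reset the fault so the INSERT resumes and hits the quota error.)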
+SELECT gp_inject_fault_infinite('enable_check_quota_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0;
+ gp_inject_fault_infinite
+--------------------------
+ Success:
+(1 row)
diff --git a/tests/isolation2/expected7/test_relation_cache.out b/tests/isolation2/expected7/test_relation_cache.out
new file mode 100644
index 00000000000..14ad39661aa
--- /dev/null
+++ b/tests/isolation2/expected7/test_relation_cache.out
@@ -0,0 +1,70 @@
+CREATE DATABASE tempdb1;
+CREATE DATABASE
+CREATE DATABASE tempdb2;
+CREATE DATABASE
+
+-- prepare extension
+1:@db_name tempdb1: CREATE EXTENSION diskquota;
+CREATE EXTENSION
+1:@db_name tempdb1: SELECT diskquota.wait_for_worker_new_epoch();
+ wait_for_worker_new_epoch
+---------------------------
+ t
+(1 row)
+2:@db_name tempdb2: CREATE EXTENSION diskquota;
+CREATE EXTENSION
+2:@db_name tempdb2: SELECT diskquota.wait_for_worker_new_epoch();
+ wait_for_worker_new_epoch
+---------------------------
+ t
+(1 row)
+
+-- create a table in tempdb1
+1:@db_name tempdb1: BEGIN;
+BEGIN
+1:@db_name tempdb1: CREATE TABLE t(i int);
+CREATE TABLE
+1:@db_name tempdb1: INSERT INTO t select generate_series(1, 10000);
+INSERT 0 10000
+
+-- query relation_cache in tempdb2
+2:@db_name tempdb2: SELECT count(*) from diskquota.show_relation_cache();
+ count
+-------
+ 0
+(1 row)
+
+1:@db_name tempdb1: ABORT;
+ROLLBACK
+
+1:@db_name tempdb1: SELECT diskquota.pause();
+ pause
+-------
+
+(1 row)
+1:@db_name tempdb1: SELECT diskquota.wait_for_worker_new_epoch();
+ wait_for_worker_new_epoch
+---------------------------
+ t
+(1 row)
+1:@db_name tempdb1: DROP EXTENSION diskquota;
+DROP EXTENSION
+2:@db_name tempdb2: SELECT diskquota.pause();
+ pause
+-------
+
+(1 row)
+2:@db_name tempdb2: SELECT diskquota.wait_for_worker_new_epoch();
+ wait_for_worker_new_epoch
+---------------------------
+ t
+(1 row)
+2:@db_name tempdb2: DROP EXTENSION diskquota;
+DROP EXTENSION
+1q: ...
+2q: ...
+
+DROP DATABASE tempdb1;
+DROP DATABASE
+DROP DATABASE tempdb2;
+DROP DATABASE
diff --git a/tests/isolation2/expected7/test_relation_size.out b/tests/isolation2/expected7/test_relation_size.out
new file mode 100644
index 00000000000..3ddafe8fda5
--- /dev/null
+++ b/tests/isolation2/expected7/test_relation_size.out
@@ -0,0 +1,87 @@
+--
+-- 1. Test that when a relation is dropped before diskquota.relation_size()
+-- applies stat(2) to the physical file, diskquota.relation_size() consumes
+-- the error and returns 0.
+--
+
+CREATE TABLE t_dropped(i int) DISTRIBUTED BY (i);
+CREATE TABLE
+-- Insert a small amount of data into 't_dropped'.
+INSERT INTO t_dropped SELECT generate_series(1, 100);
+INSERT 0 100
+-- Shows that the size of the relation is not zero.
+SELECT diskquota.relation_size('t_dropped');
+ relation_size
+---------------
+ 98304
+(1 row)
+
+-- Inject 'suspension' on all primary segments.
+SELECT gp_inject_fault_infinite('diskquota_before_stat_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content>=0;
+ gp_inject_fault_infinite
+--------------------------
+ Success:
+ Success:
+ Success:
+(3 rows)
+
+-- Session 1 will hang before applying stat(2) to the physical file.
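+-- (diskquota.relation_size() presumably stat(2)s the physical files backing
+-- each relfilenode on every segment; the point of the test below is that an
+-- ENOENT caused by the concurrent DROP is swallowed and counted as 0 bytes
+-- instead of being raised to the caller.)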
+1&: SELECT diskquota.relation_size('t_dropped'); +-- Wait until the fault is triggered to avoid the following race condition: +-- The 't_dropped' table is dropped before evaluating "SELECT diskquota.relation_size('t_dropped')" +-- and the query will fail with 'ERROR: relation "t_dropped" does not exist' +SELECT gp_wait_until_triggered_fault('diskquota_before_stat_relfilenode', 1, dbid) FROM gp_segment_configuration WHERE role='p' AND content>=0; + gp_wait_until_triggered_fault +------------------------------- + Success: + Success: + Success: +(3 rows) +-- Drop the table. +DROP TABLE t_dropped; +DROP TABLE +-- Remove the injected 'suspension'. +SELECT gp_inject_fault_infinite('diskquota_before_stat_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content>=0; + gp_inject_fault_infinite +-------------------------- + Success: + Success: + Success: +(3 rows) +-- Session 1 will continue and returns 0. +1<: <... completed> + relation_size +--------------- + 0 +(1 row) + +-- 2. Test whether relation size is correct under concurrent writes for AO tables. +-- Since no row is deleted, diskquota.relation_size() should be equal to +-- pg_relation_size(). + +CREATE TABLE t_ao(i int) WITH (appendonly=true) DISTRIBUTED BY (i); +CREATE TABLE +1: BEGIN; +BEGIN +1: INSERT INTO t_ao SELECT generate_series(1, 10000); +INSERT 0 10000 +2: BEGIN; +BEGIN +2: INSERT INTO t_ao SELECT generate_series(1, 10000); +INSERT 0 10000 +1: COMMIT; +COMMIT +2: COMMIT; +COMMIT +SELECT diskquota.relation_size('t_ao'); + relation_size +--------------- + 200400 +(1 row) +SELECT pg_relation_size('t_ao'); + pg_relation_size +------------------ + 200400 +(1 row) +DROP TABLE t_ao; +DROP TABLE diff --git a/tests/isolation2/expected7/test_truncate.out b/tests/isolation2/expected7/test_truncate.out new file mode 100644 index 00000000000..64b0fef803c --- /dev/null +++ b/tests/isolation2/expected7/test_truncate.out @@ -0,0 +1,86 @@ +-- Test various race conditions for TRUNCATE. + +-- Case 1: Pulling active table before swapping relfilenode +CREATE TABLE dummy_t1(i int) DISTRIBUTED BY (i); +CREATE TABLE +INSERT INTO dummy_t1 SELECT generate_series(1, 1000); +INSERT 0 1000 +-- Wait for the diskquota bgworker refreshing the size of 'dummy_t1'. +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +-- Shows that the result of pg_table_size() and diskquota.table_size are identical. +SELECT pg_table_size('dummy_t1'); + pg_table_size +--------------- + 98304 +(1 row) +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid='dummy_t1'::regclass ORDER BY segid; + tableid | size | segid +----------+-------+------- + dummy_t1 | 98304 | -1 + dummy_t1 | 32768 | 0 + dummy_t1 | 32768 | 1 + dummy_t1 | 32768 | 2 +(4 rows) + +SELECT gp_inject_fault_infinite('diskquota_after_smgrcreate', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content<>-1; + gp_inject_fault_infinite +-------------------------- + Success: + Success: + Success: +(3 rows) +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +1&: TRUNCATE dummy_t1; +SELECT gp_wait_until_triggered_fault('diskquota_after_smgrcreate', 1, dbid) FROM gp_segment_configuration WHERE role='p' AND content<>-1; + gp_wait_until_triggered_fault +------------------------------- + Success: + Success: + Success: +(3 rows) +-- Wait for the diskquota bgworker 'consumes' the newly created relfilenode from shmem. 
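+-- (By its name, wait_for_worker_new_epoch() blocks until the epoch counter
+-- exposed by diskquota.show_worker_epoch() advances, i.e. until the bgworker
+-- has completed a fresh refresh cycle after this point and therefore has
+-- drained the relfilenodes recorded in shmem.)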
+SELECT diskquota.wait_for_worker_new_epoch();
+ wait_for_worker_new_epoch
+---------------------------
+ t
+(1 row)
+SELECT gp_inject_fault_infinite('diskquota_after_smgrcreate', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content<>-1;
+ gp_inject_fault_infinite
+--------------------------
+ Success:
+ Success:
+ Success:
+(3 rows)
+1<: <... completed>
+TRUNCATE TABLE
+
+-- Wait for the diskquota bgworker refreshing the size of 'dummy_t1'.
+SELECT diskquota.wait_for_worker_new_epoch();
+ wait_for_worker_new_epoch
+---------------------------
+ t
+(1 row)
+-- Shows that the result of pg_table_size() and diskquota.table_size are identical.
+SELECT pg_table_size('dummy_t1');
+ pg_table_size
+---------------
+ 0
+(1 row)
+SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid='dummy_t1'::regclass ORDER BY segid;
+ tableid | size | segid
+----------+------+-------
+ dummy_t1 | 0 | -1
+ dummy_t1 | 0 | 0
+ dummy_t1 | 0 | 1
+ dummy_t1 | 0 | 2
+(4 rows)
+DROP TABLE dummy_t1;
+DROP TABLE
diff --git a/tests/isolation2/expected7/test_vacuum.out b/tests/isolation2/expected7/test_vacuum.out
new file mode 100644
index 00000000000..f40397f3ca8
--- /dev/null
+++ b/tests/isolation2/expected7/test_vacuum.out
@@ -0,0 +1,99 @@
+-- This file tests various race conditions when performing 'VACUUM FULL'.
+
+-- 1. When gpdb performs 'VACUUM FULL' on some relation, the operation can be summarized
+-- as the following 3 steps:
+-- s1) create a new temporary relation (smgrcreate hook will be triggered, newly
+-- created relfilenode will be put into shmem).
+-- s2) insert data into the newly created relation from the old relation (smgrextend
+-- hook will be triggered, newly created relfilenode will be put into shmem).
+-- s3) change the old relation's relfilenode to the newly created one.
+-- Consider the following situation:
+-- If the diskquota bgworker pulls active oids before the 'VACUUM FULL' operation finishes,
+-- the newly created relfilenode is translated to the newly created temporary relation's oid,
+-- so the old relation's size cannot be updated. We resolve it by making altered relations' oids
+-- constantly active so that the diskquota bgworker keeps updating the altered relation size
+-- during 'VACUUM FULL'.
+CREATE TABLE dummy_t1(i int) DISTRIBUTED BY (i);
+CREATE TABLE
+INSERT INTO dummy_t1 SELECT generate_series(1, 1000);
+INSERT 0 1000
+DELETE FROM dummy_t1;
+DELETE 1000
+-- Wait for the diskquota bgworker refreshing the size of 'dummy_t1'.
+SELECT diskquota.wait_for_worker_new_epoch();
+ wait_for_worker_new_epoch
+---------------------------
+ t
+(1 row)
+-- Shows that the result of pg_table_size() and diskquota.table_size are identical.
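+-- (diskquota.table_size stores one row per segment plus an aggregate row with
+-- segid = -1; "identical" throughout these tests means the segid = -1 row
+-- matches the cluster-wide pg_table_size() result.)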
+SELECT pg_table_size('dummy_t1'); + pg_table_size +--------------- + 98304 +(1 row) +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid='dummy_t1'::regclass ORDER BY segid; + tableid | size | segid +----------+-------+------- + dummy_t1 | 98304 | -1 + dummy_t1 | 32768 | 0 + dummy_t1 | 32768 | 1 + dummy_t1 | 32768 | 2 +(4 rows) +SELECT gp_inject_fault_infinite('object_access_post_alter', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content<>-1; + gp_inject_fault_infinite +-------------------------- + Success: + Success: + Success: +(3 rows) +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +1&: VACUUM FULL dummy_t1; +SELECT gp_wait_until_triggered_fault('object_access_post_alter', 1, dbid) FROM gp_segment_configuration WHERE role='p' AND content<>-1; + gp_wait_until_triggered_fault +------------------------------- + Success: + Success: + Success: +(3 rows) +-- Wait for the diskquota bgworker 'consumes' the newly created relfilenode from shmem. +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +SELECT gp_inject_fault_infinite('object_access_post_alter', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content<>-1; + gp_inject_fault_infinite +-------------------------- + Success: + Success: + Success: +(3 rows) +1<: <... completed> +VACUUM + +-- Wait for the diskquota bgworker refreshing the size of 'dummy_t1'. +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +-- Shows that the result of pg_table_size() and diskquota.table_size are identical. +SELECT pg_table_size('dummy_t1'); + pg_table_size +--------------- + 0 +(1 row) +SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid='dummy_t1'::regclass ORDER BY segid; + tableid | size | segid +----------+------+------- + dummy_t1 | 0 | -1 + dummy_t1 | 0 | 0 + dummy_t1 | 0 | 1 + dummy_t1 | 0 | 2 +(4 rows) +DROP TABLE dummy_t1; +DROP TABLE diff --git a/tests/regress/diskquota_schedule b/tests/regress/diskquota_schedule index 9805a8e4fc2..c3a25ff43f0 100644 --- a/tests/regress/diskquota_schedule +++ b/tests/regress/diskquota_schedule @@ -9,7 +9,6 @@ test: test_pause_and_resume test: test_pause_and_resume_multiple_db test: test_drop_after_pause test: test_show_status -test: test_update_db_cache test: test_quota_view_no_table # disable this test due to GPDB behavior change # test: test_table_size diff --git a/tests/regress/expected/test_update_db_cache.out b/tests/regress/expected/test_update_db_cache.out deleted file mode 100644 index 785c8bff409..00000000000 --- a/tests/regress/expected/test_update_db_cache.out +++ /dev/null @@ -1,64 +0,0 @@ ---start_ignore -CREATE DATABASE test_db_cache; ---end_ignore -\c test_db_cache -CREATE EXTENSION diskquota; -CREATE EXTENSION diskquota_test; --- Wait until the db cache gets updated -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -CREATE TABLE t(i) AS SELECT generate_series(1, 100000) -DISTRIBUTED BY (i); -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -SELECT tableid::regclass, size, segid -FROM diskquota.table_size -WHERE tableid = 't'::regclass -ORDER BY segid; - tableid | size | segid ----------+---------+------- - t | 3637248 | -1 - t | 1212416 | 0 - t | 1212416 | 1 - t | 1212416 | 2 
-(4 rows) - -DROP EXTENSION diskquota; --- Create table without extension -CREATE TABLE t_no_extension(i) AS SELECT generate_series(1, 100000) -DISTRIBUTED BY (i); -CREATE EXTENSION diskquota; -WARNING: [diskquota] diskquota is not ready because current database is not empty -HINT: please run 'SELECT diskquota.init_table_size_table();' to initialize diskquota -SELECT diskquota_test.wait('SELECT diskquota_test.check_cur_db_status(''UNREADY'');'); - wait ------- - t -(1 row) - --- Should find nothing since t_no_extension is not recorded. -SELECT diskquota.diskquota_fetch_table_stat(0, ARRAY[]::oid[]) -FROM gp_dist_random('gp_id'); - diskquota_fetch_table_stat ----------------------------- -(0 rows) - -DROP TABLE t; -DROP TABLE t_no_extension; -SELECT diskquota.pause(); - pause -------- - -(1 row) - -DROP EXTENSION diskquota; -\c contrib_regression -DROP DATABASE test_db_cache; diff --git a/tests/regress/sql/config.sql b/tests/regress/sql/config.sql index f6755ab2122..d8f54870ae4 100644 --- a/tests/regress/sql/config.sql +++ b/tests/regress/sql/config.sql @@ -6,6 +6,7 @@ CREATE DATABASE diskquota; \! gpconfig -c max_worker_processes -v 20 --skipvalidation \! gpconfig -c diskquota.hard_limit -v "off" --skipvalidation \! gpconfig -c diskquota.max_workers -v 1 --skipvalidation +\! gpconfig -c plpython3.python_path -v "'$GPHOME/lib/python'" --skipvalidation; \! gpconfig -c log_min_messages -v debug1 \! gpstop -raf diff --git a/tests/regress/sql/test_update_db_cache.sql b/tests/regress/sql/test_update_db_cache.sql deleted file mode 100644 index c426d1183f2..00000000000 --- a/tests/regress/sql/test_update_db_cache.sql +++ /dev/null @@ -1,43 +0,0 @@ ---start_ignore -CREATE DATABASE test_db_cache; ---end_ignore - -\c test_db_cache -CREATE EXTENSION diskquota; -CREATE EXTENSION diskquota_test; - --- Wait until the db cache gets updated -SELECT diskquota.wait_for_worker_new_epoch(); - -CREATE TABLE t(i) AS SELECT generate_series(1, 100000) -DISTRIBUTED BY (i); - -SELECT diskquota.wait_for_worker_new_epoch(); - -SELECT tableid::regclass, size, segid -FROM diskquota.table_size -WHERE tableid = 't'::regclass -ORDER BY segid; - -DROP EXTENSION diskquota; - --- Create table without extension -CREATE TABLE t_no_extension(i) AS SELECT generate_series(1, 100000) -DISTRIBUTED BY (i); - -CREATE EXTENSION diskquota; - -SELECT diskquota_test.wait('SELECT diskquota_test.check_cur_db_status(''UNREADY'');'); - --- Should find nothing since t_no_extension is not recorded. 
-SELECT diskquota.diskquota_fetch_table_stat(0, ARRAY[]::oid[]) -FROM gp_dist_random('gp_id'); - -DROP TABLE t; -DROP TABLE t_no_extension; -SELECT diskquota.pause(); - -DROP EXTENSION diskquota; - -\c contrib_regression -DROP DATABASE test_db_cache; diff --git a/upgrade_test/CMakeLists.txt b/upgrade_test/CMakeLists.txt index f151bd66c02..bf96af5f288 100644 --- a/upgrade_test/CMakeLists.txt +++ b/upgrade_test/CMakeLists.txt @@ -14,7 +14,14 @@ if (${GP_MAJOR_VERSION} EQUAL 6) ${CMAKE_CURRENT_SOURCE_DIR}/schedule_2.1--2.2 ${CMAKE_CURRENT_SOURCE_DIR}/schedule_2.2--2.1 ) + set(EXPECTED_DIR "${CMAKE_CURRENT_SOURCE_DIR}/expected") +else() + set(EXPECTED_DIR "${CMAKE_CURRENT_SOURCE_DIR}/expected7") endif() +list(APPEND schedule_files + ${CMAKE_CURRENT_SOURCE_DIR}/schedule_2.2--2.3 + ${CMAKE_CURRENT_SOURCE_DIR}/schedule_2.3--2.2 +) regresstarget_add( upgradecheck INIT_FILE @@ -22,7 +29,7 @@ regresstarget_add( SQL_DIR ${CMAKE_CURRENT_SOURCE_DIR}/sql EXPECTED_DIR - ${CMAKE_CURRENT_SOURCE_DIR}/expected + ${EXPECTED_DIR} RESULTS_DIR ${CMAKE_CURRENT_SOURCE_DIR}/results SCHEDULE_FILE diff --git a/upgrade_test/expected/2.2_set_quota.out b/upgrade_test/expected/2.2_set_quota.out index 400f3e5435e..2d2d5486c8a 100644 --- a/upgrade_test/expected/2.2_set_quota.out +++ b/upgrade_test/expected/2.2_set_quota.out @@ -4,7 +4,18 @@ GUC : shared_preload_libraries Master value: diskquota-2.2.so Segment value: diskquota-2.2.so create extension diskquota with version '2.2'; -\!sleep 5 +select diskquota.init_table_size_table(); + init_table_size_table +----------------------- + +(1 row) + +select diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + -- schema quota create schema s1; select diskquota.set_schema_quota('s1', '1 MB'); diff --git a/upgrade_test/expected/2.2_test_in_2.3_quota_create_in_2.2.out b/upgrade_test/expected/2.2_test_in_2.3_quota_create_in_2.2.out new file mode 100644 index 00000000000..aab1cb100c1 --- /dev/null +++ b/upgrade_test/expected/2.2_test_in_2.3_quota_create_in_2.2.out @@ -0,0 +1,16 @@ +-- need run 2.3_set_quota before run this test +-- FIXME add version check here +\! sleep 5 +insert into s1.a select generate_series(1, 10000000); -- fail. +ERROR: schema's disk space quota exceeded with name: s1 +insert into srole.b select generate_series(1, 100000); -- fail. +ERROR: role's disk space quota exceeded with name: u1 +insert into rolespcrole.b select generate_series(1, 100000); -- fail. +ERROR: tablespace: rolespc, role: rolespcu1 diskquota exceeded +insert into spcs1.a select generate_series(1, 100000); -- fail. 
+ERROR: tablespace: schemaspc, schema: spcs1 diskquota exceeded +drop table s1.a, srole.b, spcs1.a, rolespcrole.b; +drop schema s1, srole, spcs1, rolespcrole; +drop tablespace rolespc; +drop tablespace schemaspc; +drop role u1, rolespcu1; diff --git a/upgrade_test/expected/2.3_catalog.out b/upgrade_test/expected/2.3_catalog.out new file mode 100644 index 00000000000..0d74319bf04 --- /dev/null +++ b/upgrade_test/expected/2.3_catalog.out @@ -0,0 +1,310 @@ +CREATE FUNCTION typeid_to_name(oid[]) RETURNS name[] AS ' + WITH io AS ( + SELECT x.i AS index, x.o AS type_id FROM ( + SELECT generate_series(1, array_length($1, 1)) AS i, unnest($1) AS o + ) AS x + ) SELECT array_agg(typname order by io.index) FROM io, pg_type t WHERE io.type_id = t.oid; +' LANGUAGE sql STABLE; +-- types +SELECT + t1.typname, + array_agg(t2.typname order by a.atttypid) typname +FROM + pg_namespace n, + pg_class c, + pg_type t1, + pg_type t2, + pg_attribute a +WHERE + n.nspname = 'diskquota' + AND c.oid = t1.typrelid + AND n.oid = t1.typnamespace + AND a.attrelid = c.oid + AND t2.oid = a.atttypid +GROUP BY + t1.typname +ORDER BY + t1.typname; + typname | typname +----------------------------------------+---------------------------------------------------------------------------------- + diskquota_active_table_type | {int8,int2,oid} + quota_config | {int8,int4,int4,oid,oid,tid,xid,xid,cid,cid,float4} + rejectmap | {bool,int4,text,oid,oid,oid,oid,oid,oid} + rejectmap_entry | {bool,int4,oid,oid,oid} + rejectmap_entry_detail | {bool,int4,text,oid,oid,oid,oid,oid,oid} + relation_cache_detail | {char,int4,int4,oid,oid,oid,oid,oid,oid,oid,oid,_oid} + show_all_relation_view | {oid,oid,oid,oid} + show_fast_database_size_view | {numeric} + show_fast_role_quota_view | {name,int8,oid,numeric} + show_fast_role_tablespace_quota_view | {name,name,int8,oid,oid,numeric} + show_fast_schema_quota_view | {name,int8,oid,numeric} + show_fast_schema_tablespace_quota_view | {name,name,int8,oid,oid,numeric} + show_segment_ratio_quota_view | {name,oid,float4} + state | {int4,int4,oid,tid,xid,xid,cid,cid} + table_size | {int8,int2,int4,oid,oid,tid,xid,xid,cid,cid} + target | {int4,int4,int4,oid,oid,oid,tid,xid,xid,cid,cid} + target_rowid_seq | {bool,bool,name,int8,int8,int8,int8,int8,int8,int8,int4,oid,tid,xid,xid,cid,cid} +(17 rows) + +-- types end +-- tables +SELECT + relname, + typeid_to_name(ARRAY[c.reltype]::oid[]) as reltype, + typeid_to_name(ARRAY[c.reloftype]::oid[]) as reloftype +FROM + pg_class c, + pg_namespace n +WHERE + c.relnamespace = n.oid + AND n.nspname = 'diskquota' + and c.relkind != 'v' +ORDER BY + relname; + relname | reltype | reloftype +-----------------------------+-------------------------------+----------- + diskquota_active_table_type | {diskquota_active_table_type} | + quota_config | {quota_config} | + quota_config_pkey | | + rejectmap_entry | {rejectmap_entry} | + rejectmap_entry_detail | {rejectmap_entry_detail} | + relation_cache_detail | {relation_cache_detail} | + state | {state} | + state_pkey | | + table_size | {table_size} | + table_size_pkey | | + target | {target} | + target_pkey | | + target_rowid_seq | {target_rowid_seq} | +(13 rows) + +-- tables end +-- UDF +SELECT + proname, + typeid_to_name(ARRAY[prorettype]::oid[]) as prorettype, + typeid_to_name(proargtypes) as proargtypes, + typeid_to_name(proallargtypes) as proallargtypes, + proargmodes, + prosrc, + probin, + proacl +FROM + pg_namespace n, + pg_proc p +WHERE + n.nspname = 'diskquota' + AND n.oid = p.pronamespace + AND p.proname != 
'update_diskquota_db_list' -- update_diskquota_db_list in 1.0 can not be dropd, this is acceptable +ORDER BY + proname; + proname | prorettype | proargtypes | proallargtypes | proargmodes | prosrc | probin | proacl +-----------------------------+-------------------------------+-------------------------+-----------------+-------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------------------+-------- + diskquota_fetch_table_stat | {diskquota_active_table_type} | {int4,_oid} | | | diskquota_fetch_table_stat | $libdir/diskquota-2.3.so | + init_table_size_table | {void} | | | | init_table_size_table | $libdir/diskquota-2.3.so | + pause | {void} | | | | diskquota_pause | $libdir/diskquota-2.3.so | + pull_all_table_size | {record} | | {oid,int8,int2} | {o,o,o} | pull_all_table_size | $libdir/diskquota-2.3.so | + refresh_rejectmap | {void} | {_rejectmap_entry,_oid} | | | refresh_rejectmap | $libdir/diskquota-2.3.so | + relation_size | {int8} | {regclass} | | | +| | + | | | | | SELECT SUM(size)::bigint FROM ( +| | + | | | | | SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, +| | + | | | | | CASE WHEN EXISTS +| | + | | | | | (SELECT FROM pg_catalog.pg_attribute WHERE attrelid = 'pg_class'::regclass AND attname = 'relstorage') THEN relstorage::"char" ELSE ''::"char" END,+| | + | | | | | relam) AS size +| | + | | | | | FROM gp_dist_random('pg_class') as relstorage WHERE oid = relation +| | + | | | | | UNION ALL +| | + | | | | | SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, +| | + | | | | | CASE WHEN EXISTS +| | + | | | | | (SELECT FROM pg_catalog.pg_attribute WHERE attrelid = 'pg_class'::regclass AND attname = 'relstorage') THEN relstorage::"char" ELSE ''::"char" END,+| | + | | | | | relam) AS size +| | + | | | | | FROM pg_class as relstorage WHERE oid = relation +| | + | | | | | ) AS t | | + relation_size_local | {int8} | {oid,oid,char,char,oid} | | | relation_size_local | $libdir/diskquota-2.3.so | + resume | {void} | | | | diskquota_resume | $libdir/diskquota-2.3.so | + set_per_segment_quota | {void} | {text,float4} | | | set_per_segment_quota | $libdir/diskquota-2.3.so | + set_role_quota | {void} | {text,text} | | | set_role_quota | $libdir/diskquota-2.3.so | + set_role_tablespace_quota | {void} | {text,text,text} | | | set_role_tablespace_quota | $libdir/diskquota-2.3.so | + set_schema_quota | {void} | {text,text} | | | set_schema_quota | $libdir/diskquota-2.3.so | + set_schema_tablespace_quota | {void} | {text,text,text} | | | set_schema_tablespace_quota | $libdir/diskquota-2.3.so | + show_rejectmap | {rejectmap_entry_detail} | | | | show_rejectmap | $libdir/diskquota-2.3.so | + show_relation_cache | {relation_cache_detail} | | | | show_relation_cache | $libdir/diskquota-2.3.so | + show_relation_cache_all_seg | {relation_cache_detail} | | | | +| | + | | | | | WITH relation_cache AS ( +| | + | | | | | SELECT diskquota.show_relation_cache() AS a +| | + | | | | | FROM gp_dist_random('gp_id') +| | + | | | | | ) +| | + | | | | | SELECT (a).* FROM relation_cache; | | + show_worker_epoch | {int8} | | | | show_worker_epoch | $libdir/diskquota-2.3.so | + status | {record} | | {text,text} | {t,t} | diskquota_status | $libdir/diskquota-2.3.so | + wait_for_worker_new_epoch | {bool} | | | | wait_for_worker_new_epoch | $libdir/diskquota-2.3.so | +(19 rows) + +-- UDF end +-- views +SELECT + schemaname, + viewname, + 
definition +FROM + pg_views +WHERE + schemaname = 'diskquota' +ORDER by + schemaname, viewname; + schemaname | viewname | definition +------------+----------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + diskquota | rejectmap | SELECT bm.target_type, + + | | bm.target_oid, + + | | bm.database_oid, + + | | bm.tablespace_oid, + + | | bm.seg_exceeded, + + | | bm.dbnode, + + | | bm.spcnode, + + | | bm.relnode, + + | | bm.segid + + | | FROM diskquota.show_rejectmap() bm(target_type, target_oid, database_oid, tablespace_oid, seg_exceeded, dbnode, spcnode, relnode, segid); + diskquota | show_all_relation_view | WITH relation_cache AS ( + + | | SELECT f.relid, + + | | f.primary_table_oid, + + | | f.auxrel_num, + + | | f.owneroid, + + | | f.namespaceoid, + + | | f.backendid, + + | | f.spcnode, + + | | f.dbnode, + + | | f.relnode, + + | | f.relstorage, + + | | f.auxrel_oid, + + | | f.relam + + | | FROM diskquota.show_relation_cache() f(relid, primary_table_oid, auxrel_num, owneroid, namespaceoid, backendid, spcnode, dbnode, relnode, relstorage, auxrel_oid, relam)+ + | | ) + + | | SELECT union_relation.oid, + + | | union_relation.relowner, + + | | union_relation.relnamespace, + + | | union_relation.reltablespace + + | | FROM ( SELECT relation_cache.relid AS oid, + + | | relation_cache.owneroid AS relowner, + + | | relation_cache.namespaceoid AS relnamespace, + + | | relation_cache.spcnode AS reltablespace + + | | FROM relation_cache + + | | UNION + + | | SELECT pg_class.oid, + + | | pg_class.relowner, + + | | pg_class.relnamespace, + + | | pg_class.reltablespace + + | | FROM pg_class) union_relation + + | | GROUP BY union_relation.oid, union_relation.relowner, union_relation.relnamespace, union_relation.reltablespace; + diskquota | show_fast_database_size_view | SELECT (( SELECT sum(pg_relation_size((pg_class.oid)::regclass)) AS sum + + | | FROM pg_class + + | | WHERE (pg_class.oid <= (16384)::oid)) + ( SELECT sum(table_size.size) AS sum + + | | FROM diskquota.table_size + + | | WHERE (table_size.segid = '-1'::integer))) AS dbsize; + diskquota | show_fast_role_quota_view | WITH quota_usage AS ( + + | | SELECT show_all_relation_view.relowner, + + | | sum(table_size.size) AS total_size + + | | FROM diskquota.table_size, + + | | diskquota.show_all_relation_view + + | | WHERE ((table_size.tableid = show_all_relation_view.oid) AND (table_size.segid = '-1'::integer)) + + | | GROUP BY show_all_relation_view.relowner + + | | ) + + | | SELECT pg_roles.rolname AS role_name, + + | | quota_config.targetoid AS role_oid, + + | | quota_config.quotalimitmb AS quota_in_mb, + + | | COALESCE(quota_usage.total_size, (0)::numeric) AS rolsize_in_bytes + + | | FROM ((diskquota.quota_config + + | | JOIN pg_roles ON ((quota_config.targetoid = pg_roles.oid))) + + | | LEFT JOIN quota_usage ON ((pg_roles.oid = quota_usage.relowner))) + + | | WHERE (quota_config.quotatype = 1); + diskquota | show_fast_role_tablespace_quota_view | WITH default_tablespace AS ( + + | | SELECT pg_database.dattablespace + + | | FROM pg_database + + | | WHERE (pg_database.datname = current_database()) + + | | ), quota_usage AS ( + + | | SELECT show_all_relation_view.relowner, + + | | CASE + + | | WHEN (show_all_relation_view.reltablespace = (0)::oid) THEN default_tablespace.dattablespace + + | | ELSE show_all_relation_view.reltablespace + + | | END AS reltablespace, + + | | 
sum(table_size.size) AS total_size + + | | FROM diskquota.table_size, + + | | diskquota.show_all_relation_view, + + | | default_tablespace + + | | WHERE ((table_size.tableid = show_all_relation_view.oid) AND (table_size.segid = '-1'::integer)) + + | | GROUP BY show_all_relation_view.relowner, show_all_relation_view.reltablespace, default_tablespace.dattablespace + + | | ), full_quota_config AS ( + + | | SELECT target.primaryoid, + + | | target.tablespaceoid, + + | | config.quotalimitmb + + | | FROM diskquota.quota_config config, + + | | diskquota.target target + + | | WHERE (((config.targetoid = (target.rowid)::oid) AND (config.quotatype = target.quotatype)) AND (config.quotatype = 3)) + + | | ) + + | | SELECT pg_roles.rolname AS role_name, + + | | full_quota_config.primaryoid AS role_oid, + + | | pg_tablespace.spcname AS tablespace_name, + + | | full_quota_config.tablespaceoid AS tablespace_oid, + + | | full_quota_config.quotalimitmb AS quota_in_mb, + + | | COALESCE(quota_usage.total_size, (0)::numeric) AS rolsize_tablespace_in_bytes + + | | FROM (((full_quota_config + + | | JOIN pg_roles ON ((full_quota_config.primaryoid = pg_roles.oid))) + + | | JOIN pg_tablespace ON ((full_quota_config.tablespaceoid = pg_tablespace.oid))) + + | | LEFT JOIN quota_usage ON (((pg_roles.oid = quota_usage.relowner) AND (pg_tablespace.oid = quota_usage.reltablespace)))); + diskquota | show_fast_schema_quota_view | WITH quota_usage AS ( + + | | SELECT show_all_relation_view.relnamespace, + + | | sum(table_size.size) AS total_size + + | | FROM diskquota.table_size, + + | | diskquota.show_all_relation_view + + | | WHERE ((table_size.tableid = show_all_relation_view.oid) AND (table_size.segid = '-1'::integer)) + + | | GROUP BY show_all_relation_view.relnamespace + + | | ) + + | | SELECT pg_namespace.nspname AS schema_name, + + | | quota_config.targetoid AS schema_oid, + + | | quota_config.quotalimitmb AS quota_in_mb, + + | | COALESCE(quota_usage.total_size, (0)::numeric) AS nspsize_in_bytes + + | | FROM ((diskquota.quota_config + + | | JOIN pg_namespace ON ((quota_config.targetoid = pg_namespace.oid))) + + | | LEFT JOIN quota_usage ON ((pg_namespace.oid = quota_usage.relnamespace))) + + | | WHERE (quota_config.quotatype = 0); + diskquota | show_fast_schema_tablespace_quota_view | WITH default_tablespace AS ( + + | | SELECT pg_database.dattablespace + + | | FROM pg_database + + | | WHERE (pg_database.datname = current_database()) + + | | ), quota_usage AS ( + + | | SELECT show_all_relation_view.relnamespace, + + | | CASE + + | | WHEN (show_all_relation_view.reltablespace = (0)::oid) THEN default_tablespace.dattablespace + + | | ELSE show_all_relation_view.reltablespace + + | | END AS reltablespace, + + | | sum(table_size.size) AS total_size + + | | FROM diskquota.table_size, + + | | diskquota.show_all_relation_view, + + | | default_tablespace + + | | WHERE ((table_size.tableid = show_all_relation_view.oid) AND (table_size.segid = '-1'::integer)) + + | | GROUP BY show_all_relation_view.relnamespace, show_all_relation_view.reltablespace, default_tablespace.dattablespace + + | | ), full_quota_config AS ( + + | | SELECT target.primaryoid, + + | | target.tablespaceoid, + + | | config.quotalimitmb + + | | FROM diskquota.quota_config config, + + | | diskquota.target target + + | | WHERE (((config.targetoid = (target.rowid)::oid) AND (config.quotatype = target.quotatype)) AND (config.quotatype = 2)) + + | | ) + + | | SELECT pg_namespace.nspname AS schema_name, + + | | full_quota_config.primaryoid AS schema_oid, + + | | 
pg_tablespace.spcname AS tablespace_name, + + | | full_quota_config.tablespaceoid AS tablespace_oid, + + | | full_quota_config.quotalimitmb AS quota_in_mb, + + | | COALESCE(quota_usage.total_size, (0)::numeric) AS nspsize_tablespace_in_bytes + + | | FROM (((full_quota_config + + | | JOIN pg_namespace ON ((full_quota_config.primaryoid = pg_namespace.oid))) + + | | JOIN pg_tablespace ON ((full_quota_config.tablespaceoid = pg_tablespace.oid))) + + | | LEFT JOIN quota_usage ON (((pg_namespace.oid = quota_usage.relnamespace) AND (pg_tablespace.oid = quota_usage.reltablespace)))); + diskquota | show_segment_ratio_quota_view | SELECT pg_tablespace.spcname AS tablespace_name, + + | | pg_tablespace.oid AS tablespace_oid, + + | | quota_config.segratio AS per_seg_quota_ratio + + | | FROM (diskquota.quota_config + + | | JOIN pg_tablespace ON (((quota_config.targetoid = pg_tablespace.oid) AND (quota_config.quotatype = 4)))); +(8 rows) + +-- views end +DROP FUNCTION typeid_to_name (oid[]); diff --git a/upgrade_test/expected/2.3_cleanup_quota.out b/upgrade_test/expected/2.3_cleanup_quota.out new file mode 100644 index 00000000000..3935d709fd9 --- /dev/null +++ b/upgrade_test/expected/2.3_cleanup_quota.out @@ -0,0 +1 @@ +drop extension diskquota; diff --git a/upgrade_test/expected/2.3_install.out b/upgrade_test/expected/2.3_install.out new file mode 100644 index 00000000000..4738c064a82 --- /dev/null +++ b/upgrade_test/expected/2.3_install.out @@ -0,0 +1,13 @@ +-- cleanup previous diskquota installation +\! gpconfig -c shared_preload_libraries -v '' > /dev/null +\! gpstop -raf > /dev/null +\! dropdb --if-exists diskquota +-- TODO reset all diskquota GUC +\! gpstop -raf > /dev/null +-- setup basic environment +\! createdb diskquota +\! gpconfig -c shared_preload_libraries -v 'diskquota-2.3.so' > /dev/null +\! gpstop -raf > /dev/null +-- TODO setup GUC +\! gpconfig -c diskquota.naptime -v '1' > /dev/null +\! gpstop -raf > /dev/null diff --git a/upgrade_test/expected/2.3_migrate_to_version_2.3.out b/upgrade_test/expected/2.3_migrate_to_version_2.3.out new file mode 100644 index 00000000000..bc14c46c4b5 --- /dev/null +++ b/upgrade_test/expected/2.3_migrate_to_version_2.3.out @@ -0,0 +1,10 @@ +\! gpconfig -c shared_preload_libraries -v 'diskquota-2.3.so' > /dev/null +\! gpstop -raf > /dev/null +\! gpconfig -s 'shared_preload_libraries' +Values on all segments are consistent +GUC : shared_preload_libraries +Master value: diskquota-2.3.so +Segment value: diskquota-2.3.so +\c +alter extension diskquota update to '2.3'; +\! sleep 5 diff --git a/upgrade_test/expected/2.3_set_quota.out b/upgrade_test/expected/2.3_set_quota.out new file mode 100644 index 00000000000..57dc9145492 --- /dev/null +++ b/upgrade_test/expected/2.3_set_quota.out @@ -0,0 +1,68 @@ +\!gpconfig -s 'shared_preload_libraries' +Values on all segments are consistent +GUC : shared_preload_libraries +Master value: diskquota-2.3.so +Segment value: diskquota-2.3.so +create extension diskquota with version '2.3'; +select diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- schema quota +create schema s1; +select diskquota.set_schema_quota('s1', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +create table s1.a(i int) distributed by (i); +insert into s1.a select generate_series(1, 10000000); -- ok. 
+-- role quota +create schema srole; +create role u1 nologin; +NOTICE: resource queue required -- using default resource queue "pg_default" +create table srole.b (t text) distributed by (t); +alter table srole.b owner to u1; +select diskquota.set_role_quota('u1', '1 MB'); + set_role_quota +---------------- + +(1 row) + +insert into srole.b select generate_series(1,100000); -- ok. +-- schema tablespace quota +\! mkdir -p /tmp/schemaspc +create schema spcs1; +create tablespace schemaspc location '/tmp/schemaspc'; +select diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc','1 MB'); + set_schema_tablespace_quota +----------------------------- + +(1 row) + +create table spcs1.a(i int) tablespace schemaspc distributed by (i); +insert into spcs1.a select generate_series(1,100000); -- ok. +-- role tablespace quota +\! mkdir -p /tmp/rolespc +create tablespace rolespc location '/tmp/rolespc'; +create role rolespcu1 nologin; +NOTICE: resource queue required -- using default resource queue "pg_default" +create schema rolespcrole; +create table rolespcrole.b (t text) tablespace rolespc distributed by (t); +alter table rolespcrole.b owner to rolespcu1; +select diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '1 MB'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +insert into rolespcrole.b select generate_series(1,100000); -- ok. +\!sleep 5 +-- leaked resource: +-- role u1, rolespcu1 +-- table s1.a, srole.b spcs1.a, rolespcrole.b +-- schema s1, srole, spcs1, rolespcrole +-- tablespace schemaspc, rolespc diff --git a/upgrade_test/expected/2.3_test_in_2.2_quota_create_in_2.3.out b/upgrade_test/expected/2.3_test_in_2.2_quota_create_in_2.3.out new file mode 100644 index 00000000000..71c24e5865b --- /dev/null +++ b/upgrade_test/expected/2.3_test_in_2.2_quota_create_in_2.3.out @@ -0,0 +1,16 @@ +-- need run 2.2_set_quota before run this test +-- FIXME add version check here +\! sleep 5 +insert into s1.a select generate_series(1, 10000000); -- fail. +ERROR: schema's disk space quota exceeded with name: s1 +insert into srole.b select generate_series(1, 100000); -- fail. +ERROR: role's disk space quota exceeded with name: u1 +insert into rolespcrole.b select generate_series(1, 100000); -- fail. +ERROR: tablespace: rolespc, role: rolespcu1 diskquota exceeded +insert into spcs1.a select generate_series(1, 100000); -- fail. 
+ERROR: tablespace: schemaspc, schema: spcs1 diskquota exceeded +drop table s1.a, srole.b, spcs1.a, rolespcrole.b; +drop schema s1, srole, spcs1, rolespcrole; +drop tablespace rolespc; +drop tablespace schemaspc; +drop role u1, rolespcu1; diff --git a/upgrade_test/expected7/2.2_catalog.out b/upgrade_test/expected7/2.2_catalog.out new file mode 100644 index 00000000000..48d2934a6c9 --- /dev/null +++ b/upgrade_test/expected7/2.2_catalog.out @@ -0,0 +1,308 @@ +CREATE FUNCTION typeid_to_name(oid[]) RETURNS name[] AS ' + WITH io AS ( + SELECT x.i AS index, x.o AS type_id FROM ( + SELECT generate_series(1, array_length($1, 1)) AS i, unnest($1) AS o + ) AS x + ) SELECT array_agg(typname order by io.index) FROM io, pg_type t WHERE io.type_id = t.oid; +' LANGUAGE sql STABLE; +-- types +SELECT + t1.typname, + array_agg(t2.typname order by a.atttypid) typname +FROM + pg_namespace n, + pg_class c, + pg_type t1, + pg_type t2, + pg_attribute a +WHERE + n.nspname = 'diskquota' + AND c.oid = t1.typrelid + AND n.oid = t1.typnamespace + AND a.attrelid = c.oid + AND t2.oid = a.atttypid +GROUP BY + t1.typname +ORDER BY + t1.typname; + typname | typname +----------------------------------------+------------------------------------------------------- + diskquota_active_table_type | {int8,int2,oid} + quota_config | {int8,int4,int4,oid,oid,tid,xid,xid,cid,cid,float4} + rejectmap | {bool,int4,text,oid,oid,oid,oid,oid,oid} + rejectmap_entry | {bool,int4,oid,oid,oid} + rejectmap_entry_detail | {bool,int4,text,oid,oid,oid,oid,oid,oid} + relation_cache_detail | {char,int4,int4,oid,oid,oid,oid,oid,oid,oid,oid,_oid} + show_all_relation_view | {oid,oid,oid,oid} + show_fast_database_size_view | {numeric} + show_fast_role_quota_view | {name,int8,oid,numeric} + show_fast_role_tablespace_quota_view | {name,name,int8,oid,oid,numeric} + show_fast_schema_quota_view | {name,int8,oid,numeric} + show_fast_schema_tablespace_quota_view | {name,name,int8,oid,oid,numeric} + show_segment_ratio_quota_view | {name,oid,float4} + state | {int4,int4,oid,tid,xid,xid,cid,cid} + table_size | {int8,int2,int4,oid,oid,tid,xid,xid,cid,cid} + target | {int4,int4,int4,oid,oid,oid,tid,xid,xid,cid,cid} +(16 rows) + +-- types end +-- tables +SELECT + relname, + typeid_to_name(ARRAY[c.reltype]::oid[]) as reltype, + typeid_to_name(ARRAY[c.reloftype]::oid[]) as reloftype +FROM + pg_class c, + pg_namespace n +WHERE + c.relnamespace = n.oid + AND n.nspname = 'diskquota' + and c.relkind != 'v' +ORDER BY + relname; + relname | reltype | reloftype +-----------------------------+-------------------------------+----------- + diskquota_active_table_type | {diskquota_active_table_type} | + quota_config | {quota_config} | + quota_config_pkey | | + rejectmap_entry | {rejectmap_entry} | + rejectmap_entry_detail | {rejectmap_entry_detail} | + relation_cache_detail | {relation_cache_detail} | + state | {state} | + state_pkey | | + table_size | {table_size} | + table_size_pkey | | + target | {target} | + target_pkey | | + target_rowid_seq | | +(13 rows) + +-- tables end +-- UDF +SELECT + proname, + typeid_to_name(ARRAY[prorettype]::oid[]) as prorettype, + typeid_to_name(proargtypes) as proargtypes, + typeid_to_name(proallargtypes) as proallargtypes, + proargmodes, + prosrc, + probin, + proacl +FROM + pg_namespace n, + pg_proc p +WHERE + n.nspname = 'diskquota' + AND n.oid = p.pronamespace + AND p.proname != 'update_diskquota_db_list' -- update_diskquota_db_list in 1.0 can not be dropd, this is acceptable +ORDER BY + proname; + proname | prorettype | proargtypes | 
proallargtypes | proargmodes | prosrc | probin | proacl +-----------------------------+-------------------------------+-------------------------+-----------------+-------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------------------+-------- + diskquota_fetch_table_stat | {diskquota_active_table_type} | {int4,_oid} | | | diskquota_fetch_table_stat | $libdir/diskquota-2.2.so | + init_table_size_table | {void} | | | | init_table_size_table | $libdir/diskquota-2.2.so | + pause | {void} | | | | diskquota_pause | $libdir/diskquota-2.2.so | + pull_all_table_size | {record} | | {oid,int8,int2} | {o,o,o} | pull_all_table_size | $libdir/diskquota-2.2.so | + refresh_rejectmap | {void} | {_rejectmap_entry,_oid} | | | refresh_rejectmap | $libdir/diskquota-2.2.so | + relation_size | {int8} | {regclass} | | | +| | + | | | | | SELECT SUM(size)::bigint FROM ( +| | + | | | | | SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, +| | + | | | | | CASE WHEN EXISTS +| | + | | | | | (SELECT FROM pg_catalog.pg_attribute WHERE attrelid = 'pg_class'::regclass AND attname = 'relstorage') THEN relstorage::"char" ELSE ''::"char" END,+| | + | | | | | relam) AS size +| | + | | | | | FROM gp_dist_random('pg_class') as relstorage WHERE oid = relation +| | + | | | | | UNION ALL +| | + | | | | | SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, +| | + | | | | | CASE WHEN EXISTS +| | + | | | | | (SELECT FROM pg_catalog.pg_attribute WHERE attrelid = 'pg_class'::regclass AND attname = 'relstorage') THEN relstorage::"char" ELSE ''::"char" END,+| | + | | | | | relam) AS size +| | + | | | | | FROM pg_class as relstorage WHERE oid = relation +| | + | | | | | ) AS t | | + relation_size_local | {int8} | {oid,oid,char,char,oid} | | | relation_size_local | $libdir/diskquota-2.2.so | + resume | {void} | | | | diskquota_resume | $libdir/diskquota-2.2.so | + set_per_segment_quota | {void} | {text,float4} | | | set_per_segment_quota | $libdir/diskquota-2.2.so | + set_role_quota | {void} | {text,text} | | | set_role_quota | $libdir/diskquota-2.2.so | + set_role_tablespace_quota | {void} | {text,text,text} | | | set_role_tablespace_quota | $libdir/diskquota-2.2.so | + set_schema_quota | {void} | {text,text} | | | set_schema_quota | $libdir/diskquota-2.2.so | + set_schema_tablespace_quota | {void} | {text,text,text} | | | set_schema_tablespace_quota | $libdir/diskquota-2.2.so | + show_rejectmap | {rejectmap_entry_detail} | | | | show_rejectmap | $libdir/diskquota-2.2.so | + show_relation_cache | {relation_cache_detail} | | | | show_relation_cache | $libdir/diskquota-2.2.so | + show_relation_cache_all_seg | {relation_cache_detail} | | | | +| | + | | | | | WITH relation_cache AS ( +| | + | | | | | SELECT diskquota.show_relation_cache() AS a +| | + | | | | | FROM gp_dist_random('gp_id') +| | + | | | | | ) +| | + | | | | | SELECT (a).* FROM relation_cache; | | + show_worker_epoch | {int8} | | | | show_worker_epoch | $libdir/diskquota-2.2.so | + status | {record} | | {text,text} | {t,t} | diskquota_status | $libdir/diskquota-2.2.so | + wait_for_worker_new_epoch | {bool} | | | | wait_for_worker_new_epoch | $libdir/diskquota-2.2.so | +(19 rows) + +-- UDF end +-- views +SELECT + schemaname, + viewname, + definition +FROM + pg_views +WHERE + schemaname = 'diskquota' +ORDER by + schemaname, viewname; + schemaname | viewname | definition 
+------------+----------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + diskquota | rejectmap | SELECT bm.target_type, + + | | bm.target_oid, + + | | bm.database_oid, + + | | bm.tablespace_oid, + + | | bm.seg_exceeded, + + | | bm.dbnode, + + | | bm.spcnode, + + | | bm.relnode, + + | | bm.segid + + | | FROM diskquota.show_rejectmap() bm(target_type, target_oid, database_oid, tablespace_oid, seg_exceeded, dbnode, spcnode, relnode, segid); + diskquota | show_all_relation_view | WITH relation_cache AS ( + + | | SELECT f.relid, + + | | f.primary_table_oid, + + | | f.auxrel_num, + + | | f.owneroid, + + | | f.namespaceoid, + + | | f.backendid, + + | | f.spcnode, + + | | f.dbnode, + + | | f.relnode, + + | | f.relstorage, + + | | f.auxrel_oid, + + | | f.relam + + | | FROM diskquota.show_relation_cache() f(relid, primary_table_oid, auxrel_num, owneroid, namespaceoid, backendid, spcnode, dbnode, relnode, relstorage, auxrel_oid, relam)+ + | | ) + + | | SELECT DISTINCT union_relation.oid, + + | | union_relation.relowner, + + | | union_relation.relnamespace, + + | | union_relation.reltablespace + + | | FROM ( SELECT relation_cache.relid AS oid, + + | | relation_cache.owneroid AS relowner, + + | | relation_cache.namespaceoid AS relnamespace, + + | | relation_cache.spcnode AS reltablespace + + | | FROM relation_cache + + | | UNION + + | | SELECT pg_class.oid, + + | | pg_class.relowner, + + | | pg_class.relnamespace, + + | | pg_class.reltablespace + + | | FROM pg_class) union_relation; + diskquota | show_fast_database_size_view | SELECT (( SELECT sum(pg_relation_size((pg_class.oid)::regclass)) AS sum + + | | FROM pg_class + + | | WHERE (pg_class.oid <= (16384)::oid)) + ( SELECT sum(table_size.size) AS sum + + | | FROM diskquota.table_size + + | | WHERE (table_size.segid = '-1'::integer))) AS dbsize; + diskquota | show_fast_role_quota_view | WITH quota_usage AS ( + + | | SELECT show_all_relation_view.relowner, + + | | sum(table_size.size) AS total_size + + | | FROM diskquota.table_size, + + | | diskquota.show_all_relation_view + + | | WHERE ((table_size.tableid = show_all_relation_view.oid) AND (table_size.segid = '-1'::integer)) + + | | GROUP BY show_all_relation_view.relowner + + | | ) + + | | SELECT pg_roles.rolname AS role_name, + + | | quota_config.targetoid AS role_oid, + + | | quota_config.quotalimitmb AS quota_in_mb, + + | | COALESCE(quota_usage.total_size, (0)::numeric) AS rolsize_in_bytes + + | | FROM ((diskquota.quota_config + + | | JOIN pg_roles ON ((quota_config.targetoid = pg_roles.oid))) + + | | LEFT JOIN quota_usage ON ((pg_roles.oid = quota_usage.relowner))) + + | | WHERE (quota_config.quotatype = 1); + diskquota | show_fast_role_tablespace_quota_view | WITH default_tablespace AS ( + + | | SELECT pg_database.dattablespace + + | | FROM pg_database + + | | WHERE (pg_database.datname = current_database()) + + | | ), quota_usage AS ( + + | | SELECT show_all_relation_view.relowner, + + | | CASE + + | | WHEN (show_all_relation_view.reltablespace = (0)::oid) THEN default_tablespace.dattablespace + + | | ELSE show_all_relation_view.reltablespace + + | | END AS reltablespace, + + | | sum(table_size.size) AS total_size + + | | FROM diskquota.table_size, + + | | diskquota.show_all_relation_view, + + | | default_tablespace + + | | WHERE ((table_size.tableid = show_all_relation_view.oid) AND (table_size.segid = '-1'::integer)) + + | | 
GROUP BY show_all_relation_view.relowner, show_all_relation_view.reltablespace, default_tablespace.dattablespace + + | | ), full_quota_config AS ( + + | | SELECT target.primaryoid, + + | | target.tablespaceoid, + + | | config.quotalimitmb + + | | FROM diskquota.quota_config config, + + | | diskquota.target target + + | | WHERE ((config.targetoid = (target.rowid)::oid) AND (config.quotatype = target.quotatype) AND (config.quotatype = 3)) + + | | ) + + | | SELECT pg_roles.rolname AS role_name, + + | | full_quota_config.primaryoid AS role_oid, + + | | pg_tablespace.spcname AS tablespace_name, + + | | full_quota_config.tablespaceoid AS tablespace_oid, + + | | full_quota_config.quotalimitmb AS quota_in_mb, + + | | COALESCE(quota_usage.total_size, (0)::numeric) AS rolsize_tablespace_in_bytes + + | | FROM (((full_quota_config + + | | JOIN pg_roles ON ((full_quota_config.primaryoid = pg_roles.oid))) + + | | JOIN pg_tablespace ON ((full_quota_config.tablespaceoid = pg_tablespace.oid))) + + | | LEFT JOIN quota_usage ON (((pg_roles.oid = quota_usage.relowner) AND (pg_tablespace.oid = quota_usage.reltablespace)))); + diskquota | show_fast_schema_quota_view | WITH quota_usage AS ( + + | | SELECT show_all_relation_view.relnamespace, + + | | sum(table_size.size) AS total_size + + | | FROM diskquota.table_size, + + | | diskquota.show_all_relation_view + + | | WHERE ((table_size.tableid = show_all_relation_view.oid) AND (table_size.segid = '-1'::integer)) + + | | GROUP BY show_all_relation_view.relnamespace + + | | ) + + | | SELECT pg_namespace.nspname AS schema_name, + + | | quota_config.targetoid AS schema_oid, + + | | quota_config.quotalimitmb AS quota_in_mb, + + | | COALESCE(quota_usage.total_size, (0)::numeric) AS nspsize_in_bytes + + | | FROM ((diskquota.quota_config + + | | JOIN pg_namespace ON ((quota_config.targetoid = pg_namespace.oid))) + + | | LEFT JOIN quota_usage ON ((pg_namespace.oid = quota_usage.relnamespace))) + + | | WHERE (quota_config.quotatype = 0); + diskquota | show_fast_schema_tablespace_quota_view | WITH default_tablespace AS ( + + | | SELECT pg_database.dattablespace + + | | FROM pg_database + + | | WHERE (pg_database.datname = current_database()) + + | | ), quota_usage AS ( + + | | SELECT show_all_relation_view.relnamespace, + + | | CASE + + | | WHEN (show_all_relation_view.reltablespace = (0)::oid) THEN default_tablespace.dattablespace + + | | ELSE show_all_relation_view.reltablespace + + | | END AS reltablespace, + + | | sum(table_size.size) AS total_size + + | | FROM diskquota.table_size, + + | | diskquota.show_all_relation_view, + + | | default_tablespace + + | | WHERE ((table_size.tableid = show_all_relation_view.oid) AND (table_size.segid = '-1'::integer)) + + | | GROUP BY show_all_relation_view.relnamespace, show_all_relation_view.reltablespace, default_tablespace.dattablespace + + | | ), full_quota_config AS ( + + | | SELECT target.primaryoid, + + | | target.tablespaceoid, + + | | config.quotalimitmb + + | | FROM diskquota.quota_config config, + + | | diskquota.target target + + | | WHERE ((config.targetoid = (target.rowid)::oid) AND (config.quotatype = target.quotatype) AND (config.quotatype = 2)) + + | | ) + + | | SELECT pg_namespace.nspname AS schema_name, + + | | full_quota_config.primaryoid AS schema_oid, + + | | pg_tablespace.spcname AS tablespace_name, + + | | full_quota_config.tablespaceoid AS tablespace_oid, + + | | full_quota_config.quotalimitmb AS quota_in_mb, + + | | COALESCE(quota_usage.total_size, (0)::numeric) AS nspsize_tablespace_in_bytes + + | | FROM 
(((full_quota_config + + | | JOIN pg_namespace ON ((full_quota_config.primaryoid = pg_namespace.oid))) + + | | JOIN pg_tablespace ON ((full_quota_config.tablespaceoid = pg_tablespace.oid))) + + | | LEFT JOIN quota_usage ON (((pg_namespace.oid = quota_usage.relnamespace) AND (pg_tablespace.oid = quota_usage.reltablespace)))); + diskquota | show_segment_ratio_quota_view | SELECT pg_tablespace.spcname AS tablespace_name, + + | | pg_tablespace.oid AS tablespace_oid, + + | | quota_config.segratio AS per_seg_quota_ratio + + | | FROM (diskquota.quota_config + + | | JOIN pg_tablespace ON (((quota_config.targetoid = pg_tablespace.oid) AND (quota_config.quotatype = 4)))); +(8 rows) + +-- views end +DROP FUNCTION typeid_to_name (oid[]); diff --git a/upgrade_test/expected7/2.2_cleanup_quota.out b/upgrade_test/expected7/2.2_cleanup_quota.out new file mode 100644 index 00000000000..3935d709fd9 --- /dev/null +++ b/upgrade_test/expected7/2.2_cleanup_quota.out @@ -0,0 +1 @@ +drop extension diskquota; diff --git a/upgrade_test/expected7/2.2_install.out b/upgrade_test/expected7/2.2_install.out new file mode 100644 index 00000000000..c4b7f4c95ce --- /dev/null +++ b/upgrade_test/expected7/2.2_install.out @@ -0,0 +1,13 @@ +-- cleanup previous diskquota installation +\! gpconfig -c shared_preload_libraries -v '' > /dev/null +\! gpstop -raf > /dev/null +\! dropdb --if-exists diskquota +-- TODO reset all diskquota GUC +\! gpstop -raf > /dev/null +-- setup basic environment +\! createdb diskquota +\! gpconfig -c shared_preload_libraries -v 'diskquota-2.2.so' > /dev/null +\! gpstop -raf > /dev/null +-- TODO setup GUC +\! gpconfig -c diskquota.naptime -v '1' > /dev/null +\! gpstop -raf > /dev/null diff --git a/upgrade_test/expected7/2.2_migrate_to_version_2.2.out b/upgrade_test/expected7/2.2_migrate_to_version_2.2.out new file mode 100644 index 00000000000..d6fbb96247b --- /dev/null +++ b/upgrade_test/expected7/2.2_migrate_to_version_2.2.out @@ -0,0 +1,10 @@ +\! gpconfig -c shared_preload_libraries -v 'diskquota-2.2.so' > /dev/null +\! gpstop -raf > /dev/null +\! gpconfig -s 'shared_preload_libraries' +Values on all segments are consistent +GUC : shared_preload_libraries +Coordinator value: diskquota-2.2.so +Segment value: diskquota-2.2.so +\c +alter extension diskquota update to '2.2'; +\! sleep 5 diff --git a/upgrade_test/expected7/2.2_set_quota.out b/upgrade_test/expected7/2.2_set_quota.out new file mode 100644 index 00000000000..5083f5747f2 --- /dev/null +++ b/upgrade_test/expected7/2.2_set_quota.out @@ -0,0 +1,72 @@ +\!gpconfig -s 'shared_preload_libraries' +Values on all segments are consistent +GUC : shared_preload_libraries +Coordinator value: diskquota-2.2.so +Segment value: diskquota-2.2.so +create extension diskquota with version '2.2'; +select diskquota.init_table_size_table(); + init_table_size_table +----------------------- + +(1 row) + +select diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- schema quota +create schema s1; +select diskquota.set_schema_quota('s1', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +create table s1.a(i int) distributed by (i); +insert into s1.a select generate_series(1, 10000000); -- ok. +-- role quota +create schema srole; +create role u1 nologin; +create table srole.b (t text) distributed by (t); +alter table srole.b owner to u1; +select diskquota.set_role_quota('u1', '1 MB'); + set_role_quota +---------------- + +(1 row) + +insert into srole.b select generate_series(1,100000); -- ok. 
+-- schema tablespace quota +\! mkdir -p /tmp/schemaspc +create schema spcs1; +create tablespace schemaspc location '/tmp/schemaspc'; +select diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc','1 MB'); + set_schema_tablespace_quota +----------------------------- + +(1 row) + +create table spcs1.a(i int) tablespace schemaspc distributed by (i); +insert into spcs1.a select generate_series(1,100000); -- ok. +-- role tablespace quota +\! mkdir -p /tmp/rolespc +create tablespace rolespc location '/tmp/rolespc'; +create role rolespcu1 nologin; +create schema rolespcrole; +create table rolespcrole.b (t text) tablespace rolespc distributed by (t); +alter table rolespcrole.b owner to rolespcu1; +select diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '1 MB'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +insert into rolespcrole.b select generate_series(1,100000); -- ok. +\!sleep 5 +-- leaked resource: +-- role u1, rolespcu1 +-- table s1.a, srole.b spcs1.a, rolespcrole.b +-- schema s1, srole, spcs1, rolespcrole +-- tablespace schemaspc, rolespc diff --git a/upgrade_test/expected7/2.2_test_in_2.3_quota_create_in_2.2.out b/upgrade_test/expected7/2.2_test_in_2.3_quota_create_in_2.2.out new file mode 100644 index 00000000000..aab1cb100c1 --- /dev/null +++ b/upgrade_test/expected7/2.2_test_in_2.3_quota_create_in_2.2.out @@ -0,0 +1,16 @@ +-- need run 2.3_set_quota before run this test +-- FIXME add version check here +\! sleep 5 +insert into s1.a select generate_series(1, 10000000); -- fail. +ERROR: schema's disk space quota exceeded with name: s1 +insert into srole.b select generate_series(1, 100000); -- fail. +ERROR: role's disk space quota exceeded with name: u1 +insert into rolespcrole.b select generate_series(1, 100000); -- fail. +ERROR: tablespace: rolespc, role: rolespcu1 diskquota exceeded +insert into spcs1.a select generate_series(1, 100000); -- fail. 
+ERROR: tablespace: schemaspc, schema: spcs1 diskquota exceeded +drop table s1.a, srole.b, spcs1.a, rolespcrole.b; +drop schema s1, srole, spcs1, rolespcrole; +drop tablespace rolespc; +drop tablespace schemaspc; +drop role u1, rolespcu1; diff --git a/upgrade_test/expected7/2.3_catalog.out b/upgrade_test/expected7/2.3_catalog.out new file mode 100644 index 00000000000..016aecd94c9 --- /dev/null +++ b/upgrade_test/expected7/2.3_catalog.out @@ -0,0 +1,308 @@ +CREATE FUNCTION typeid_to_name(oid[]) RETURNS name[] AS ' + WITH io AS ( + SELECT x.i AS index, x.o AS type_id FROM ( + SELECT generate_series(1, array_length($1, 1)) AS i, unnest($1) AS o + ) AS x + ) SELECT array_agg(typname order by io.index) FROM io, pg_type t WHERE io.type_id = t.oid; +' LANGUAGE sql STABLE; +-- types +SELECT + t1.typname, + array_agg(t2.typname order by a.atttypid) typname +FROM + pg_namespace n, + pg_class c, + pg_type t1, + pg_type t2, + pg_attribute a +WHERE + n.nspname = 'diskquota' + AND c.oid = t1.typrelid + AND n.oid = t1.typnamespace + AND a.attrelid = c.oid + AND t2.oid = a.atttypid +GROUP BY + t1.typname +ORDER BY + t1.typname; + typname | typname +----------------------------------------+------------------------------------------------------- + diskquota_active_table_type | {int8,int2,oid} + quota_config | {int8,int4,int4,oid,oid,tid,xid,xid,cid,cid,float4} + rejectmap | {bool,int4,text,oid,oid,oid,oid,oid,oid} + rejectmap_entry | {bool,int4,oid,oid,oid} + rejectmap_entry_detail | {bool,int4,text,oid,oid,oid,oid,oid,oid} + relation_cache_detail | {char,int4,int4,oid,oid,oid,oid,oid,oid,oid,oid,_oid} + show_all_relation_view | {oid,oid,oid,oid} + show_fast_database_size_view | {numeric} + show_fast_role_quota_view | {name,int8,oid,numeric} + show_fast_role_tablespace_quota_view | {name,name,int8,oid,oid,numeric} + show_fast_schema_quota_view | {name,int8,oid,numeric} + show_fast_schema_tablespace_quota_view | {name,name,int8,oid,oid,numeric} + show_segment_ratio_quota_view | {name,oid,float4} + state | {int4,int4,oid,tid,xid,xid,cid,cid} + table_size | {int8,int2,int4,oid,oid,tid,xid,xid,cid,cid} + target | {int4,int4,int4,oid,oid,oid,tid,xid,xid,cid,cid} +(16 rows) + +-- types end +-- tables +SELECT + relname, + typeid_to_name(ARRAY[c.reltype]::oid[]) as reltype, + typeid_to_name(ARRAY[c.reloftype]::oid[]) as reloftype +FROM + pg_class c, + pg_namespace n +WHERE + c.relnamespace = n.oid + AND n.nspname = 'diskquota' + and c.relkind != 'v' +ORDER BY + relname; + relname | reltype | reloftype +-----------------------------+-------------------------------+----------- + diskquota_active_table_type | {diskquota_active_table_type} | + quota_config | {quota_config} | + quota_config_pkey | | + rejectmap_entry | {rejectmap_entry} | + rejectmap_entry_detail | {rejectmap_entry_detail} | + relation_cache_detail | {relation_cache_detail} | + state | {state} | + state_pkey | | + table_size | {table_size} | + table_size_pkey | | + target | {target} | + target_pkey | | + target_rowid_seq | | +(13 rows) + +-- tables end +-- UDF +SELECT + proname, + typeid_to_name(ARRAY[prorettype]::oid[]) as prorettype, + typeid_to_name(proargtypes) as proargtypes, + typeid_to_name(proallargtypes) as proallargtypes, + proargmodes, + prosrc, + probin, + proacl +FROM + pg_namespace n, + pg_proc p +WHERE + n.nspname = 'diskquota' + AND n.oid = p.pronamespace + AND p.proname != 'update_diskquota_db_list' -- update_diskquota_db_list in 1.0 can not be dropd, this is acceptable +ORDER BY + proname; + proname | prorettype | proargtypes | 
proallargtypes | proargmodes | prosrc | probin | proacl +-----------------------------+-------------------------------+-------------------------+-----------------+-------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------------------+-------- + diskquota_fetch_table_stat | {diskquota_active_table_type} | {int4,_oid} | | | diskquota_fetch_table_stat | $libdir/diskquota-2.3.so | + init_table_size_table | {void} | | | | init_table_size_table | $libdir/diskquota-2.3.so | + pause | {void} | | | | diskquota_pause | $libdir/diskquota-2.3.so | + pull_all_table_size | {record} | | {oid,int8,int2} | {o,o,o} | pull_all_table_size | $libdir/diskquota-2.3.so | + refresh_rejectmap | {void} | {_rejectmap_entry,_oid} | | | refresh_rejectmap | $libdir/diskquota-2.3.so | + relation_size | {int8} | {regclass} | | | +| | + | | | | | SELECT SUM(size)::bigint FROM ( +| | + | | | | | SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, +| | + | | | | | CASE WHEN EXISTS +| | + | | | | | (SELECT FROM pg_catalog.pg_attribute WHERE attrelid = 'pg_class'::regclass AND attname = 'relstorage') THEN relstorage::"char" ELSE ''::"char" END,+| | + | | | | | relam) AS size +| | + | | | | | FROM gp_dist_random('pg_class') as relstorage WHERE oid = relation +| | + | | | | | UNION ALL +| | + | | | | | SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, +| | + | | | | | CASE WHEN EXISTS +| | + | | | | | (SELECT FROM pg_catalog.pg_attribute WHERE attrelid = 'pg_class'::regclass AND attname = 'relstorage') THEN relstorage::"char" ELSE ''::"char" END,+| | + | | | | | relam) AS size +| | + | | | | | FROM pg_class as relstorage WHERE oid = relation +| | + | | | | | ) AS t | | + relation_size_local | {int8} | {oid,oid,char,char,oid} | | | relation_size_local | $libdir/diskquota-2.3.so | + resume | {void} | | | | diskquota_resume | $libdir/diskquota-2.3.so | + set_per_segment_quota | {void} | {text,float4} | | | set_per_segment_quota | $libdir/diskquota-2.3.so | + set_role_quota | {void} | {text,text} | | | set_role_quota | $libdir/diskquota-2.3.so | + set_role_tablespace_quota | {void} | {text,text,text} | | | set_role_tablespace_quota | $libdir/diskquota-2.3.so | + set_schema_quota | {void} | {text,text} | | | set_schema_quota | $libdir/diskquota-2.3.so | + set_schema_tablespace_quota | {void} | {text,text,text} | | | set_schema_tablespace_quota | $libdir/diskquota-2.3.so | + show_rejectmap | {rejectmap_entry_detail} | | | | show_rejectmap | $libdir/diskquota-2.3.so | + show_relation_cache | {relation_cache_detail} | | | | show_relation_cache | $libdir/diskquota-2.3.so | + show_relation_cache_all_seg | {relation_cache_detail} | | | | +| | + | | | | | WITH relation_cache AS ( +| | + | | | | | SELECT diskquota.show_relation_cache() AS a +| | + | | | | | FROM gp_dist_random('gp_id') +| | + | | | | | ) +| | + | | | | | SELECT (a).* FROM relation_cache; | | + show_worker_epoch | {int8} | | | | show_worker_epoch | $libdir/diskquota-2.3.so | + status | {record} | | {text,text} | {t,t} | diskquota_status | $libdir/diskquota-2.3.so | + wait_for_worker_new_epoch | {bool} | | | | wait_for_worker_new_epoch | $libdir/diskquota-2.3.so | +(19 rows) + +-- UDF end +-- views +SELECT + schemaname, + viewname, + definition +FROM + pg_views +WHERE + schemaname = 'diskquota' +ORDER by + schemaname, viewname; + schemaname | viewname | definition 
+------------+----------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + diskquota | rejectmap | SELECT bm.target_type, + + | | bm.target_oid, + + | | bm.database_oid, + + | | bm.tablespace_oid, + + | | bm.seg_exceeded, + + | | bm.dbnode, + + | | bm.spcnode, + + | | bm.relnode, + + | | bm.segid + + | | FROM diskquota.show_rejectmap() bm(target_type, target_oid, database_oid, tablespace_oid, seg_exceeded, dbnode, spcnode, relnode, segid); + diskquota | show_all_relation_view | WITH relation_cache AS ( + + | | SELECT f.relid, + + | | f.primary_table_oid, + + | | f.auxrel_num, + + | | f.owneroid, + + | | f.namespaceoid, + + | | f.backendid, + + | | f.spcnode, + + | | f.dbnode, + + | | f.relnode, + + | | f.relstorage, + + | | f.auxrel_oid, + + | | f.relam + + | | FROM diskquota.show_relation_cache() f(relid, primary_table_oid, auxrel_num, owneroid, namespaceoid, backendid, spcnode, dbnode, relnode, relstorage, auxrel_oid, relam)+ + | | ) + + | | SELECT DISTINCT union_relation.oid, + + | | union_relation.relowner, + + | | union_relation.relnamespace, + + | | union_relation.reltablespace + + | | FROM ( SELECT relation_cache.relid AS oid, + + | | relation_cache.owneroid AS relowner, + + | | relation_cache.namespaceoid AS relnamespace, + + | | relation_cache.spcnode AS reltablespace + + | | FROM relation_cache + + | | UNION + + | | SELECT pg_class.oid, + + | | pg_class.relowner, + + | | pg_class.relnamespace, + + | | pg_class.reltablespace + + | | FROM pg_class) union_relation; + diskquota | show_fast_database_size_view | SELECT (( SELECT sum(pg_relation_size((pg_class.oid)::regclass)) AS sum + + | | FROM pg_class + + | | WHERE (pg_class.oid <= (16384)::oid)) + ( SELECT sum(table_size.size) AS sum + + | | FROM diskquota.table_size + + | | WHERE (table_size.segid = '-1'::integer))) AS dbsize; + diskquota | show_fast_role_quota_view | WITH quota_usage AS ( + + | | SELECT show_all_relation_view.relowner, + + | | sum(table_size.size) AS total_size + + | | FROM diskquota.table_size, + + | | diskquota.show_all_relation_view + + | | WHERE ((table_size.tableid = show_all_relation_view.oid) AND (table_size.segid = '-1'::integer)) + + | | GROUP BY show_all_relation_view.relowner + + | | ) + + | | SELECT pg_roles.rolname AS role_name, + + | | quota_config.targetoid AS role_oid, + + | | quota_config.quotalimitmb AS quota_in_mb, + + | | COALESCE(quota_usage.total_size, (0)::numeric) AS rolsize_in_bytes + + | | FROM ((diskquota.quota_config + + | | JOIN pg_roles ON ((quota_config.targetoid = pg_roles.oid))) + + | | LEFT JOIN quota_usage ON ((pg_roles.oid = quota_usage.relowner))) + + | | WHERE (quota_config.quotatype = 1); + diskquota | show_fast_role_tablespace_quota_view | WITH default_tablespace AS ( + + | | SELECT pg_database.dattablespace + + | | FROM pg_database + + | | WHERE (pg_database.datname = current_database()) + + | | ), quota_usage AS ( + + | | SELECT show_all_relation_view.relowner, + + | | CASE + + | | WHEN (show_all_relation_view.reltablespace = (0)::oid) THEN default_tablespace.dattablespace + + | | ELSE show_all_relation_view.reltablespace + + | | END AS reltablespace, + + | | sum(table_size.size) AS total_size + + | | FROM diskquota.table_size, + + | | diskquota.show_all_relation_view, + + | | default_tablespace + + | | WHERE ((table_size.tableid = show_all_relation_view.oid) AND (table_size.segid = '-1'::integer)) + + | | 
GROUP BY show_all_relation_view.relowner, show_all_relation_view.reltablespace, default_tablespace.dattablespace + + | | ), full_quota_config AS ( + + | | SELECT target.primaryoid, + + | | target.tablespaceoid, + + | | config.quotalimitmb + + | | FROM diskquota.quota_config config, + + | | diskquota.target target + + | | WHERE ((config.targetoid = (target.rowid)::oid) AND (config.quotatype = target.quotatype) AND (config.quotatype = 3)) + + | | ) + + | | SELECT pg_roles.rolname AS role_name, + + | | full_quota_config.primaryoid AS role_oid, + + | | pg_tablespace.spcname AS tablespace_name, + + | | full_quota_config.tablespaceoid AS tablespace_oid, + + | | full_quota_config.quotalimitmb AS quota_in_mb, + + | | COALESCE(quota_usage.total_size, (0)::numeric) AS rolsize_tablespace_in_bytes + + | | FROM (((full_quota_config + + | | JOIN pg_roles ON ((full_quota_config.primaryoid = pg_roles.oid))) + + | | JOIN pg_tablespace ON ((full_quota_config.tablespaceoid = pg_tablespace.oid))) + + | | LEFT JOIN quota_usage ON (((pg_roles.oid = quota_usage.relowner) AND (pg_tablespace.oid = quota_usage.reltablespace)))); + diskquota | show_fast_schema_quota_view | WITH quota_usage AS ( + + | | SELECT show_all_relation_view.relnamespace, + + | | sum(table_size.size) AS total_size + + | | FROM diskquota.table_size, + + | | diskquota.show_all_relation_view + + | | WHERE ((table_size.tableid = show_all_relation_view.oid) AND (table_size.segid = '-1'::integer)) + + | | GROUP BY show_all_relation_view.relnamespace + + | | ) + + | | SELECT pg_namespace.nspname AS schema_name, + + | | quota_config.targetoid AS schema_oid, + + | | quota_config.quotalimitmb AS quota_in_mb, + + | | COALESCE(quota_usage.total_size, (0)::numeric) AS nspsize_in_bytes + + | | FROM ((diskquota.quota_config + + | | JOIN pg_namespace ON ((quota_config.targetoid = pg_namespace.oid))) + + | | LEFT JOIN quota_usage ON ((pg_namespace.oid = quota_usage.relnamespace))) + + | | WHERE (quota_config.quotatype = 0); + diskquota | show_fast_schema_tablespace_quota_view | WITH default_tablespace AS ( + + | | SELECT pg_database.dattablespace + + | | FROM pg_database + + | | WHERE (pg_database.datname = current_database()) + + | | ), quota_usage AS ( + + | | SELECT show_all_relation_view.relnamespace, + + | | CASE + + | | WHEN (show_all_relation_view.reltablespace = (0)::oid) THEN default_tablespace.dattablespace + + | | ELSE show_all_relation_view.reltablespace + + | | END AS reltablespace, + + | | sum(table_size.size) AS total_size + + | | FROM diskquota.table_size, + + | | diskquota.show_all_relation_view, + + | | default_tablespace + + | | WHERE ((table_size.tableid = show_all_relation_view.oid) AND (table_size.segid = '-1'::integer)) + + | | GROUP BY show_all_relation_view.relnamespace, show_all_relation_view.reltablespace, default_tablespace.dattablespace + + | | ), full_quota_config AS ( + + | | SELECT target.primaryoid, + + | | target.tablespaceoid, + + | | config.quotalimitmb + + | | FROM diskquota.quota_config config, + + | | diskquota.target target + + | | WHERE ((config.targetoid = (target.rowid)::oid) AND (config.quotatype = target.quotatype) AND (config.quotatype = 2)) + + | | ) + + | | SELECT pg_namespace.nspname AS schema_name, + + | | full_quota_config.primaryoid AS schema_oid, + + | | pg_tablespace.spcname AS tablespace_name, + + | | full_quota_config.tablespaceoid AS tablespace_oid, + + | | full_quota_config.quotalimitmb AS quota_in_mb, + + | | COALESCE(quota_usage.total_size, (0)::numeric) AS nspsize_tablespace_in_bytes + + | | FROM 
(((full_quota_config + + | | JOIN pg_namespace ON ((full_quota_config.primaryoid = pg_namespace.oid))) + + | | JOIN pg_tablespace ON ((full_quota_config.tablespaceoid = pg_tablespace.oid))) + + | | LEFT JOIN quota_usage ON (((pg_namespace.oid = quota_usage.relnamespace) AND (pg_tablespace.oid = quota_usage.reltablespace)))); + diskquota | show_segment_ratio_quota_view | SELECT pg_tablespace.spcname AS tablespace_name, + + | | pg_tablespace.oid AS tablespace_oid, + + | | quota_config.segratio AS per_seg_quota_ratio + + | | FROM (diskquota.quota_config + + | | JOIN pg_tablespace ON (((quota_config.targetoid = pg_tablespace.oid) AND (quota_config.quotatype = 4)))); +(8 rows) + +-- views end +DROP FUNCTION typeid_to_name (oid[]); diff --git a/upgrade_test/expected7/2.3_cleanup_quota.out b/upgrade_test/expected7/2.3_cleanup_quota.out new file mode 100644 index 00000000000..3935d709fd9 --- /dev/null +++ b/upgrade_test/expected7/2.3_cleanup_quota.out @@ -0,0 +1 @@ +drop extension diskquota; diff --git a/upgrade_test/expected7/2.3_install.out b/upgrade_test/expected7/2.3_install.out new file mode 100644 index 00000000000..4738c064a82 --- /dev/null +++ b/upgrade_test/expected7/2.3_install.out @@ -0,0 +1,13 @@ +-- cleanup previous diskquota installation +\! gpconfig -c shared_preload_libraries -v '' > /dev/null +\! gpstop -raf > /dev/null +\! dropdb --if-exists diskquota +-- TODO reset all diskquota GUC +\! gpstop -raf > /dev/null +-- setup basic environment +\! createdb diskquota +\! gpconfig -c shared_preload_libraries -v 'diskquota-2.3.so' > /dev/null +\! gpstop -raf > /dev/null +-- TODO setup GUC +\! gpconfig -c diskquota.naptime -v '1' > /dev/null +\! gpstop -raf > /dev/null diff --git a/upgrade_test/expected7/2.3_migrate_to_version_2.3.out b/upgrade_test/expected7/2.3_migrate_to_version_2.3.out new file mode 100644 index 00000000000..db67a0e36dd --- /dev/null +++ b/upgrade_test/expected7/2.3_migrate_to_version_2.3.out @@ -0,0 +1,10 @@ +\! gpconfig -c shared_preload_libraries -v 'diskquota-2.3.so' > /dev/null +\! gpstop -raf > /dev/null +\! gpconfig -s 'shared_preload_libraries' +Values on all segments are consistent +GUC : shared_preload_libraries +Coordinator value: diskquota-2.3.so +Segment value: diskquota-2.3.so +\c +alter extension diskquota update to '2.3'; +\! sleep 5 diff --git a/upgrade_test/expected7/2.3_set_quota.out b/upgrade_test/expected7/2.3_set_quota.out new file mode 100644 index 00000000000..114f346dddf --- /dev/null +++ b/upgrade_test/expected7/2.3_set_quota.out @@ -0,0 +1,66 @@ +\!gpconfig -s 'shared_preload_libraries' +Values on all segments are consistent +GUC : shared_preload_libraries +Coordinator value: diskquota-2.3.so +Segment value: diskquota-2.3.so +create extension diskquota with version '2.3'; +select diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- schema quota +create schema s1; +select diskquota.set_schema_quota('s1', '1 MB'); + set_schema_quota +------------------ + +(1 row) + +create table s1.a(i int) distributed by (i); +insert into s1.a select generate_series(1, 10000000); -- ok. +-- role quota +create schema srole; +create role u1 nologin; +create table srole.b (t text) distributed by (t); +alter table srole.b owner to u1; +select diskquota.set_role_quota('u1', '1 MB'); + set_role_quota +---------------- + +(1 row) + +insert into srole.b select generate_series(1,100000); -- ok. +-- schema tablespace quota +\! 
mkdir -p /tmp/schemaspc +create schema spcs1; +create tablespace schemaspc location '/tmp/schemaspc'; +select diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc','1 MB'); + set_schema_tablespace_quota +----------------------------- + +(1 row) + +create table spcs1.a(i int) tablespace schemaspc distributed by (i); +insert into spcs1.a select generate_series(1,100000); -- ok. +-- role tablespace quota +\! mkdir -p /tmp/rolespc +create tablespace rolespc location '/tmp/rolespc'; +create role rolespcu1 nologin; +create schema rolespcrole; +create table rolespcrole.b (t text) tablespace rolespc distributed by (t); +alter table rolespcrole.b owner to rolespcu1; +select diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '1 MB'); + set_role_tablespace_quota +--------------------------- + +(1 row) + +insert into rolespcrole.b select generate_series(1,100000); -- ok. +\!sleep 5 +-- leaked resource: +-- role u1, rolespcu1 +-- table s1.a, srole.b spcs1.a, rolespcrole.b +-- schema s1, srole, spcs1, rolespcrole +-- tablespace schemaspc, rolespc diff --git a/upgrade_test/expected7/2.3_test_in_2.2_quota_create_in_2.3.out b/upgrade_test/expected7/2.3_test_in_2.2_quota_create_in_2.3.out new file mode 100644 index 00000000000..71c24e5865b --- /dev/null +++ b/upgrade_test/expected7/2.3_test_in_2.2_quota_create_in_2.3.out @@ -0,0 +1,16 @@ +-- need run 2.2_set_quota before run this test +-- FIXME add version check here +\! sleep 5 +insert into s1.a select generate_series(1, 10000000); -- fail. +ERROR: schema's disk space quota exceeded with name: s1 +insert into srole.b select generate_series(1, 100000); -- fail. +ERROR: role's disk space quota exceeded with name: u1 +insert into rolespcrole.b select generate_series(1, 100000); -- fail. +ERROR: tablespace: rolespc, role: rolespcu1 diskquota exceeded +insert into spcs1.a select generate_series(1, 100000); -- fail. 
+ERROR: tablespace: schemaspc, schema: spcs1 diskquota exceeded +drop table s1.a, srole.b, spcs1.a, rolespcrole.b; +drop schema s1, srole, spcs1, rolespcrole; +drop tablespace rolespc; +drop tablespace schemaspc; +drop role u1, rolespcu1; diff --git a/upgrade_test/schedule_2.2--2.3 b/upgrade_test/schedule_2.2--2.3 new file mode 100644 index 00000000000..486775836d8 --- /dev/null +++ b/upgrade_test/schedule_2.2--2.3 @@ -0,0 +1,8 @@ +test: 2.2_install +test: 2.2_set_quota +test: 2.2_catalog +test: 2.3_migrate_to_version_2.3 +test: 2.3_catalog +# run 2.2 behavior test using 2.3 DDL and binary +test: 2.2_test_in_2.3_quota_create_in_2.2 +test: 2.2_cleanup_quota diff --git a/upgrade_test/schedule_2.3--2.2 b/upgrade_test/schedule_2.3--2.2 new file mode 100644 index 00000000000..0de828c96e1 --- /dev/null +++ b/upgrade_test/schedule_2.3--2.2 @@ -0,0 +1,8 @@ +test: 2.3_install +test: 2.3_set_quota +test: 2.3_catalog +test: 2.2_migrate_to_version_2.2 +test: 2.2_catalog +# run 2.3 behavior test using 2.2 DDL and binary +test: 2.3_test_in_2.2_quota_create_in_2.3 +test: 2.3_cleanup_quota diff --git a/upgrade_test/sql/2.2_set_quota.sql b/upgrade_test/sql/2.2_set_quota.sql index adaf8707508..8ccb3a80d44 100644 --- a/upgrade_test/sql/2.2_set_quota.sql +++ b/upgrade_test/sql/2.2_set_quota.sql @@ -1,7 +1,8 @@ \!gpconfig -s 'shared_preload_libraries' create extension diskquota with version '2.2'; -\!sleep 5 +select diskquota.init_table_size_table(); +select diskquota.wait_for_worker_new_epoch(); -- schema quota create schema s1; diff --git a/upgrade_test/sql/2.2_test_in_2.3_quota_create_in_2.2.sql b/upgrade_test/sql/2.2_test_in_2.3_quota_create_in_2.2.sql new file mode 100644 index 00000000000..e67027c7e6f --- /dev/null +++ b/upgrade_test/sql/2.2_test_in_2.3_quota_create_in_2.2.sql @@ -0,0 +1,16 @@ +-- need run 2.3_set_quota before run this test +-- FIXME add version check here + +\! sleep 5 + +insert into s1.a select generate_series(1, 10000000); -- fail. +insert into srole.b select generate_series(1, 100000); -- fail. + +insert into rolespcrole.b select generate_series(1, 100000); -- fail. +insert into spcs1.a select generate_series(1, 100000); -- fail. 
+ +drop table s1.a, srole.b, spcs1.a, rolespcrole.b; +drop schema s1, srole, spcs1, rolespcrole; +drop tablespace rolespc; +drop tablespace schemaspc; +drop role u1, rolespcu1; diff --git a/upgrade_test/sql/2.3_catalog.sql b/upgrade_test/sql/2.3_catalog.sql new file mode 100644 index 00000000000..ebf5f00aa56 --- /dev/null +++ b/upgrade_test/sql/2.3_catalog.sql @@ -0,0 +1,81 @@ +CREATE FUNCTION typeid_to_name(oid[]) RETURNS name[] AS ' + WITH io AS ( + SELECT x.i AS index, x.o AS type_id FROM ( + SELECT generate_series(1, array_length($1, 1)) AS i, unnest($1) AS o + ) AS x + ) SELECT array_agg(typname order by io.index) FROM io, pg_type t WHERE io.type_id = t.oid; +' LANGUAGE sql STABLE; + +-- types +SELECT + t1.typname, + array_agg(t2.typname order by a.atttypid) typname +FROM + pg_namespace n, + pg_class c, + pg_type t1, + pg_type t2, + pg_attribute a +WHERE + n.nspname = 'diskquota' + AND c.oid = t1.typrelid + AND n.oid = t1.typnamespace + AND a.attrelid = c.oid + AND t2.oid = a.atttypid +GROUP BY + t1.typname +ORDER BY + t1.typname; +-- types end + +-- tables +SELECT + relname, + typeid_to_name(ARRAY[c.reltype]::oid[]) as reltype, + typeid_to_name(ARRAY[c.reloftype]::oid[]) as reloftype +FROM + pg_class c, + pg_namespace n +WHERE + c.relnamespace = n.oid + AND n.nspname = 'diskquota' + and c.relkind != 'v' +ORDER BY + relname; +-- tables end + +-- UDF +SELECT + proname, + typeid_to_name(ARRAY[prorettype]::oid[]) as prorettype, + typeid_to_name(proargtypes) as proargtypes, + typeid_to_name(proallargtypes) as proallargtypes, + proargmodes, + prosrc, + probin, + proacl +FROM + pg_namespace n, + pg_proc p +WHERE + n.nspname = 'diskquota' + AND n.oid = p.pronamespace + AND p.proname != 'update_diskquota_db_list' -- update_diskquota_db_list in 1.0 can not be dropd, this is acceptable +ORDER BY + proname; +-- UDF end + +-- views +SELECT + schemaname, + viewname, + definition +FROM + pg_views +WHERE + schemaname = 'diskquota' +ORDER by + schemaname, viewname; +-- views end + +DROP FUNCTION typeid_to_name (oid[]); diff --git a/upgrade_test/sql/2.3_cleanup_quota.sql b/upgrade_test/sql/2.3_cleanup_quota.sql new file mode 100644 index 00000000000..3935d709fd9 --- /dev/null +++ b/upgrade_test/sql/2.3_cleanup_quota.sql @@ -0,0 +1 @@ +drop extension diskquota; diff --git a/upgrade_test/sql/2.3_install.sql b/upgrade_test/sql/2.3_install.sql new file mode 100644 index 00000000000..03020f08b58 --- /dev/null +++ b/upgrade_test/sql/2.3_install.sql @@ -0,0 +1,17 @@ +-- cleanup previous diskquota installation +\! gpconfig -c shared_preload_libraries -v '' > /dev/null +\! gpstop -raf > /dev/null +\! dropdb --if-exists diskquota + +-- TODO reset all diskquota GUC +\! gpstop -raf > /dev/null + +-- setup basic environment +\! createdb diskquota + +\! gpconfig -c shared_preload_libraries -v 'diskquota-2.3.so' > /dev/null +\! gpstop -raf > /dev/null + +-- TODO setup GUC +\! gpconfig -c diskquota.naptime -v '1' > /dev/null +\! gpstop -raf > /dev/null diff --git a/upgrade_test/sql/2.3_migrate_to_version_2.3.sql b/upgrade_test/sql/2.3_migrate_to_version_2.3.sql new file mode 100644 index 00000000000..f6ce2141d74 --- /dev/null +++ b/upgrade_test/sql/2.3_migrate_to_version_2.3.sql @@ -0,0 +1,8 @@ +\! gpconfig -c shared_preload_libraries -v 'diskquota-2.3.so' > /dev/null +\! gpstop -raf > /dev/null + +\! gpconfig -s 'shared_preload_libraries' + +\c +alter extension diskquota update to '2.3'; +\! 
sleep 5 diff --git a/upgrade_test/sql/2.3_set_quota.sql b/upgrade_test/sql/2.3_set_quota.sql new file mode 100644 index 00000000000..482841550cb --- /dev/null +++ b/upgrade_test/sql/2.3_set_quota.sql @@ -0,0 +1,44 @@ +\!gpconfig -s 'shared_preload_libraries' + +create extension diskquota with version '2.3'; +select diskquota.wait_for_worker_new_epoch(); + +-- schema quota +create schema s1; +select diskquota.set_schema_quota('s1', '1 MB'); +create table s1.a(i int) distributed by (i); +insert into s1.a select generate_series(1, 10000000); -- ok. + +-- role quota +create schema srole; +create role u1 nologin; +create table srole.b (t text) distributed by (t); +alter table srole.b owner to u1; +select diskquota.set_role_quota('u1', '1 MB'); +insert into srole.b select generate_series(1,100000); -- ok. + +-- schema tablespace quota +\! mkdir -p /tmp/schemaspc +create schema spcs1; +create tablespace schemaspc location '/tmp/schemaspc'; +select diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc','1 MB'); +create table spcs1.a(i int) tablespace schemaspc distributed by (i); +insert into spcs1.a select generate_series(1,100000); -- ok. + +-- role tablespace quota +\! mkdir -p /tmp/rolespc +create tablespace rolespc location '/tmp/rolespc'; +create role rolespcu1 nologin; +create schema rolespcrole; +create table rolespcrole.b (t text) tablespace rolespc distributed by (t); +alter table rolespcrole.b owner to rolespcu1; +select diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '1 MB'); +insert into rolespcrole.b select generate_series(1,100000); -- ok. + +\!sleep 5 + +-- leaked resource: +-- role u1, rolespcu1 +-- table s1.a, srole.b spcs1.a, rolespcrole.b +-- schema s1, srole, spcs1, rolespcrole +-- tablespace schemaspc, rolespc diff --git a/upgrade_test/sql/2.3_test_in_2.2_quota_create_in_2.3.sql b/upgrade_test/sql/2.3_test_in_2.2_quota_create_in_2.3.sql new file mode 100644 index 00000000000..4a599cfb3c3 --- /dev/null +++ b/upgrade_test/sql/2.3_test_in_2.2_quota_create_in_2.3.sql @@ -0,0 +1,16 @@ +-- need run 2.2_set_quota before run this test +-- FIXME add version check here + +\! sleep 5 + +insert into s1.a select generate_series(1, 10000000); -- fail. +insert into srole.b select generate_series(1, 100000); -- fail. + +insert into rolespcrole.b select generate_series(1, 100000); -- fail. +insert into spcs1.a select generate_series(1, 100000); -- fail. + +drop table s1.a, srole.b, spcs1.a, rolespcrole.b; +drop schema s1, srole, spcs1, rolespcrole; +drop tablespace rolespc; +drop tablespace schemaspc; +drop role u1, rolespcu1; From 13653b20e575c3f10d51ee1b18ed13d6e9d37f8d Mon Sep 17 00:00:00 2001 From: Zhang Hao Date: Thu, 24 Aug 2023 14:05:55 +0800 Subject: [PATCH 304/330] Enable gpactivatestandby test (#363) Fix the standby test on GP7. 
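The gist of the fix, as an illustrative sketch (not part of the diff below; it assumes, as the script does, that GP7 tooling reads COORDINATOR_DATA_DIRECTORY where GP6 used MASTER_DATA_DIRECTORY, and that gpdb_src may be a symlink that must be resolved first):

    # common to GP6 and GP7 demo clusters:
    export MASTER_DATA_DIRECTORY=$(readlink /home/gpadmin/gpdb_src)/gpAux/gpdemo/datadirs/standby
    # the GP7 branch (PGPORT 7001) additionally needs:
    export COORDINATOR_DATA_DIRECTORY=$MASTER_DATA_DIRECTORY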
--- concourse/scripts/test_diskquota.sh | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/concourse/scripts/test_diskquota.sh b/concourse/scripts/test_diskquota.sh index 6abe35d04d6..686d279c12c 100755 --- a/concourse/scripts/test_diskquota.sh +++ b/concourse/scripts/test_diskquota.sh @@ -4,13 +4,14 @@ set -exo pipefail function activate_standby() { gpstop -may -M immediate + export MASTER_DATA_DIRECTORY=$(readlink /home/gpadmin/gpdb_src)/gpAux/gpdemo/datadirs/standby if [[ $PGPORT -eq 6000 ]] then export PGPORT=6001 else export PGPORT=7001 + export COORDINATOR_DATA_DIRECTORY=$MASTER_DATA_DIRECTORY fi - export MASTER_DATA_DIRECTORY=/home/gpadmin/gpdb_src/gpAux/gpdemo/datadirs/standby gpactivatestandby -a -f -d $MASTER_DATA_DIRECTORY } @@ -32,12 +33,8 @@ function _main() { export SHOW_REGRESS_DIFF=1 time cmake --build . --target installcheck # Run test again with standby master - # FIXME: enable test for GPDB7 - if [[ $PGPORT -eq 6000 ]] - then - activate_standby - time cmake --build . --target installcheck - fi + activate_standby + time cmake --build . --target installcheck # Run upgrade test (with standby master) time cmake --build . --target upgradecheck popd From c7acb6be6e9873239bf109bbd7b7692563bd6df7 Mon Sep 17 00:00:00 2001 From: Zhang Hao Date: Thu, 24 Aug 2023 17:17:40 +0800 Subject: [PATCH 305/330] Add gpdb7 job back to pipeline. (#367) --- concourse/pipeline/release.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/concourse/pipeline/release.yml b/concourse/pipeline/release.yml index 6b12b935f29..023e86bd88f 100644 --- a/concourse/pipeline/release.yml +++ b/concourse/pipeline/release.yml @@ -20,7 +20,7 @@ #@ centos7_gpdb6_conf(release_build=True), #@ rhel8_gpdb6_conf(release_build=True), #@ ubuntu18_gpdb6_conf(release_build=True), -#! #@ rhel8_gpdb7_conf(release_build=True) +#@ rhel8_gpdb7_conf(release_build=True) #@ ] jobs: #@ param = { From 7358d615a2f79f6a123aa06cfb652ddfef1b4229 Mon Sep 17 00:00:00 2001 From: Zhang Hao Date: Fri, 25 Aug 2023 16:47:04 +0800 Subject: [PATCH 306/330] fix creating extension with global option appendonly=true. (#366) All tables in diskquota use primary keys. Unique index tables are created for primary keys. Unique index tables can not be created for tables with the option appendonly=true. This issue only occurs in gp6 since `set gp_default_storage_options='appendonly=true';` is disabled in gp7. Solution: add `appendonly=false` to each table with the primary key. 
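A minimal repro of the failure mode on GP6 (illustrative sketch only; the table name t is made up):

    set gp_default_storage_options = 'appendonly=true';
    create table t(i int, primary key(i));
    -- fails: unique indexes are not supported on append-only tables in GP6
    create table t(i int, primary key(i)) with (appendonly=false);
    -- ok: the per-table option overrides the global default, as this patch does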
Co-authored-by: Evgeniy Ratkov --- control/ddl/diskquota--2.3.sql | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/control/ddl/diskquota--2.3.sql b/control/ddl/diskquota--2.3.sql index 8be7749f1aa..bf2e7411fa6 100644 --- a/control/ddl/diskquota--2.3.sql +++ b/control/ddl/diskquota--2.3.sql @@ -11,7 +11,7 @@ CREATE TABLE diskquota.quota_config( quotalimitMB int8, segratio float4 DEFAULT 0, PRIMARY KEY(targetOid, quotatype) -) DISTRIBUTED BY (targetOid, quotatype); +) WITH (appendonly=false) DISTRIBUTED BY (targetOid, quotatype); CREATE TABLE diskquota.target ( rowId serial, @@ -19,19 +19,19 @@ CREATE TABLE diskquota.target ( primaryOid oid, tablespaceOid oid, --REFERENCES pg_tablespace.oid, PRIMARY KEY (primaryOid, tablespaceOid, quotatype) -); +) WITH (appendonly=false); CREATE TABLE diskquota.table_size( tableid oid, size bigint, segid smallint, PRIMARY KEY(tableid, segid) -) DISTRIBUTED BY (tableid, segid); +) WITH (appendonly=false) DISTRIBUTED BY (tableid, segid); CREATE TABLE diskquota.state( state int, PRIMARY KEY(state) -) DISTRIBUTED BY (state); +) WITH (appendonly=false) DISTRIBUTED BY (state); -- diskquota.quota_config AND diskquota.target is dump-able, other table can be generate on fly SELECT pg_catalog.pg_extension_config_dump('diskquota.quota_config', ''); From 3224bee1da8385d301266b9485408c552862a359 Mon Sep 17 00:00:00 2001 From: Zhang Hao Date: Fri, 8 Sep 2023 14:54:50 +0800 Subject: [PATCH 307/330] Fix continuous upgrade (#368) During the upgrade procedure, when diskquota.so is not in shared_preload_libraries, check whether the current version is the final version: - If yes, throw an error to the user. - Otherwise, return from _PG_init(). Co-authored-by: Sasasu --- src/diskquota.c | 88 ++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 68 insertions(+), 20 deletions(-) diff --git a/src/diskquota.c b/src/diskquota.c index 84ac5e076ea..e944045a850 100644 --- a/src/diskquota.c +++ b/src/diskquota.c @@ -148,7 +148,8 @@ static void free_bgworker_handle(uint32 worker_id); /* WaitForBackgroundWorkerShutdown is copied from gpdb7 */ static BgwHandleStatus WaitForBackgroundWorkerShutdown(BackgroundWorkerHandle *handle); #endif /* GP_VERSION_NUM */ -static bool is_altering_extension(void); +static bool is_altering_extension_to_default_version(char *version); +static bool check_alter_extension(void); /* * diskquota_launcher_shmem_size @@ -166,8 +167,37 @@ diskquota_launcher_shmem_size(void) return size; } +/* + * Check whether altering the extension to the default version. 
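+ * (Illustrative note: callers pass the compiled-in DISKQUOTA_VERSION, and a
+ * bare "alter extension diskquota update" without a "to" clause is resolved
+ * against pg_available_extensions.default_version queried below.)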
+ */ static bool -is_altering_extension(void) +is_altering_extension_to_default_version(char *version) +{ + int spi_ret; + bool ret = false; + SPI_connect(); + spi_ret = SPI_execute("select default_version from pg_available_extensions where name ='diskquota'", true, 0); + if (spi_ret != SPI_OK_SELECT) + elog(ERROR, "[diskquota] failed to select diskquota default version during diskquota update."); + if (SPI_processed > 0) + { + HeapTuple tup = SPI_tuptable->vals[0]; + Datum dat; + bool isnull; + + dat = SPI_getbinval(tup, SPI_tuptable->tupdesc, 1, &isnull); + if (!isnull) + { + char *default_version = DatumGetCString(dat); + if (strcmp(version, default_version) == 0) ret = true; + } + } + SPI_finish(); + return ret; +} + +static bool +check_alter_extension(void) { if (ActivePortal == NULL) return false; /* QD: When the sourceTag is T_AlterExtensionStmt, then return true */ @@ -178,25 +208,43 @@ is_altering_extension(void) * If the sourceText contains 'alter extension diskquota update', we consider it is * a alter extension query. */ - char *query = asc_tolower(ActivePortal->sourceText, strlen(ActivePortal->sourceText)); - char *pos; - bool match = true; + char *query = asc_tolower(ActivePortal->sourceText, strlen(ActivePortal->sourceText)); + char *pos = query; + bool ret = true; + static char *regs[] = {"alter", "extension", "diskquota", "update"}; + int i; + + /* Check whether the sql statement is alter extension. */ + for (i = 0; i < sizeof(regs) / sizeof(char *); i++) + { + pos = strstr(pos, regs[i]); + if (pos == 0) + { + ret = false; + break; + } + } + + /* + * If the current version is the final version, which is altered, + * we need to throw an error to the user. + */ + if (ret) + { + /* + * If version is set in alter extension statement, then compare the current version + * with the version in this statement. Otherwise, compare the current version with + * the default version of diskquota. + */ + pos = strstr(pos, "to"); + if (pos) + ret = strstr(pos, DISKQUOTA_VERSION) != 0; + else + ret = is_altering_extension_to_default_version(DISKQUOTA_VERSION); + } - pos = strstr(query, "alter"); - if (pos) - pos = strstr(pos, "extension"); - else - match = false; - if (pos) - pos = strstr(pos, "diskquota"); - else - match = false; - if (pos) - pos = strstr(pos, "update"); - else - match = false; pfree(query); - return match; + return ret; } /* @@ -216,7 +264,7 @@ _PG_init(void) * To support the continuous upgrade/downgrade, we should skip the library * check in _PG_init() during upgrade/downgrade. */ - if (IsNormalProcessingMode() && is_altering_extension()) + if (IsNormalProcessingMode() && check_alter_extension()) { ereport(LOG, (errmsg("[diskquota] altering diskquota version to " DISKQUOTA_VERSION "."))); return; From 7fa38c25c28de15a118c20bd46660d5a4ca324b8 Mon Sep 17 00:00:00 2001 From: Zhang Hao Date: Tue, 12 Sep 2023 14:48:13 +0800 Subject: [PATCH 308/330] Add GUC diskquota.max_monitored_databases. (#369) This GUC sets the max number of databases diskquota can monitor.
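The GUC is registered with PGC_POSTMASTER context, so changing it requires a cluster restart; a usage sketch (the value 10 is illustrative):

    gpconfig -c diskquota.max_monitored_databases -v 10
    gpstop -ari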
--- src/diskquota.c | 47 +++++---- src/diskquota.h | 5 +- src/quotamodel.c | 13 ++- tests/regress/diskquota_schedule | 1 + tests/regress/expected/test_max_database.out | 104 +++++++++++++++++++ tests/regress/sql/test_max_database.sql | 48 +++++++++ 6 files changed, 188 insertions(+), 30 deletions(-) create mode 100644 tests/regress/expected/test_max_database.out create mode 100644 tests/regress/sql/test_max_database.sql diff --git a/src/diskquota.c b/src/diskquota.c index e944045a850..4f62778a23f 100644 --- a/src/diskquota.c +++ b/src/diskquota.c @@ -70,12 +70,13 @@ static volatile sig_atomic_t got_sigusr1 = false; static volatile sig_atomic_t got_sigusr2 = false; /* GUC variables */ -int diskquota_naptime = 0; -int diskquota_max_active_tables = 0; -int diskquota_worker_timeout = 60; /* default timeout is 60 seconds */ -bool diskquota_hardlimit = false; -int diskquota_max_workers = 10; -int diskquota_max_table_segments = 0; +int diskquota_naptime = 0; +int diskquota_max_active_tables = 0; +int diskquota_worker_timeout = 60; /* default timeout is 60 seconds */ +bool diskquota_hardlimit = false; +int diskquota_max_workers = 10; +int diskquota_max_table_segments = 0; +int diskquota_max_monitored_databases = 0; DiskQuotaLocks diskquota_locks; ExtensionDDLMessage *extension_ddl_message = NULL; @@ -161,9 +162,10 @@ diskquota_launcher_shmem_size(void) Size size; size = MAXALIGN(sizeof(DiskquotaLauncherShmemStruct)); - size = add_size(size, mul_size(diskquota_max_workers, - sizeof(struct DiskQuotaWorkerEntry))); // hidden memory for DiskQuotaWorkerEntry - size = add_size(size, mul_size(MAX_NUM_MONITORED_DB, sizeof(struct DiskquotaDBEntry))); // hidden memory for dbArray + // hidden memory for DiskQuotaWorkerEntry + size = add_size(size, mul_size(diskquota_max_workers, sizeof(struct DiskQuotaWorkerEntry))); + // hidden memory for dbArray + size = add_size(size, mul_size(diskquota_max_monitored_databases, sizeof(struct DiskquotaDBEntry))); return size; } @@ -401,9 +403,10 @@ define_guc_variables(void) "Max number of backgroud workers to run diskquota extension, should be less than max_worker_processes.", NULL, &diskquota_max_workers, 10, 1, 20, PGC_POSTMASTER, 0, NULL, NULL, NULL); DefineCustomIntVariable("diskquota.max_table_segments", "Max number of tables segments on the cluster.", NULL, - &diskquota_max_table_segments, 10 * 1024 * 1024, - INIT_NUM_TABLE_SIZE_ENTRIES * MAX_NUM_MONITORED_DB, INT_MAX, PGC_POSTMASTER, 0, NULL, NULL, - NULL); + &diskquota_max_table_segments, 10 * 1024 * 1024, INIT_NUM_TABLE_SIZE_ENTRIES * 1024, + INT_MAX, PGC_POSTMASTER, 0, NULL, NULL, NULL); + DefineCustomIntVariable("diskquota.max_monitored_databases", "Max number of database on the cluster.", NULL, + &diskquota_max_monitored_databases, 50, 1, 1024, PGC_POSTMASTER, 0, NULL, NULL, NULL); } /* ---- Functions for disk quota worker process ---- */ @@ -1096,10 +1099,10 @@ init_database_list(void) if (dbEntry == NULL) continue; num++; /* - * diskquota only supports to monitor at most MAX_NUM_MONITORED_DB + * diskquota only supports to monitor at most diskquota_max_monitored_databases * databases */ - if (num >= MAX_NUM_MONITORED_DB) + if (num >= diskquota_max_monitored_databases) { ereport(LOG, (errmsg("[diskquota launcher] diskquota monitored database limit is reached, database(oid:%u) " "will not enable diskquota", @@ -1109,7 +1112,7 @@ init_database_list(void) } num_db = num; /* As update_monitor_db_mpp needs to execute sql, so can not put in the loop above */ - for (int i = 0; i < MAX_NUM_MONITORED_DB; i++) + for 
(int i = 0; i < diskquota_max_monitored_databases; i++) { DiskquotaDBEntry *dbEntry = &DiskquotaLauncherShmem->dbArray[i]; if (dbEntry->in_use) @@ -1291,7 +1294,7 @@ do_process_extension_ddl_message(MessageResult *code, ExtensionDDLMessage local_ static void on_add_db(Oid dbid, MessageResult *code) { - if (num_db >= MAX_NUM_MONITORED_DB) + if (num_db >= diskquota_max_monitored_databases) { *code = ERR_EXCEED; ereport(ERROR, (errmsg("[diskquota launcher] too many databases to monitor"))); @@ -1769,7 +1772,7 @@ init_launcher_shmem() // get dbArray from the hidden memory DiskquotaDBEntry *dbArray = (DiskquotaDBEntry *)hidden_memory_prt; - hidden_memory_prt += mul_size(MAX_NUM_MONITORED_DB, sizeof(struct DiskquotaDBEntry)); + hidden_memory_prt += mul_size(diskquota_max_monitored_databases, sizeof(struct DiskquotaDBEntry)); // get the dbArrayTail from the hidden memory DiskquotaDBEntry *dbArrayTail = (DiskquotaDBEntry *)hidden_memory_prt; @@ -1785,7 +1788,7 @@ init_launcher_shmem() DiskquotaLauncherShmem->dbArray = dbArray; DiskquotaLauncherShmem->dbArrayTail = dbArrayTail; - for (int i = 0; i < MAX_NUM_MONITORED_DB; i++) + for (int i = 0; i < diskquota_max_monitored_databases; i++) { memset(&DiskquotaLauncherShmem->dbArray[i], 0, sizeof(DiskquotaDBEntry)); DiskquotaLauncherShmem->dbArray[i].id = i; @@ -1813,7 +1816,7 @@ add_db_entry(Oid dbid) LWLockAcquire(diskquota_locks.dblist_lock, LW_EXCLUSIVE); /* if there is already dbEntry's dbid equals dbid, returning the existing one */ - for (int i = 0; i < MAX_NUM_MONITORED_DB; i++) + for (int i = 0; i < diskquota_max_monitored_databases; i++) { DiskquotaDBEntry *dbEntry = &DiskquotaLauncherShmem->dbArray[i]; if (!dbEntry->in_use && result == NULL) @@ -1843,7 +1846,7 @@ static void release_db_entry(Oid dbid) { DiskquotaDBEntry *db = NULL; - for (int i = 0; i < MAX_NUM_MONITORED_DB; i++) + for (int i = 0; i < diskquota_max_monitored_databases; i++) { DiskquotaDBEntry *dbEntry = &DiskquotaLauncherShmem->dbArray[i]; if (dbEntry->in_use && dbEntry->dbid == dbid) @@ -1890,9 +1893,9 @@ next_db(DiskquotaDBEntry *curDB) */ StartTransactionCommand(); LWLockAcquire(diskquota_locks.dblist_lock, LW_SHARED); - for (int i = 0; i < MAX_NUM_MONITORED_DB; i++) + for (int i = 0; i < diskquota_max_monitored_databases; i++) { - if (nextSlot >= MAX_NUM_MONITORED_DB) nextSlot = 0; + if (nextSlot >= diskquota_max_monitored_databases) nextSlot = 0; DiskquotaDBEntry *dbEntry = &DiskquotaLauncherShmem->dbArray[nextSlot]; nextSlot++; if (!dbEntry->in_use || dbEntry->workerId != INVALID_WORKER_ID || dbEntry->dbid == InvalidOid) continue; diff --git a/src/diskquota.h b/src/diskquota.h index 8e46d31732a..70eaf8d3bf9 100644 --- a/src/diskquota.h +++ b/src/diskquota.h @@ -46,7 +46,6 @@ typedef enum } DiskquotaHashFunction; /* max number of monitored database with diskquota enabled */ -#define MAX_NUM_MONITORED_DB 50 #define LAUNCHER_SCHEMA "diskquota_utility" #define EXTENSION_SCHEMA "diskquota" extern int diskquota_worker_timeout; @@ -194,12 +193,12 @@ typedef struct { dlist_head freeWorkers; // a list of DiskQuotaWorkerEntry dlist_head runningWorkers; // a list of DiskQuotaWorkerEntry - DiskquotaDBEntry *dbArray; // size == MAX_NUM_MONITORED_DB + DiskquotaDBEntry *dbArray; // size == diskquota_max_monitored_databases DiskquotaDBEntry *dbArrayTail; volatile bool isDynamicWorker; /* DiskQuotaWorkerEntry worker[diskquota_max_workers]; // the hidden memory to store WorkerEntry - DiskquotaDBEntry dbentry[MAX_NUM_MONITORED_DB]; // the hidden memory for dbentry + DiskquotaDBEntry 
dbentry[diskquota_max_monitored_databases]; // the hidden memory for dbentry */ } DiskquotaLauncherShmemStruct; diff --git a/src/quotamodel.c b/src/quotamodel.c index 6b8507b3810..6e5c4c3e735 100644 --- a/src/quotamodel.c +++ b/src/quotamodel.c @@ -89,6 +89,7 @@ typedef struct LocalRejectMapEntry LocalRejectMapEntry; int SEGCOUNT = 0; extern int diskquota_max_table_segments; extern pg_atomic_uint32 *diskquota_table_size_entry_num; +extern int diskquota_max_monitored_databases; /* * local cache of table disk size and corresponding schema and owner. @@ -461,8 +462,9 @@ disk_quota_shmem_startup(void) hash_ctl.keysize = sizeof(Oid); hash_ctl.entrysize = sizeof(struct MonitorDBEntryStruct); - monitored_dbid_cache = DiskquotaShmemInitHash("table oid cache which shoud tracking", MAX_NUM_MONITORED_DB, - MAX_NUM_MONITORED_DB, &hash_ctl, HASH_ELEM, DISKQUOTA_OID_HASH); + monitored_dbid_cache = + DiskquotaShmemInitHash("table oid cache which shoud tracking", diskquota_max_monitored_databases, + diskquota_max_monitored_databases, &hash_ctl, HASH_ELEM, DISKQUOTA_OID_HASH); init_launcher_shmem(); LWLockRelease(AddinShmemInitLock); } @@ -508,7 +510,8 @@ static Size diskquota_worker_shmem_size() { Size size; - size = hash_estimate_size(MAX_NUM_TABLE_SIZE_ENTRIES / MAX_NUM_MONITORED_DB + 100, sizeof(TableSizeEntry)); + size = hash_estimate_size(MAX_NUM_TABLE_SIZE_ENTRIES / diskquota_max_monitored_databases + 100, + sizeof(TableSizeEntry)); size = add_size(size, hash_estimate_size(MAX_LOCAL_DISK_QUOTA_REJECT_ENTRIES, sizeof(LocalRejectMapEntry))); size = add_size(size, hash_estimate_size(MAX_QUOTA_MAP_ENTRIES * NUM_QUOTA_TYPES, sizeof(struct QuotaMapEntry))); return size; @@ -528,14 +531,14 @@ DiskQuotaShmemSize(void) size = add_size(size, hash_estimate_size(diskquota_max_active_tables, sizeof(DiskQuotaRelationCacheEntry))); size = add_size(size, hash_estimate_size(diskquota_max_active_tables, sizeof(DiskQuotaRelidCacheEntry))); size = add_size(size, hash_estimate_size(diskquota_max_active_tables, sizeof(Oid))); - size = add_size(size, hash_estimate_size(MAX_NUM_MONITORED_DB, + size = add_size(size, hash_estimate_size(diskquota_max_monitored_databases, sizeof(struct MonitorDBEntryStruct))); // monitored_dbid_cache if (IS_QUERY_DISPATCHER()) { size = add_size(size, diskquota_launcher_shmem_size()); size = add_size(size, sizeof(pg_atomic_uint32)); - size = add_size(size, diskquota_worker_shmem_size() * MAX_NUM_MONITORED_DB); + size = add_size(size, diskquota_worker_shmem_size() * diskquota_max_monitored_databases); } return size; diff --git a/tests/regress/diskquota_schedule b/tests/regress/diskquota_schedule index c3a25ff43f0..f4f1f35a245 100644 --- a/tests/regress/diskquota_schedule +++ b/tests/regress/diskquota_schedule @@ -1,5 +1,6 @@ test: config test: test_create_extension +test: test_max_database test: test_readiness_logged test: test_init_table_size_table test: test_relation_size diff --git a/tests/regress/expected/test_max_database.out b/tests/regress/expected/test_max_database.out new file mode 100644 index 00000000000..e928f6a1356 --- /dev/null +++ b/tests/regress/expected/test_max_database.out @@ -0,0 +1,104 @@ +--start_ignore +\! gpconfig -c diskquota.max_database -v 3 +20230905:12:39:55:332748 gpconfig:zhrt:zhrt-[INFO]:-completed successfully with parameters '-c diskquota.max_database -v 3' +\! 
gpstop -ari +--end_ignore +\c +DROP DATABASE IF EXISTS test_db1; +NOTICE: database "test_db1" does not exist, skipping +DROP DATABASE IF EXISTS test_db2; +NOTICE: database "test_db2" does not exist, skipping +DROP DATABASE IF EXISTS test_db3; +NOTICE: database "test_db3" does not exist, skipping +CREATE DATABASE test_db1; +CREATE DATABASE test_db2; +CREATE DATABASE test_db3; +\c test_db1 +CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +\c test_db2 +CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect fail +\c test_db3 +CREATE EXTENSION diskquota; +ERROR: [diskquota] failed to create diskquota extension: too many databases to monitor (diskquota_utility.c:406) +-- clean extension +\c test_db1 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +\c test_db2 +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +DROP EXTENSION diskquota; +-- clean database +\c contrib_regression +DROP DATABASE test_db1; +DROP DATABASE test_db2; +DROP DATABASE test_db3; +-- start_ignore +\! gpconfig -r diskquota.max_database +20230905:12:40:29:350921 gpconfig:zhrt:zhrt-[INFO]:-completed successfully with parameters '-r diskquota.max_database' +\! gpstop -ari +20230905:12:40:30:352551 gpstop:zhrt:zhrt-[INFO]:-Starting gpstop with args: -ari +20230905:12:40:30:352551 gpstop:zhrt:zhrt-[INFO]:-Gathering information and validating the environment... +20230905:12:40:30:352551 gpstop:zhrt:zhrt-[INFO]:-Obtaining Greenplum Master catalog information +20230905:12:40:30:352551 gpstop:zhrt:zhrt-[INFO]:-Obtaining Segment details from master... +20230905:12:40:30:352551 gpstop:zhrt:zhrt-[INFO]:-Greenplum Version: 'postgres (Greenplum Database) 6.24.4+dev.45.gad3671f087 build dev' +20230905:12:40:30:352551 gpstop:zhrt:zhrt-[INFO]:-Commencing Master instance shutdown with mode='immediate' +20230905:12:40:30:352551 gpstop:zhrt:zhrt-[INFO]:-Master segment instance directory=/home/zhrt/workspace/gpdb6/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 +20230905:12:40:30:352551 gpstop:zhrt:zhrt-[INFO]:-Attempting forceful termination of any leftover master process +20230905:12:40:30:352551 gpstop:zhrt:zhrt-[INFO]:-Terminating processes for segment /home/zhrt/workspace/gpdb6/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 +20230905:12:40:37:352551 gpstop:zhrt:zhrt-[INFO]:-Stopping master standby host zhrt mode=immediate +20230905:12:40:38:352551 gpstop:zhrt:zhrt-[INFO]:-Successfully shutdown standby process on zhrt +20230905:12:40:38:352551 gpstop:zhrt:zhrt-[INFO]:-Targeting dbid [2, 5, 3, 6, 4, 7] for shutdown +20230905:12:40:38:352551 gpstop:zhrt:zhrt-[INFO]:-Commencing parallel primary segment instance shutdown, please wait... +20230905:12:40:38:352551 gpstop:zhrt:zhrt-[INFO]:-0.00% of jobs completed +20230905:12:40:43:352551 gpstop:zhrt:zhrt-[INFO]:-100.00% of jobs completed +20230905:12:40:43:352551 gpstop:zhrt:zhrt-[INFO]:-Commencing parallel mirror segment instance shutdown, please wait... 
+20230905:12:40:43:352551 gpstop:zhrt:zhrt-[INFO]:-0.00% of jobs completed +20230905:12:40:46:352551 gpstop:zhrt:zhrt-[INFO]:-100.00% of jobs completed +20230905:12:40:46:352551 gpstop:zhrt:zhrt-[INFO]:----------------------------------------------------- +20230905:12:40:46:352551 gpstop:zhrt:zhrt-[INFO]:- Segments stopped successfully = 6 +20230905:12:40:46:352551 gpstop:zhrt:zhrt-[INFO]:- Segments with errors during stop = 0 +20230905:12:40:46:352551 gpstop:zhrt:zhrt-[INFO]:----------------------------------------------------- +20230905:12:40:46:352551 gpstop:zhrt:zhrt-[INFO]:-Successfully shutdown 6 of 6 segment instances +20230905:12:40:46:352551 gpstop:zhrt:zhrt-[INFO]:-Database successfully shutdown with no errors reported +20230905:12:40:46:352551 gpstop:zhrt:zhrt-[INFO]:-Cleaning up leftover gpmmon process +20230905:12:40:46:352551 gpstop:zhrt:zhrt-[INFO]:-No leftover gpmmon process found +20230905:12:40:46:352551 gpstop:zhrt:zhrt-[INFO]:-Cleaning up leftover gpsmon processes +20230905:12:40:47:352551 gpstop:zhrt:zhrt-[INFO]:-No leftover gpsmon processes on some hosts. not attempting forceful termination on these hosts +20230905:12:40:47:352551 gpstop:zhrt:zhrt-[INFO]:-Cleaning up leftover shared memory +20230905:12:40:48:352551 gpstop:zhrt:zhrt-[INFO]:-Restarting System... +-- end_ignore diff --git a/tests/regress/sql/test_max_database.sql b/tests/regress/sql/test_max_database.sql new file mode 100644 index 00000000000..df69b46b800 --- /dev/null +++ b/tests/regress/sql/test_max_database.sql @@ -0,0 +1,48 @@ +--start_ignore +\! gpconfig -c diskquota.max_database -v 3 +\! gpstop -ari +--end_ignore + +\c + +DROP DATABASE IF EXISTS test_db1; +DROP DATABASE IF EXISTS test_db2; +DROP DATABASE IF EXISTS test_db3; + +CREATE DATABASE test_db1; +CREATE DATABASE test_db2; +CREATE DATABASE test_db3; + +\c test_db1 +CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); + +\c test_db2 +CREATE EXTENSION diskquota; +SELECT diskquota.wait_for_worker_new_epoch(); + +-- expect fail +\c test_db3 +CREATE EXTENSION diskquota; + +-- clean extension +\c test_db1 +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); +DROP EXTENSION diskquota; + +\c test_db2 +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); +DROP EXTENSION diskquota; + +-- clean database +\c contrib_regression +DROP DATABASE test_db1; +DROP DATABASE test_db2; +DROP DATABASE test_db3; + +-- start_ignore +\! gpconfig -r diskquota.max_database +\! 
gpstop -ari +-- end_ignore \ No newline at end of file From dfc94bf671249eec62eadda78c81744e6628b1cb Mon Sep 17 00:00:00 2001 From: Zhang Hao Date: Tue, 12 Sep 2023 15:16:26 +0800 Subject: [PATCH 309/330] Fix wrong test case (#374) --- tests/regress/diskquota_schedule | 2 +- ..._max_database.out => test_max_monitored_databases.out} | 8 ++++---- ..._max_database.sql => test_max_monitored_databases.sql} | 4 ++-- 3 files changed, 7 insertions(+), 7 deletions(-) rename tests/regress/expected/{test_max_database.out => test_max_monitored_databases.out} (95%) rename tests/regress/sql/{test_max_database.sql => test_max_monitored_databases.sql} (89%) diff --git a/tests/regress/diskquota_schedule b/tests/regress/diskquota_schedule index f4f1f35a245..98256f3435f 100644 --- a/tests/regress/diskquota_schedule +++ b/tests/regress/diskquota_schedule @@ -1,6 +1,6 @@ test: config test: test_create_extension -test: test_max_database +test: test_max_monitored_databases test: test_readiness_logged test: test_init_table_size_table test: test_relation_size diff --git a/tests/regress/expected/test_max_database.out b/tests/regress/expected/test_max_monitored_databases.out similarity index 95% rename from tests/regress/expected/test_max_database.out rename to tests/regress/expected/test_max_monitored_databases.out index e928f6a1356..84568bc114a 100644 --- a/tests/regress/expected/test_max_database.out +++ b/tests/regress/expected/test_max_monitored_databases.out @@ -1,6 +1,6 @@ --start_ignore -\! gpconfig -c diskquota.max_database -v 3 -20230905:12:39:55:332748 gpconfig:zhrt:zhrt-[INFO]:-completed successfully with parameters '-c diskquota.max_database -v 3' +\! gpconfig -c diskquota.max_monitored_databases -v 3 +20230905:12:39:55:332748 gpconfig:zhrt:zhrt-[INFO]:-completed successfully with parameters '-c diskquota.max_monitored_databases -v 3' \! gpstop -ari --end_ignore \c @@ -68,8 +68,8 @@ DROP DATABASE test_db1; DROP DATABASE test_db2; DROP DATABASE test_db3; -- start_ignore -\! gpconfig -r diskquota.max_database -20230905:12:40:29:350921 gpconfig:zhrt:zhrt-[INFO]:-completed successfully with parameters '-r diskquota.max_database' +\! gpconfig -r diskquota.max_monitored_databases +20230905:12:40:29:350921 gpconfig:zhrt:zhrt-[INFO]:-completed successfully with parameters '-r diskquota.max_monitored_databases' \! gpstop -ari 20230905:12:40:30:352551 gpstop:zhrt:zhrt-[INFO]:-Starting gpstop with args: -ari 20230905:12:40:30:352551 gpstop:zhrt:zhrt-[INFO]:-Gathering information and validating the environment... diff --git a/tests/regress/sql/test_max_database.sql b/tests/regress/sql/test_max_monitored_databases.sql similarity index 89% rename from tests/regress/sql/test_max_database.sql rename to tests/regress/sql/test_max_monitored_databases.sql index df69b46b800..f0e2e8c1aa9 100644 --- a/tests/regress/sql/test_max_database.sql +++ b/tests/regress/sql/test_max_monitored_databases.sql @@ -1,5 +1,5 @@ --start_ignore -\! gpconfig -c diskquota.max_database -v 3 +\! gpconfig -c diskquota.max_monitored_databases -v 3 \! gpstop -ari --end_ignore @@ -43,6 +43,6 @@ DROP DATABASE test_db2; DROP DATABASE test_db3; -- start_ignore -\! gpconfig -r diskquota.max_database +\! gpconfig -r diskquota.max_monitored_databases \! gpstop -ari -- end_ignore \ No newline at end of file From d302f16ca1da3b6c38ba1edbc05280076c1a66e4 Mon Sep 17 00:00:00 2001 From: Zhang Hao Date: Tue, 12 Sep 2023 17:33:39 +0800 Subject: [PATCH 310/330] Fix compilation issue caused by GP7 (#375) - Fix the compilation issue caused by GP7. 
`libpq.so` needs to be linked. See https://github.com/greenplum-db/gpdb/pull/16234. - Add GP7 jobs back to the pipeline. --- CMakeLists.txt | 4 ++++ concourse/pipeline/commit.yml | 2 +- concourse/pipeline/pr.yml | 2 +- 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index d12fa4480a9..75be74a9f3d 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -85,6 +85,10 @@ set_target_properties( C_STANDARD 99 LINKER_LANGUAGE "C") +if (${GP_MAJOR_VERSION} STRGREATER_EQUAL "7") + TARGET_LINK_LIBRARIES(diskquota ${PG_LIB_DIR}/libpq.so) +endif() + # packing part, move to a separate file if this part is too large include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/Distro.cmake) diff --git a/concourse/pipeline/commit.yml b/concourse/pipeline/commit.yml index 1891caeba76..823dfcc2868 100644 --- a/concourse/pipeline/commit.yml +++ b/concourse/pipeline/commit.yml @@ -19,7 +19,7 @@ #@ centos7_gpdb6_conf(), #@ rhel8_gpdb6_conf(), #@ ubuntu18_gpdb6_conf(), -#! #@ rhel8_gpdb7_conf(), +#@ rhel8_gpdb7_conf(), #@ ] jobs: #@ param = { diff --git a/concourse/pipeline/pr.yml b/concourse/pipeline/pr.yml index 3578131aa8c..4a715120c24 100644 --- a/concourse/pipeline/pr.yml +++ b/concourse/pipeline/pr.yml @@ -22,7 +22,7 @@ #@ centos7_gpdb6_conf(), #@ rhel8_gpdb6_conf(), #@ ubuntu18_gpdb6_conf(), -#! #@ rhel8_gpdb7_conf(), +#@ rhel8_gpdb7_conf(), #@ ] jobs: #@ param = { From 547e5d18fa72cd7d26fe11b4073e07b493a65471 Mon Sep 17 00:00:00 2001 From: Zhang Hao Date: Wed, 13 Sep 2023 14:19:36 +0800 Subject: [PATCH 311/330] Add GUC diskquota.max_quotas (#372) `diskquota.max_quotas` sets the max number of quotas on the cluster. In the current version of diskquota, a quota is one of: - role - schema - role_tablespace - schema_tablespace The total number of quotas = roles + schemas + (roles * tablespaces) + (schemas * tablespaces).
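As a hypothetical sizing example, a cluster with 10 role quotas, 20 schema quotas, and per-tablespace quotas over 3 tablespaces needs up to

    10 + 20 + (10 * 3) + (20 * 3) = 120

quota entries, which must stay below diskquota.max_quotas (the GUC defaults to 1024 * 1024, as defined below).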
--- src/diskquota.c | 11 +++++++++ src/quotamodel.c | 64 +++++++++++++++++++++++++++++++++++++++++++----- 2 files changed, 69 insertions(+), 6 deletions(-) diff --git a/src/diskquota.c b/src/diskquota.c index 4f62778a23f..45df2f80d48 100644 --- a/src/diskquota.c +++ b/src/diskquota.c @@ -77,6 +77,7 @@ bool diskquota_hardlimit = false; int diskquota_max_workers = 10; int diskquota_max_table_segments = 0; int diskquota_max_monitored_databases = 0; +int diskquota_max_quotas = 0; DiskQuotaLocks diskquota_locks; ExtensionDDLMessage *extension_ddl_message = NULL; @@ -91,6 +92,9 @@ static int num_db = 0; /* how many TableSizeEntry are maintained in all the table_size_map in shared memory*/ pg_atomic_uint32 *diskquota_table_size_entry_num; +/* how many QuotaMapEntry are maintained in all the quota_info[type].map in shared memory*/ +pg_atomic_uint32 *diskquota_quota_info_entry_num; + static DiskquotaLauncherShmemStruct *DiskquotaLauncherShmem; #define MIN_SLEEPTIME 100 /* milliseconds */ @@ -407,6 +411,8 @@ define_guc_variables(void) INT_MAX, PGC_POSTMASTER, 0, NULL, NULL, NULL); DefineCustomIntVariable("diskquota.max_monitored_databases", "Max number of database on the cluster.", NULL, &diskquota_max_monitored_databases, 50, 1, 1024, PGC_POSTMASTER, 0, NULL, NULL, NULL); + DefineCustomIntVariable("diskquota.max_quotas", "Max number of quotas on the cluster.", NULL, &diskquota_max_quotas, + 1024 * 1024, 1024 * NUM_QUOTA_TYPES, INT_MAX, PGC_POSTMASTER, 0, NULL, NULL, NULL); } /* ---- Functions for disk quota worker process ---- */ @@ -1799,6 +1805,11 @@ init_launcher_shmem() diskquota_table_size_entry_num = ShmemInitStruct("diskquota TableSizeEntry counter", sizeof(pg_atomic_uint32), &found); if (!found) pg_atomic_init_u32(diskquota_table_size_entry_num, 0); + + /* init QuotaInfoEntry counter */ + diskquota_quota_info_entry_num = + ShmemInitStruct("diskquota QuotaInfoEntry counter", sizeof(pg_atomic_uint32), &found); + if (!found) pg_atomic_init_u32(diskquota_quota_info_entry_num, 0); } /* diff --git a/src/quotamodel.c b/src/quotamodel.c index 6e5c4c3e735..9e5541ec6be 100644 --- a/src/quotamodel.c +++ b/src/quotamodel.c @@ -55,8 +55,11 @@ #define NUM_QUOTA_CONFIG_ATTRS 6 /* Number of entries for diskquota.table_size update SQL */ #define SQL_MAX_VALUES_NUMBER 1000000 +/* Inital number of entries for hash table in quota_info */ +#define INIT_QUOTA_MAP_ENTRIES (128) +#define AVG_QUOTA_MAP_ENTRIES (diskquota_max_quotas / (diskquota_max_monitored_databases * NUM_QUOTA_TYPES)) /* Number of entries for hash table in quota_info */ -#define MAX_QUOTA_MAP_ENTRIES (128 * 1024L) +#define MAX_QUOTA_MAP_ENTRIES (AVG_QUOTA_MAP_ENTRIES < 1024 ? 1024 : AVG_QUOTA_MAP_ENTRIES) /* TableSizeEntry macro function */ /* Use the top bit of totalsize as a flush flag. If this bit is set, the size should be flushed into @@ -90,6 +93,8 @@ int SEGCOUNT = 0; extern int diskquota_max_table_segments; extern pg_atomic_uint32 *diskquota_table_size_entry_num; extern int diskquota_max_monitored_databases; +extern int diskquota_max_quotas; +extern pg_atomic_uint32 *diskquota_quota_info_entry_num; /* * local cache of table disk size and corresponding schema and owner. 
@@ -226,6 +231,7 @@ static void add_quota_to_rejectmap(QuotaType type, Oid targetOid, Oid tablespace static void check_quota_map(QuotaType type); static void clear_all_quota_maps(void); static void transfer_table_for_quota(int64 totalsize, QuotaType type, Oid *old_keys, Oid *new_keys, int16 segid); +static struct QuotaMapEntry *put_quota_map_entry(HTAB *quota_info_map, struct QuotaMapEntryKey *key, bool *found); /* functions to refresh disk quota model*/ static void refresh_disk_quota_usage(bool is_init); @@ -248,6 +254,43 @@ static bool get_table_size_entry_flag(TableSizeEntry *entry, TableSizeEntryFlag static void reset_table_size_entry_flag(TableSizeEntry *entry, TableSizeEntryFlag flag); static void set_table_size_entry_flag(TableSizeEntry *entry, TableSizeEntryFlag flag); +/* + * put QuotaMapEntry into quota_info[type].map and return this entry. + * return NULL: no free SHM for quota_info[type].map + */ +static struct QuotaMapEntry * +put_quota_map_entry(HTAB *quota_info_map, struct QuotaMapEntryKey *key, bool *found) +{ + struct QuotaMapEntry *entry; + uint32 counter = pg_atomic_read_u32(diskquota_quota_info_entry_num); + if (counter >= diskquota_max_quotas) + { + entry = hash_search(quota_info_map, key, HASH_FIND, found); + /* + * Too many quotas have been added to the quota_info_map, to avoid diskquota using + * too much shared memory, just return NULL. The diskquota won't work correctly + * anymore. + */ + if (!found) return NULL; + } + else + { + entry = hash_search(quota_info_map, key, HASH_ENTER, found); + if (!found) + { + counter = pg_atomic_add_fetch_u32(diskquota_quota_info_entry_num, 1); + if (counter >= diskquota_max_quotas) + { + ereport(WARNING, (errmsg("[diskquota] the number of quota exceeds the limit, please increase " + "the GUC value for diskquota.max_quotas. 
Current " + "diskquota.max_quotas value: %d", + diskquota_max_quotas))); + } + } + } + return entry; +} + /* add a new entry quota or update the old entry quota */ static void update_size_for_quota(int64 size, QuotaType type, Oid *keys, int16 segid) @@ -256,7 +299,9 @@ update_size_for_quota(int64 size, QuotaType type, Oid *keys, int16 segid) struct QuotaMapEntryKey key = {0}; memcpy(key.keys, keys, quota_info[type].num_keys * sizeof(Oid)); key.segid = segid; - struct QuotaMapEntry *entry = hash_search(quota_info[type].map, &key, HASH_ENTER, &found); + struct QuotaMapEntry *entry = put_quota_map_entry(quota_info[type].map, &key, &found); + /* If the number of quota exceeds the limit, entry will be NULL */ + if (entry == NULL) return; if (!found) { entry->size = 0; @@ -277,7 +322,9 @@ update_limit_for_quota(int64 limit, float segratio, QuotaType type, Oid *keys) struct QuotaMapEntryKey key = {0}; memcpy(key.keys, keys, quota_info[type].num_keys * sizeof(Oid)); key.segid = i; - struct QuotaMapEntry *entry = hash_search(quota_info[type].map, &key, HASH_ENTER, &found); + struct QuotaMapEntry *entry = put_quota_map_entry(quota_info[type].map, &key, &found); + /* If the number of quota exceeds the limit, entry will be NULL */ + if (entry == NULL) continue; if (!found) { entry->size = 0; @@ -303,6 +350,7 @@ remove_quota(QuotaType type, Oid *keys, int16 segid) memcpy(key.keys, keys, quota_info[type].num_keys * sizeof(Oid)); key.segid = segid; hash_search(quota_info[type].map, &key, HASH_REMOVE, NULL); + pg_atomic_fetch_sub_u32(diskquota_quota_info_entry_num, 1); } /* @@ -513,7 +561,6 @@ diskquota_worker_shmem_size() size = hash_estimate_size(MAX_NUM_TABLE_SIZE_ENTRIES / diskquota_max_monitored_databases + 100, sizeof(TableSizeEntry)); size = add_size(size, hash_estimate_size(MAX_LOCAL_DISK_QUOTA_REJECT_ENTRIES, sizeof(LocalRejectMapEntry))); - size = add_size(size, hash_estimate_size(MAX_QUOTA_MAP_ENTRIES * NUM_QUOTA_TYPES, sizeof(struct QuotaMapEntry))); return size; } @@ -536,9 +583,13 @@ DiskQuotaShmemSize(void) if (IS_QUERY_DISPATCHER()) { + int num_quota_info_map = diskquota_max_monitored_databases * NUM_QUOTA_TYPES; + size = add_size(size, diskquota_launcher_shmem_size()); size = add_size(size, sizeof(pg_atomic_uint32)); size = add_size(size, diskquota_worker_shmem_size() * diskquota_max_monitored_databases); + size = add_size(size, + num_quota_info_map * hash_estimate_size(MAX_QUOTA_MAP_ENTRIES, sizeof(struct QuotaMapEntry))); } return size; @@ -580,8 +631,8 @@ init_disk_quota_model(uint32 id) memset(&hash_ctl, 0, sizeof(hash_ctl)); hash_ctl.entrysize = sizeof(struct QuotaMapEntry); hash_ctl.keysize = sizeof(struct QuotaMapEntryKey); - quota_info[type].map = DiskquotaShmemInitHash(str.data, 1024L, MAX_QUOTA_MAP_ENTRIES, &hash_ctl, HASH_ELEM, - DISKQUOTA_TAG_HASH); + quota_info[type].map = DiskquotaShmemInitHash(str.data, INIT_QUOTA_MAP_ENTRIES, MAX_QUOTA_MAP_ENTRIES, + &hash_ctl, HASH_ELEM, DISKQUOTA_TAG_HASH); } pfree(str.data); } @@ -652,6 +703,7 @@ vacuum_disk_quota_model(uint32 id) while ((qentry = hash_seq_search(&iter)) != NULL) { hash_search(quota_info[type].map, &qentry->keys, HASH_REMOVE, NULL); + pg_atomic_fetch_sub_u32(diskquota_quota_info_entry_num, 1); } } pfree(str.data); From e69412948e6094699a1d787ecdbaf3293967c901 Mon Sep 17 00:00:00 2001 From: Zhang Hao Date: Fri, 15 Sep 2023 16:25:23 +0800 Subject: [PATCH 312/330] Fix bug: the counter of quota_info_map is updated incorrectly (#378) --- src/quotamodel.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git
a/src/quotamodel.c b/src/quotamodel.c index 9e5541ec6be..ae04948ac65 100644 --- a/src/quotamodel.c +++ b/src/quotamodel.c @@ -257,6 +257,7 @@ static void set_table_size_entry_flag(TableSizeEntry *entry, TableSizeEntryFlag /* * put QuotaMapEntry into quota_info[type].map and return this entry. * return NULL: no free SHM for quota_info[type].map + * found cannot be NULL */ static struct QuotaMapEntry * put_quota_map_entry(HTAB *quota_info_map, struct QuotaMapEntryKey *key, bool *found) @@ -271,12 +272,12 @@ put_quota_map_entry(HTAB *quota_info_map, struct QuotaMapEntryKey *key, bool *fo * too much shared memory, just return NULL. The diskquota won't work correctly * anymore. */ - if (!found) return NULL; + if (!(*found)) return NULL; } else { entry = hash_search(quota_info_map, key, HASH_ENTER, found); - if (!found) + if (!(*found)) { counter = pg_atomic_add_fetch_u32(diskquota_quota_info_entry_num, 1); if (counter >= diskquota_max_quotas) From 8210b78c8a5e5629e6e9c784f049ae601d85cb41 Mon Sep 17 00:00:00 2001 From: Zhang Hao Date: Tue, 19 Sep 2023 14:49:35 +0800 Subject: [PATCH 313/330] Fix bug: diskquota stops working after removing any extension (#379) Fix the bug caused by #220: after the user removed any extension, the bgworker in the current database was stopped, not only after dropping diskquota itself. --- src/gp_activetable.c | 5 ++- tests/regress/diskquota_schedule | 1 + .../expected/test_drop_any_extension.out | 34 +++++++++++++++++++ tests/regress/sql/test_drop_any_extension.sql | 23 +++++++++++++ 4 files changed, 60 insertions(+), 3 deletions(-) create mode 100644 tests/regress/expected/test_drop_any_extension.out create mode 100644 tests/regress/sql/test_drop_any_extension.sql diff --git a/src/gp_activetable.c b/src/gp_activetable.c index a0d1c524019..6e76633be7a 100644 --- a/src/gp_activetable.c +++ b/src/gp_activetable.c @@ -190,15 +190,14 @@ object_access_hook_QuotaStmt(ObjectAccessType access, Oid classId, Oid objectId, { if (prev_object_access_hook) (*prev_object_access_hook)(access, classId, objectId, subId, arg); - // if is 'drop extension diskquota' + /* if is 'drop extension diskquota' */ if (classId == ExtensionRelationId && access == OAT_DROP) { if (get_extension_oid("diskquota", true) == objectId) { invalidate_database_rejectmap(MyDatabaseId); + diskquota_stop_worker(); } - - diskquota_stop_worker(); return; } diff --git a/tests/regress/diskquota_schedule b/tests/regress/diskquota_schedule index 98256f3435f..493401539b6 100644 --- a/tests/regress/diskquota_schedule +++ b/tests/regress/diskquota_schedule @@ -42,5 +42,6 @@ test: test_tablespace_diff_schema test: test_worker_schedule test: test_worker_schedule_exception test: test_dbname_encoding +test: test_drop_any_extension test: test_drop_extension test: reset_config diff --git a/tests/regress/expected/test_drop_any_extension.out b/tests/regress/expected/test_drop_any_extension.out new file mode 100644 index 00000000000..1c8fbc66222 --- /dev/null +++ b/tests/regress/expected/test_drop_any_extension.out @@ -0,0 +1,34 @@ +CREATE DATABASE test_drop_db; +\c test_drop_db +CREATE EXTENSION diskquota; +CREATE EXTENSION gp_inject_fault; +SELECT diskquota.init_table_size_table(); + init_table_size_table +----------------------- + +(1 row) + +SELECT diskquota.set_schema_quota(current_schema, '1MB'); + set_schema_quota +------------------ + +(1 row) + +CREATE TABLE t(i int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table.
+HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. +DROP EXTENSION gp_inject_fault; +-- expect success +INSERT INTO t SELECT generate_series(1, 100000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect fail +INSERT INTO t SELECT generate_series(1, 100000); +ERROR: schema's disk space quota exceeded with name: public +DROP EXTENSION diskquota; +\c contrib_regression +DROP DATABASE test_drop_db; diff --git a/tests/regress/sql/test_drop_any_extension.sql b/tests/regress/sql/test_drop_any_extension.sql new file mode 100644 index 00000000000..91a95dc2fc9 --- /dev/null +++ b/tests/regress/sql/test_drop_any_extension.sql @@ -0,0 +1,23 @@ +CREATE DATABASE test_drop_db; + +\c test_drop_db + +CREATE EXTENSION diskquota; +CREATE EXTENSION gp_inject_fault; +SELECT diskquota.init_table_size_table(); + +SELECT diskquota.set_schema_quota(current_schema, '1MB'); +CREATE TABLE t(i int); + +DROP EXTENSION gp_inject_fault; + +-- expect success +INSERT INTO t SELECT generate_series(1, 100000); +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect fail +INSERT INTO t SELECT generate_series(1, 100000); + +DROP EXTENSION diskquota; + +\c contrib_regression +DROP DATABASE test_drop_db; From 57cd673cbe80d8ad092487d8e6186215cefb3522 Mon Sep 17 00:00:00 2001 From: Zhang Hao Date: Wed, 18 Oct 2023 16:18:41 +0800 Subject: [PATCH 314/330] Disable flaky test (#384) --- tests/regress/diskquota_schedule | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/regress/diskquota_schedule b/tests/regress/diskquota_schedule index 493401539b6..b202de2a867 100644 --- a/tests/regress/diskquota_schedule +++ b/tests/regress/diskquota_schedule @@ -39,7 +39,7 @@ test: test_ctas_tablespace_role test: test_ctas_tablespace_schema test: test_default_tablespace test: test_tablespace_diff_schema -test: test_worker_schedule +# test: test_worker_schedule test: test_worker_schedule_exception test: test_dbname_encoding test: test_drop_any_extension From f8b665b6727a873dece0b69e78ecdf00da0fd63e Mon Sep 17 00:00:00 2001 From: Zhang Hao Date: Wed, 18 Oct 2023 17:39:47 +0800 Subject: [PATCH 315/330] Disable test_worker_schedule_exception (#385) --- tests/regress/diskquota_schedule | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/regress/diskquota_schedule b/tests/regress/diskquota_schedule index b202de2a867..825600636bd 100644 --- a/tests/regress/diskquota_schedule +++ b/tests/regress/diskquota_schedule @@ -40,7 +40,7 @@ test: test_ctas_tablespace_schema test: test_default_tablespace test: test_tablespace_diff_schema # test: test_worker_schedule -test: test_worker_schedule_exception +# test: test_worker_schedule_exception test: test_dbname_encoding test: test_drop_any_extension test: test_drop_extension From fba3d06790a4123c4b8828f69f1b4d3a2c7e9c7c Mon Sep 17 00:00:00 2001 From: Zhang Hao Date: Thu, 19 Oct 2023 10:37:36 +0800 Subject: [PATCH 316/330] Move concourse to gp-extension-ci repo (#383) --- concourse/PROJ_NAME | 1 - concourse/README.md | 98 -------- .../{scripts/build_diskquota.sh => build.sh} | 2 +- concourse/fly.sh | 1 - concourse/lib/README.md | 1 - concourse/lib/base.lib.yml | 60 ----- concourse/lib/fly.sh | 179 ------------- concourse/lib/res_def_gpdb.yml | 148 ----------- concourse/lib/res_def_misc.yml | 7 - concourse/lib/res_types_def.yml | 21 -- concourse/pipeline/commit.yml | 41 --- 
concourse/pipeline/dev.yml | 41 --- concourse/pipeline/job_def.lib.yml | 238 ------------------ concourse/pipeline/pr.yml | 49 ---- concourse/pipeline/release.yml | 48 ---- concourse/pipeline/res_def.yml | 185 -------------- concourse/pipeline/trigger_def.lib.yml | 101 -------- concourse/pre_build.sh | 8 + concourse/pre_test.sh | 12 + concourse/scripts/entry.sh | 182 -------------- concourse/scripts/upgrade_extension.sh | 39 --- concourse/tasks/build_diskquota.yml | 20 -- concourse/tasks/test_diskquota.yml | 16 -- concourse/tasks/upgrade_extension.yml | 16 -- .../{scripts/test_diskquota.sh => test.sh} | 4 +- 25 files changed, 23 insertions(+), 1495 deletions(-) delete mode 100644 concourse/PROJ_NAME delete mode 100644 concourse/README.md rename concourse/{scripts/build_diskquota.sh => build.sh} (94%) delete mode 120000 concourse/fly.sh delete mode 100644 concourse/lib/README.md delete mode 100644 concourse/lib/base.lib.yml delete mode 100755 concourse/lib/fly.sh delete mode 100644 concourse/lib/res_def_gpdb.yml delete mode 100644 concourse/lib/res_def_misc.yml delete mode 100644 concourse/lib/res_types_def.yml delete mode 100644 concourse/pipeline/commit.yml delete mode 100644 concourse/pipeline/dev.yml delete mode 100644 concourse/pipeline/job_def.lib.yml delete mode 100644 concourse/pipeline/pr.yml delete mode 100644 concourse/pipeline/release.yml delete mode 100644 concourse/pipeline/res_def.yml delete mode 100644 concourse/pipeline/trigger_def.lib.yml create mode 100755 concourse/pre_build.sh create mode 100755 concourse/pre_test.sh delete mode 100755 concourse/scripts/entry.sh delete mode 100755 concourse/scripts/upgrade_extension.sh delete mode 100644 concourse/tasks/build_diskquota.yml delete mode 100644 concourse/tasks/test_diskquota.yml delete mode 100644 concourse/tasks/upgrade_extension.yml rename concourse/{scripts/test_diskquota.sh => test.sh} (93%) diff --git a/concourse/PROJ_NAME b/concourse/PROJ_NAME deleted file mode 100644 index 67f1c089995..00000000000 --- a/concourse/PROJ_NAME +++ /dev/null @@ -1 +0,0 @@ -diskquota diff --git a/concourse/README.md b/concourse/README.md deleted file mode 100644 index 1007f454077..00000000000 --- a/concourse/README.md +++ /dev/null @@ -1,98 +0,0 @@ -# Pipelines - -## Naming Prefix Rule - -- `pr.` for pull-request pipelines -- `merge..` for branch pipelines. It will be executed when a commit committed/merged into the branch. -- `dev...` for personal development usage. Put your name into the pipeline name so others can know who own it. -- `_test..` for pipeline debugging. - -## Pipelines for daily work - -### PR Pipeline - -https://dev2.ci.gpdb.pivotal.io/teams/gp-extensions/pipelines/pr.diskquota - -### Main Branch Pipeline - -The development happens on the `gpdb` branch. The merge pipeline for the `gpdb` branch is -https://dev2.ci.gpdb.pivotal.io/teams/gp-extensions/pipelines/merge.diskquota:gpdb - - -# Fly a pipeline - -## Prerequisite - -- Install [ytt](https://carvel.dev/ytt/). It's written in go. So just download the executable for your platform from the [release page](https://github.com/vmware-tanzu/carvel-ytt/releases). -- Make the `fly` command in the `PATH` or export its location to `FLY` env. -- Login with the `fly` command. Assume we are using `dev2` as the target name. - -``` -# -n gp-extensions is to set the concourse team -fly -t dev2 login -c https://dev2.ci.gpdb.pivotal.io -n gp-extensions -``` - -- `cd` to the `concourse` directory. 
- -## Fly the PR pipeline - -``` -./fly.sh -t dev2 -c pr -``` - -## Fly the merge pipeline - -``` -./fly.sh -t dev2 -c merge -``` - -## Fly the release pipeline - -By default, the release is built from the `gpdb` branch. - -The release pipeline should be located in https://prod.ci.gpdb.pivotal.io - -``` -# Login to prod -fly -t prod login -c https://prod.ci.gpdb.pivotal.io -# Fly the release pipeline -./fly.sh -t prod -c rel -``` - -To fly a release pipeline from a specific branch: - -``` -./fly.sh -t -c rel -b release/. -``` - -## Fly the dev pipeline - -``` -./fly.sh -t dev2 -c dev -p -b -``` - -## Webhook - -By default, the PR and merge pipelines are using webhook instead of polling to trigger a build. The webhook URL will be printed when flying such a pipeline by `fly.sh`. The webhook needs to be set in the `github repository` -> `Settings` -> `Webhooks` with push notification enabled. - -To test if the webhook works, use `curl` to send a `POST` request to the hook URL with some random data. If it is the right URL, the relevant resource will be refreshed on the Concourse UI. The command line looks like: - -``` -curl --data-raw "foo" -``` - -## Update gp-extensions-ci - -We place some of the resources of concourse in a separate repository https://github.com/pivotal/gp-extensions-ci/. And we use that repo as a subtree with prefix ./concourse/lib. This is how to pull from the repo gp-extensions-ci: - -```sh - git subtree pull --prefix concourse/lib git@github.com:pivotal/gp-extensions-ci.git main --squash -``` - -# FAQ - -## PR pipeline is not triggered. - -The PR pipeline relies on the webhook to detect the new PR. However, due to the the limitation of the webhook implemention of concourse, we rely on the push hook for this. It means if the PR is from a forked repo, the PR pipeline won't be triggered immediately. To manually trigger the pipeline, go to https://dev2.ci.gpdb.pivotal.io/teams/gp-extensions/pipelines/pr.diskquota/resources/diskquota_pr and click ⟳ button there. - -TIPS: Just don't fork, name your branch as `/` and push it here to create PR. diff --git a/concourse/scripts/build_diskquota.sh b/concourse/build.sh similarity index 94% rename from concourse/scripts/build_diskquota.sh rename to concourse/build.sh index 7b2ee30b6af..f752d0434b4 100755 --- a/concourse/scripts/build_diskquota.sh +++ b/concourse/build.sh @@ -10,7 +10,7 @@ function pkg() { export CC="$(which gcc)" export CXX="$(which g++)" - pushd /home/gpadmin/diskquota_artifacts + pushd /home/gpadmin/bin_diskquota local last_release_path last_release_path=$(readlink -eq /home/gpadmin/last_released_diskquota_bin/diskquota-*.tar.gz) cmake /home/gpadmin/diskquota_src \ diff --git a/concourse/fly.sh b/concourse/fly.sh deleted file mode 120000 index 09b854d19ec..00000000000 --- a/concourse/fly.sh +++ /dev/null @@ -1 +0,0 @@ -lib/fly.sh \ No newline at end of file diff --git a/concourse/lib/README.md b/concourse/lib/README.md deleted file mode 100644 index 0dd8ca554c9..00000000000 --- a/concourse/lib/README.md +++ /dev/null @@ -1 +0,0 @@ -# Common ytt libs for gp-extensions concourse diff --git a/concourse/lib/base.lib.yml b/concourse/lib/base.lib.yml deleted file mode 100644 index 78072de58be..00000000000 --- a/concourse/lib/base.lib.yml +++ /dev/null @@ -1,60 +0,0 @@ -#@ load("@ytt:data", "data") -#@ load("@ytt:struct", "struct") - -#! resources and resource_types could be declared in different data-value files. -#! We check the key prefix, if they are 'resources' and 'resource_types', just -#! 
merge them into local dictionaries. -#@ resources = [] -#@ resource_types = [] -#@ data_values_dict = struct.decode(data.values) -#@ for key in data_values_dict.keys(): -#@ if key.startswith('resources'): -#@ resources.extend(data_values_dict[key]) -#@ end -#@ end -#@ for key in data_values_dict.keys(): -#@ if key.startswith('resource_type'): -#@ resource_types.extend(data_values_dict[key]) -#@ end -#@ end - -#! add_res_by_xxx is to solve the unused resources error for concourse -#@ def add_res_by_conf(res_map, job_conf): -#@ for key in job_conf: -#@ if key.startswith("res_"): -#@ val = job_conf[key] -#@ if type(val) == "list" or type(val) == "yamlfragment": -#@ for res_name in val: -#@ res_map[res_name] = True -#@ end -#@ else: -#@ res_name = val -#@ res_map[res_name] = True -#@ end -#@ end -#@ end -#@ end -#@ -#@ def add_res_by_name(res_map, res_name): -#@ res_map[res_name] = True -#@ end -#@ -#@ def declare_res(res_type_map, res_map): -#@ for val in resources: -#@ res_name = val["name"] -#@ res_type = val["type"] -#@ if res_map.get(val["name"]): -#@ res_type_map[res_type] = True - - #@ val -#@ end -#@ end -#@ end -#@ -#@ def declare_res_type(res_type_map): -#@ for val in resource_types: -#@ type_name = val["name"] -#@ if res_type_map.get(type_name): - - #@ val -#@ end -#@ end -#@ end diff --git a/concourse/lib/fly.sh b/concourse/lib/fly.sh deleted file mode 100755 index d68c5c5c94f..00000000000 --- a/concourse/lib/fly.sh +++ /dev/null @@ -1,179 +0,0 @@ -#!/bin/bash - -set -e - -fly=${FLY:-"fly"} -echo "'fly' command: ${fly}" -echo "" - -my_path=$(realpath -s "${BASH_SOURCE[0]}") -my_dir=$(dirname "${my_path}") -proj_name_file="${my_dir}/PROJ_NAME" -if [ ! -f "${proj_name_file}" ]; then - echo "A 'PROJ_NAME' file is needed in '${my_dir}'" - exit 1 -fi -proj_name=$(cat "${proj_name_file}") -concourse_team="main" - -usage() { - if [ -n "$1" ]; then - echo "$1" 1>&2 - echo "" 1>&2 - fi - - echo "Usage: $0 -t -c [-p ] [-b branch] [-T]" - echo "Options:" - echo " '-T' adds '_test' suffix to the pipeline type. Useful for pipeline debugging." - exit 1 -} - -# Hacky way to find out which concourse team is being used. -# The team name is needed to generate webhook URL -detect_concourse_team() { - local target="$1" - local fly_rc_file="$HOME/.flyrc" - local found_target=false - while read -r line; - do - line="$(echo -e "${line}" | tr -d '[:space:]')" - if [ ${found_target} != true ] && [ "${line}" = "${target}:" ]; then - found_target=true - fi - if [ ${found_target} = true ] && [[ "${line}" == team:* ]]; then - concourse_team=$(echo "${line}" | cut --delimiter=":" --fields=2) - echo "Use concourse target: ${target}, team: ${concourse_team}" - return - fi - done < "${fly_rc_file}" -} - -# Parse command line options -while getopts ":c:t:p:b:T" o; do - case "${o}" in - c) - # pipeline type/config. 
pr/merge/dev/rel - pipeline_config=${OPTARG} - ;; - t) - # concourse target - target=${OPTARG} - ;; - p) - # pipeline name - postfix=${OPTARG} - ;; - b) - # branch name - branch=${OPTARG} - ;; - T) - test_suffix="_test" - ;; - *) - usage "" - ;; - esac -done -shift $((OPTIND-1)) - -if [ -z "${target}" ] || [ -z "${pipeline_config}" ]; then - usage "" -fi - -detect_concourse_team "${target}" - -pipeline_type="" -# Decide ytt options to generate pipeline -case ${pipeline_config} in - pr) - pipeline_type="pr" - config_file="pr.yml" - hook_res="${proj_name}_pr" - ;; - merge|commit) - # Default branch is 'gpdb' as it is our main branch - if [ -z "${branch}" ]; then - branch="gpdb" - fi - pipeline_type="merge" - config_file="commit.yml" - hook_res="${proj_name}_commit" - ;; - dev) - if [ -z "${postfix}" ]; then - usage "'-p' needs to be supplied to specify the pipeline name postfix for flying a 'dev' pipeline." - fi - if [ -z "${branch}" ]; then - usage "'-b' needs to be supplied to specify the branch for flying a 'dev' pipeline." - fi - pipeline_type="dev" - config_file="dev.yml" - ;; - release|rel) - # Default branch is 'gpdb' as it is our main branch - if [ -z "${branch}" ]; then - branch="gpdb" - fi - pipeline_type="rel" - config_file="release.yml" - hook_res="${proj_name}_commit" - ;; - *) - usage "" - ;; -esac - -yml_path="/tmp/${proj_name}.yml" -pipeline_dir="${my_dir}/pipeline" -lib_dir="${my_dir}/lib" -# pipeline cannot contain '/' -pipeline_name=${pipeline_name/\//"_"} - -# Generate pipeline name -if [ -n "${test_suffix}" ]; then - pipeline_type="${pipeline_type}_test" -fi -pipeline_name="${pipeline_type}.${proj_name}" -if [ -n "${branch}" ]; then - pipeline_name="${pipeline_name}.${branch}" -fi -if [ -n "${postfix}" ]; then - pipeline_name="${pipeline_name}.${postfix}" -fi -# pipeline cannot contain '/' -pipeline_name=${pipeline_name/\//"_"} - -ytt \ - --data-values-file "${pipeline_dir}/res_def.yml" \ - --data-values-file "${lib_dir}/res_def_gpdb.yml" \ - --data-values-file "${lib_dir}/res_def_misc.yml" \ - --data-values-file "${lib_dir}/res_types_def.yml" \ - -f "${lib_dir}/base.lib.yml" \ - -f "${pipeline_dir}/job_def.lib.yml" \ - -f "${pipeline_dir}/trigger_def.lib.yml" \ - -f "${pipeline_dir}/${config_file}" > "${yml_path}" -echo "Generated pipeline yaml '${yml_path}'." - -echo "" -echo "Fly the pipeline..." -set -v -"${fly}" \ - -t "${target}" \ - sp \ - -p "${pipeline_name}" \ - -c "${yml_path}" \ - -v "${proj_name}-branch=${branch}" -set +v - -if [ "${pipeline_config}" == "dev" ]; then - exit 0 -fi - -concourse_url=$(fly targets | awk "{if (\$1 == \"${target}\") {print \$2}}") -echo "" -echo "================================================================================" -echo "Remeber to set the the webhook URL on GitHub:" -echo "${concourse_url}/api/v1/teams/${concourse_team}/pipelines/${pipeline_name}/resources/${hook_res}/check/webhook?webhook_token=" -echo "You may need to change the base URL if a different concourse server is used." 
-echo "================================================================================" diff --git a/concourse/lib/res_def_gpdb.yml b/concourse/lib/res_def_gpdb.yml deleted file mode 100644 index b4384480b1c..00000000000 --- a/concourse/lib/res_def_gpdb.yml +++ /dev/null @@ -1,148 +0,0 @@ -resources_gpdb: -# Image Resources -# centos6 -- name: centos6-gpdb6-image-build - type: registry-image - source: - repository: gcr.io/data-gpdb-public-images/gpdb6-centos6-build - tag: latest -- name: centos6-gpdb6-image-test - type: registry-image - source: - repository: gcr.io/data-gpdb-public-images/gpdb6-centos6-test - tag: latest -# centos7 -- name: centos7-gpdb6-image-build - type: registry-image - source: - repository: gcr.io/data-gpdb-public-images/gpdb6-centos7-build - tag: latest -- name: centos7-gpdb6-image-test - type: registry-image - source: - repository: gcr.io/data-gpdb-public-images/gpdb6-centos7-test - tag: latest -# rhel8 -- name: rhel8-gpdb6-image-build - type: registry-image - source: - repository: gcr.io/data-gpdb-private-images/gpdb6-rhel8-build - tag: latest - username: _json_key - password: ((data-gpdb-private-images-container-registry-readonly-service-account-key)) -- name: rhel8-gpdb6-image-test - type: registry-image - source: - repository: gcr.io/data-gpdb-private-images/gpdb6-rhel8-test - tag: latest - username: _json_key - password: ((data-gpdb-private-images-container-registry-readonly-service-account-key)) -# Ubuntu18 -- name: ubuntu18-gpdb6-image-build - type: registry-image - source: - repository: gcr.io/data-gpdb-public-images/gpdb6-ubuntu18.04-build - tag: latest -- name: ubuntu18-gpdb6-image-test - type: registry-image - source: - repository: gcr.io/data-gpdb-public-images/gpdb6-ubuntu18.04-test - tag: latest -# GPDB7 -# build -- name: rocky8-gpdb7-image-build - type: registry-image - source: - repository: gcr.io/data-gpdb-public-images/gpdb7-rocky8-build - tag: latest -# test -- name: rocky8-gpdb7-image-test - type: registry-image - source: - repository: gcr.io/data-gpdb-public-images/gpdb7-rocky8-test - tag: latest -- name: rhel8-gpdb7-image-test - type: registry-image - source: - repository: gcr.io/data-gpdb-private-images/gpdb7-rhel8-test - tag: latest - username: _json_key - password: ((data-gpdb-private-images-container-registry-readonly-service-account-key)) - -# gpdb binary on gcs is located as different folder for different version -# Latest build with assertion enabled: -# --enable-cassert --enable-tap-tests --enable-debug-extensions -- name: bin_gpdb6_centos6_debug - type: gcs - source: - bucket: pivotal-gpdb-concourse-resources-prod - json_key: ((concourse-gcs-resources-service-account-key)) - regexp: server/published/gpdb6/server-rc-(.*\+dev\.\d+.*)-rhel6_x86_64.debug.tar.gz -- name: bin_gpdb6_centos7_debug - type: gcs - source: - bucket: pivotal-gpdb-concourse-resources-prod - json_key: ((concourse-gcs-resources-service-account-key)) - regexp: server/published/gpdb6/server-rc-(.*\+dev\.\d+.*)-rhel7_x86_64.debug.tar.gz -- name: bin_gpdb6_rhel8_debug - type: gcs - source: - bucket: pivotal-gpdb-concourse-resources-prod - json_key: ((concourse-gcs-resources-service-account-key)) - regexp: server/published/gpdb6/server-rc-(.*\+dev\.\d+.*)-rhel8_x86_64.debug.tar.gz -- name: bin_gpdb6_ubuntu18_debug - type: gcs - source: - bucket: pivotal-gpdb-concourse-resources-prod - json_key: ((concourse-gcs-resources-service-account-key)) - regexp: server/published/gpdb6/server-rc-(.*\+dev\.\d+.*)-ubuntu18.04_x86_64.debug.tar.gz -- name: bin_gpdb7_el8_debug - type: gcs 
- source: - bucket: pivotal-gpdb-concourse-resources-prod - json_key: ((concourse-gcs-resources-service-account-key)) - regexp: server/published/main/server-rc-(.*\+dev\.\d+.*)-el8_x86_64.debug.tar.gz -# Latest release candidates, no fault-injector, no assertion: -# --disable-debug-extensions --disable-tap-tests --enable-ic-proxy -- name: bin_gpdb6_centos6 - type: gcs - source: - bucket: pivotal-gpdb-concourse-resources-prod - json_key: ((concourse-gcs-resources-service-account-key)) - regexp: server/release-candidates/gpdb6/greenplum-db-server-(6\.([0-9]|([1-8][0-9])|(9[0-8]))\..*\+dev\.\d+)-.*-centos6.tar.gz -- name: bin_gpdb6_centos7 - type: gcs - source: - bucket: pivotal-gpdb-concourse-resources-prod - json_key: ((concourse-gcs-resources-service-account-key)) - regexp: server/release-candidates/gpdb6/greenplum-db-server-(6\.([0-9]|([1-8][0-9])|(9[0-8]))\..*\+dev\.\d+)-.*-centos7.tar.gz -- name: bin_gpdb6_rhel8 - type: gcs - source: - bucket: pivotal-gpdb-concourse-resources-prod - json_key: ((concourse-gcs-resources-service-account-key)) - regexp: server/release-candidates/gpdb6/greenplum-db-server-(6\.([0-9]|([1-8][0-9])|(9[0-8]))\..*\+dev\.\d+)-.*-rhel8.tar.gz -- name: bin_gpdb6_ubuntu18 - type: gcs - source: - bucket: pivotal-gpdb-concourse-resources-prod - json_key: ((concourse-gcs-resources-service-account-key)) - regexp: server/release-candidates/gpdb6/greenplum-db-server-(6\.([0-9]|([1-8][0-9])|(9[0-8]))\..*\+dev\.\d+)-.*-ubuntu18.04.tar.gz -- name: bin_gpdb7_el8 - type: gcs - source: - bucket: pivotal-gpdb-concourse-resources-prod - json_key: ((concourse-gcs-resources-service-account-key)) - regexp: server/release-candidates/gpdb7/greenplum-db-server-(7\.([0-9]|([1-8][0-9])|(9[0-8]))\..*\+dev\.\d+)-.*-el8.tar.gz - -# Greenplum sources -- name: gpdb6_src - type: git - source: - branch: 6X_STABLE - uri: https://github.com/greenplum-db/gpdb.git -- name: gpdb7_src - type: git - source: - branch: main - uri: https://github.com/greenplum-db/gpdb.git diff --git a/concourse/lib/res_def_misc.yml b/concourse/lib/res_def_misc.yml deleted file mode 100644 index 3105dd38d66..00000000000 --- a/concourse/lib/res_def_misc.yml +++ /dev/null @@ -1,7 +0,0 @@ -resources_misc: - -# Other dependencies -- name: slack_notify_extensions - type: slack-alert - source: - url: ((extension/extensions-slack-webhook)) diff --git a/concourse/lib/res_types_def.yml b/concourse/lib/res_types_def.yml deleted file mode 100644 index 07eebf6ea94..00000000000 --- a/concourse/lib/res_types_def.yml +++ /dev/null @@ -1,21 +0,0 @@ -resource_types_common: -- name: gcs - type: registry-image - check_every: 1h - source: - repository: frodenas/gcs-resource - tag: latest - -- name: pull-request - type: registry-image - check_every: 1h - source: - repository: teliaoss/github-pr-resource - tag: latest - -- name: slack-alert - type: registry-image - source: - repository: arbourd/concourse-slack-alert-resource - tag: latest - diff --git a/concourse/pipeline/commit.yml b/concourse/pipeline/commit.yml deleted file mode 100644 index 823dfcc2868..00000000000 --- a/concourse/pipeline/commit.yml +++ /dev/null @@ -1,41 +0,0 @@ -#@ load("job_def.lib.yml", -#@ "entrance_job", -#@ "build_test_job", -#@ "centos6_gpdb6_conf", -#@ "centos7_gpdb6_conf", -#@ "rhel8_gpdb6_conf", -#@ "ubuntu18_gpdb6_conf", -#@ "rhel8_gpdb7_conf",) -#@ load("trigger_def.lib.yml", -#@ "commit_trigger", -#@ ) -#@ -#@ load("base.lib.yml", "declare_res", "declare_res_type") -#@ res_map = {} -#@ res_type_map = {} -#@ trigger = commit_trigger(res_map) -#@ confs = [ -#@ 
centos6_gpdb6_conf(), -#@ centos7_gpdb6_conf(), -#@ rhel8_gpdb6_conf(), -#@ ubuntu18_gpdb6_conf(), -#@ rhel8_gpdb7_conf(), -#@ ] -jobs: -#@ param = { -#@ "res_map": res_map, -#@ "trigger": trigger, -#@ } -- #@ entrance_job(param) -#@ for conf in confs: -#@ param = { -#@ "res_map": res_map, -#@ "trigger": trigger, -#@ "conf": conf -#@ } -- #@ build_test_job(param) -#@ end - -resources: #@ declare_res(res_type_map, res_map) - -resource_types: #@ declare_res_type(res_type_map) diff --git a/concourse/pipeline/dev.yml b/concourse/pipeline/dev.yml deleted file mode 100644 index 009eb2167b7..00000000000 --- a/concourse/pipeline/dev.yml +++ /dev/null @@ -1,41 +0,0 @@ -#@ load("job_def.lib.yml", -#@ "entrance_job", -#@ "build_test_job", -#@ "centos6_gpdb6_conf", -#@ "centos7_gpdb6_conf", -#@ "rhel8_gpdb6_conf", -#@ "ubuntu18_gpdb6_conf", -#@ "rhel8_gpdb7_conf") -#@ load("trigger_def.lib.yml", -#@ "commit_dev_trigger", -#@ ) -#@ -#@ load("base.lib.yml", "declare_res", "declare_res_type") -#@ res_map = {} -#@ res_type_map = {} -#@ trigger = commit_dev_trigger(res_map) -#@ confs= [ -#@ centos6_gpdb6_conf(release_build=False), -#@ centos7_gpdb6_conf(release_build=False), -#@ rhel8_gpdb6_conf(release_build=False), -#@ ubuntu18_gpdb6_conf(release_build=False), -#@ rhel8_gpdb7_conf(release_build=False), -#@ ] -jobs: -#@ param = { -#@ "res_map": res_map, -#@ "trigger": trigger, -#@ } -- #@ entrance_job(param) -#@ for conf in confs: -#@ param = { -#@ "res_map": res_map, -#@ "trigger": trigger, -#@ "conf": conf -#@ } -- #@ build_test_job(param) -#@ end - -resources: #@ declare_res(res_type_map, res_map) - -resource_types: #@ declare_res_type(res_type_map) diff --git a/concourse/pipeline/job_def.lib.yml b/concourse/pipeline/job_def.lib.yml deleted file mode 100644 index c206ca6e071..00000000000 --- a/concourse/pipeline/job_def.lib.yml +++ /dev/null @@ -1,238 +0,0 @@ -#@ load("base.lib.yml", "add_res_by_conf", "add_res_by_name") -#@ load("@ytt:template", "template") - -#@ def inter_bin_name(base_name, release_build): -#@ if release_build: -#@ return base_name + "_rel" -#@ end -#@ -#@ return base_name -#@ end - -#! Job config for centos6 -#! Use bin_gpdb_postfix="" to use a release version of gpdb binary -#@ def centos6_gpdb6_conf(release_build=False): -res_build_image: centos6-gpdb6-image-build -res_test_images: [centos6-gpdb6-image-test] -res_gpdb_bin: #@ "bin_gpdb6_centos6" + ("" if release_build else "_debug") -res_diskquota_bin: bin_diskquota_gpdb6_rhel6 -res_intermediates_bin: #@ inter_bin_name("bin_diskquota_gpdb6_rhel6_intermediates", release_build) -release_bin: bin_diskquota_gpdb6_rhel6_release -os: rhel6 -gpdb_ver: 6 -build_type: #@ "Release" if release_build else "Debug" -#@ end - -#! Job config for centos7 -#@ def centos7_gpdb6_conf(release_build=False): -res_build_image: centos7-gpdb6-image-build -res_test_images: [centos7-gpdb6-image-test] -res_gpdb_bin: #@ "bin_gpdb6_centos7" + ("" if release_build else "_debug") -res_diskquota_bin: bin_diskquota_gpdb6_rhel7 -res_intermediates_bin: #@ inter_bin_name("bin_diskquota_gpdb6_rhel7_intermediates", release_build) -release_bin: bin_diskquota_gpdb6_rhel7_release -os: rhel7 -gpdb_ver: 6 -build_type: #@ "Release" if release_build else "Debug" -#@ end - -#! 
Job config for rhel8 -#@ def rhel8_gpdb6_conf(release_build=False): -res_build_image: rhel8-gpdb6-image-build -res_test_images: [rhel8-gpdb6-image-test] -res_gpdb_bin: #@ "bin_gpdb6_rhel8" + ("" if release_build else "_debug") -res_diskquota_bin: bin_diskquota_gpdb6_rhel8 -res_intermediates_bin: #@ inter_bin_name("bin_diskquota_gpdb6_rhel8_intermediates", release_build) -release_bin: bin_diskquota_gpdb6_rhel8_release -os: rhel8 -gpdb_ver: 6 -build_type: #@ "Release" if release_build else "Debug" -#@ end - -#! Job config for ubuntu18 -#@ def ubuntu18_gpdb6_conf(release_build=False): -res_build_image: ubuntu18-gpdb6-image-build -res_test_images: [ubuntu18-gpdb6-image-test] -res_gpdb_bin: #@ "bin_gpdb6_ubuntu18" + ("" if release_build else "_debug") -res_diskquota_bin: bin_diskquota_gpdb6_ubuntu18 -res_intermediates_bin: #@ inter_bin_name("bin_diskquota_gpdb6_ubuntu18_intermediates", release_build) -release_bin: bin_diskquota_gpdb6_ubuntu18_release -os: ubuntu18.04 -gpdb_ver: 6 -build_type: #@ "Release" if release_build else "Debug" -#@ end - -#! Job config for GPDB7, rhel8 -#@ def rhel8_gpdb7_conf(release_build=False): -res_build_image: rocky8-gpdb7-image-build -res_test_images: [rocky8-gpdb7-image-test, rhel8-gpdb7-image-test] -res_gpdb_bin: #@ "bin_gpdb7_el8" + ("" if release_build else "_debug") -res_diskquota_bin: bin_diskquota_gpdb7_rhel8 -res_intermediates_bin: #@ inter_bin_name("bin_diskquota_gpdb7_rhel8_intermediates", release_build) -release_bin: bin_diskquota_gpdb7_rhel8_release -os: rhel8 -gpdb_ver: 7 -build_type: #@ "Release" if release_build else "Debug" -#@ end - -#! The entry point of a pipeline. The job name must be 'entrance'. -#@ def entrance_job(param): -#@ trigger = param["trigger"] -name: entrance -on_failure: #@ trigger["on_failure"] -on_error: #@ trigger["on_error"] -plan: -#@ for to_get in trigger["to_get"]: -- trigger: #@ trigger["auto_trigger"] - _: #@ template.replace(to_get) -#@ end -#@ for to_put in trigger["to_put_pre"]: -- #@ to_put -#@ end -#@ end - -#@ def exit_job(param): -#@ trigger = param["trigger"] -#@ confs = param["confs"] -#@ passed_jobs = [] -#@ for conf in confs: -#@ passed_jobs.append(build_test_job_name(conf)) -#@ end -name: exit -on_failure: #@ trigger["on_failure"] -on_error: #@ trigger["on_error"] -on_success: #@ trigger["on_success"] -plan: -#@ for to_get in trigger["to_get"]: -- passed: passed_jobs - trigger: true - _: #@ template.replace(to_get) -#@ end -#@ end - -#@ def exit_pr_job(param): -#@ trigger = param["trigger"] -#@ confs = param["confs"] -#@ passed_jobs = [] -#@ for conf in confs: -#@ passed_jobs.append(build_test_job_name(conf)) -#@ end -name: exit_pr -on_failure: #@ trigger["on_failure"] -on_error: #@ trigger["on_error"] -on_success: #@ trigger["on_success"] -plan: -#@ for to_get in trigger["to_get"]: -- passed: #@ passed_jobs - trigger: true - _: #@ template.replace(to_get) -#@ end -#@ end - -#! The final release job -#! 1. Push the artifacts to the release bucket -#! 2. 
Push the git tag -#@ def exit_release_job(param): -#@ trigger = param["trigger"] -#@ confs = param["confs"] -#@ passed_jobs = [] -#@ res_map = param["res_map"] -#@ for conf in confs: -#@ passed_jobs.append(build_test_job_name(conf)) -#@ add_res_by_name(res_map, conf["release_bin"]) -#@ end -name: exit_release -on_failure: #@ trigger["on_failure"] -on_error: #@ trigger["on_error"] -on_success: #@ trigger["on_success"] -plan: -#@ for to_get in trigger["to_get"]: -- passed: #@ passed_jobs - _: #@ template.replace(to_get) -#@ end -- in_parallel: - steps: -#@ for i in range(len(confs)): -#@ conf = confs[i] - - do: - - get: #@ conf["res_intermediates_bin"] - passed: - - #@ passed_jobs[i] - params: - unpack: true - - put: #@ conf["release_bin"] - params: - file: #@ conf["res_intermediates_bin"] + "/diskquota-*-*.tar.gz" -#@ end -#@ for to_put in trigger["to_put_post"]: -- #@ to_put -#@ end -#@ end - -#@ def _build_task(conf): -task: #@ "build_" + conf["os"] -file: diskquota_src/concourse/tasks/build_diskquota.yml -image: #@ conf["res_build_image"] -input_mapping: - bin_gpdb: #@ conf["res_gpdb_bin"] -params: - DISKQUOTA_OS: #@ conf["os"] - BUILD_TYPE: #@ conf["build_type"] -#@ end - -#@ def _test_task(conf): -#@ images = conf['res_test_images'] -in_parallel: -#@ for image in images: -#@ test_os = image.split("-")[0] - - task: #@ "test_" + test_os - timeout: 2h - file: diskquota_src/concourse/tasks/test_diskquota.yml - image: #@ image - input_mapping: - bin_gpdb: #@ conf["res_gpdb_bin"] - bin_diskquota: diskquota_artifacts - params: - DISKQUOTA_OS: #@ conf["os"] -#@ end -#@ end - -#@ def build_test_job_name(conf): -#@ return "build_test_gpdb{}_{}".format(conf["gpdb_ver"], conf["os"]) -#@ end -#@ def build_test_job(param): -#@ res_map = param["res_map"] -#@ trigger = param["trigger"] -#@ conf = param["conf"] -#@ res_gpdb_src = "gpdb{}_src".format(conf['gpdb_ver']) -#@ add_res_by_name(res_map, res_gpdb_src) -#@ add_res_by_name(res_map, "bin_cmake") -#@ add_res_by_name(res_map, "bin_diskquota_intermediates") -#@ add_res_by_conf(res_map, conf) -name: #@ build_test_job_name(conf) -max_in_flight: 10 -on_failure: #@ trigger["on_failure"] -on_error: #@ trigger["on_error"] -plan: -#@ for to_get in trigger["to_get"]: -- passed: [entrance] - trigger: true - _: #@ template.replace(to_get) -#@ end -- in_parallel: - - get: gpdb_src - resource: #@ res_gpdb_src - - get: bin_cmake - - get: #@ conf["res_build_image"] -#@ for test_image in conf["res_test_images"]: - - get: #@ test_image -#@ end - - get: #@ conf["res_gpdb_bin"] - - get: last_released_diskquota_bin - resource: #@ conf["res_diskquota_bin"] -- #@ _build_task(conf) -- #@ _test_task(conf) -- put: #@ conf["res_intermediates_bin"] - params: - file: diskquota_artifacts/diskquota.tar.gz -#@ end diff --git a/concourse/pipeline/pr.yml b/concourse/pipeline/pr.yml deleted file mode 100644 index 4a715120c24..00000000000 --- a/concourse/pipeline/pr.yml +++ /dev/null @@ -1,49 +0,0 @@ -#@ load("job_def.lib.yml", -#@ "entrance_job", -#@ "exit_pr_job", -#@ "build_test_job", -#@ "centos6_gpdb6_conf", -#@ "centos7_gpdb6_conf", -#@ "rhel8_gpdb6_conf", -#@ "ubuntu18_gpdb6_conf", -#@ "rhel8_gpdb7_conf" -#@ ) -#@ load("trigger_def.lib.yml", -#@ "pr_trigger", -#@ ) -#@ load("base.lib.yml", -#@ "declare_res", -#@ "declare_res_type") -#@ res_map = {} -#@ res_type_map = {} -#@ trigger = pr_trigger(res_map) -#@ confs = [ -#@ centos6_gpdb6_conf(), -#@ centos7_gpdb6_conf(), -#@ rhel8_gpdb6_conf(), -#@ ubuntu18_gpdb6_conf(), -#@ rhel8_gpdb7_conf(), -#@ ] -jobs: -#@ param = { -#@ 
"res_map": res_map, -#@ "trigger": trigger, -#@ } -- #@ entrance_job(param) -#@ for conf in confs: -#@ param = { -#@ "res_map": res_map, -#@ "trigger": trigger, -#@ "conf": conf -#@ } -- #@ build_test_job(param) -#@ end -#@ param = { -#@ "trigger": trigger, -#@ "confs": confs -#@ } -- #@ exit_pr_job(param) - -resources: #@ declare_res(res_type_map, res_map) - -resource_types: #@ declare_res_type(res_type_map) diff --git a/concourse/pipeline/release.yml b/concourse/pipeline/release.yml deleted file mode 100644 index 023e86bd88f..00000000000 --- a/concourse/pipeline/release.yml +++ /dev/null @@ -1,48 +0,0 @@ -#@ load("job_def.lib.yml", -#@ "entrance_job", -#@ "build_test_job", -#@ "exit_release_job", -#@ "centos6_gpdb6_conf", -#@ "centos7_gpdb6_conf", -#@ "rhel8_gpdb6_conf", -#@ "ubuntu18_gpdb6_conf", -#@ "rhel8_gpdb7_conf") -#@ load("trigger_def.lib.yml", -#@ "release_trigger", -#@ ) -#@ -#@ load("base.lib.yml", "declare_res", "declare_res_type") -#@ res_map = {} -#@ res_type_map = {} -#@ trigger = release_trigger(res_map) -#@ confs = [ -#@ centos6_gpdb6_conf(release_build=True), -#@ centos7_gpdb6_conf(release_build=True), -#@ rhel8_gpdb6_conf(release_build=True), -#@ ubuntu18_gpdb6_conf(release_build=True), -#@ rhel8_gpdb7_conf(release_build=True) -#@ ] -jobs: -#@ param = { -#@ "res_map": res_map, -#@ "trigger": trigger, -#@ } -- #@ entrance_job(param) -#@ for conf in confs: -#@ param = { -#@ "res_map": res_map, -#@ "trigger": trigger, -#@ "conf": conf -#@ } -- #@ build_test_job(param) -#@ end -#@ param = { -#@ "res_map": res_map, -#@ "trigger": trigger, -#@ "confs": confs -#@ } -- #@ exit_release_job(param) - -resources: #@ declare_res(res_type_map, res_map) - -resource_types: #@ declare_res_type(res_type_map) diff --git a/concourse/pipeline/res_def.yml b/concourse/pipeline/res_def.yml deleted file mode 100644 index 89382604596..00000000000 --- a/concourse/pipeline/res_def.yml +++ /dev/null @@ -1,185 +0,0 @@ -resources: -# Pull Request -- name: diskquota_pr - type: pull-request - # We should rely on the webhook. See README if webhook doesn't work - webhook_token: ((extension/diskquota-webhook-token)) - check_every: 24h - source: - disable_forks: true - repository: greenplum-db/diskquota - access_token: ((extension/github-access-token)) -# Commit trigger -- name: diskquota_commit - type: git - # We should rely on the webhook. See README if webhook doesn't work - webhook_token: ((extension/diskquota-webhook-token)) - check_every: 1h - source: - branch: ((diskquota-branch)) - uri: https://github.com/greenplum-db/diskquota.git - username: ((extension/github-access-token)) - password: x-oauth-basic -# Commit dev trigger. 
Not using webhook -- name: diskquota_commit_dev - type: git - check_every: 1m - source: - branch: ((diskquota-branch)) - uri: https://github.com/greenplum-db/diskquota.git - username: ((extension/github-access-token)) - password: x-oauth-basic - - -# Diskquota releases -- name: bin_diskquota_gpdb6_rhel6 - type: gcs - source: - bucket: pivotal-gpdb-concourse-resources-prod - json_key: ((concourse-gcs-resources-service-account-key)) - regexp: diskquota/released/gpdb6/diskquota-(.*)-rhel6_x86_64.tar.gz - -- name: bin_diskquota_gpdb6_rhel7 - type: gcs - source: - bucket: pivotal-gpdb-concourse-resources-prod - json_key: ((concourse-gcs-resources-service-account-key)) - regexp: diskquota/released/gpdb6/diskquota-(.*)-rhel7_x86_64.tar.gz - -- name: bin_diskquota_gpdb6_rhel8 - type: gcs - source: - bucket: pivotal-gpdb-concourse-resources-prod - json_key: ((concourse-gcs-resources-service-account-key)) - regexp: diskquota/released/gpdb6/diskquota-(.*)-rhel8_x86_64.tar.gz - -- name: bin_diskquota_gpdb6_ubuntu18 - type: gcs - source: - bucket: pivotal-gpdb-concourse-resources-prod - json_key: ((concourse-gcs-resources-service-account-key)) - regexp: diskquota/released/gpdb6/diskquota-(.*)-ubuntu18.04_x86_64.tar.gz - -- name: bin_diskquota_gpdb7_rhel8 - type: gcs - source: - bucket: pivotal-gpdb-concourse-resources-prod - json_key: ((concourse-gcs-resources-service-account-key)) - regexp: diskquota/released/gpdb7/diskquota-(.*)-rhel8_x86_64.tar.gz - -# For uploading every build to gcs -# Dev -- name: bin_diskquota_gpdb6_rhel6_intermediates - type: gcs - source: - bucket: gp-extensions-ci - json_key: ((extension/extensions-gcs-service-account-key-dev2)) - versioned_file: intermediates/diskquota/diskquota_rhel6_gpdb6.tar.gz - -- name: bin_diskquota_gpdb6_rhel7_intermediates - type: gcs - source: - bucket: gp-extensions-ci - json_key: ((extension/extensions-gcs-service-account-key-dev2)) - versioned_file: intermediates/diskquota/diskquota_rhel7_gpdb6.tar.gz - -- name: bin_diskquota_gpdb6_rhel8_intermediates - type: gcs - source: - bucket: gp-extensions-ci - json_key: ((extension/extensions-gcs-service-account-key-dev2)) - versioned_file: intermediates/diskquota/diskquota_rhel8_gpdb6.tar.gz - -- name: bin_diskquota_gpdb6_ubuntu18_intermediates - type: gcs - source: - bucket: gp-extensions-ci - json_key: ((extension/extensions-gcs-service-account-key-dev2)) - versioned_file: intermediates/diskquota/diskquota_ubuntu18_gpdb6.tar.gz - -- name: bin_diskquota_gpdb7_rhel8_intermediates - type: gcs - source: - bucket: gp-extensions-ci - json_key: ((extension/extensions-gcs-service-account-key-dev2)) - versioned_file: intermediates/diskquota/diskquota_rhel8_gpdb7.tar.gz - -# Rel -- name: bin_diskquota_gpdb6_rhel6_intermediates_rel - type: gcs - source: - bucket: gp-extensions-ci - json_key: ((extension/extensions-gcs-service-account-key-dev2)) - versioned_file: intermediates_release/diskquota/diskquota_rhel6_gpdb6.tar.gz - -- name: bin_diskquota_gpdb6_rhel7_intermediates_rel - type: gcs - source: - bucket: gp-extensions-ci - json_key: ((extension/extensions-gcs-service-account-key-dev2)) - versioned_file: intermediates_release/diskquota/diskquota_rhel7_gpdb6.tar.gz - -- name: bin_diskquota_gpdb6_rhel8_intermediates_rel - type: gcs - source: - bucket: gp-extensions-ci - json_key: ((extension/extensions-gcs-service-account-key-dev2)) - versioned_file: intermediates_release/diskquota/diskquota_rhel8_gpdb6.tar.gz - -- name: bin_diskquota_gpdb6_ubuntu18_intermediates_rel - type: gcs - source: - bucket: 
gp-extensions-ci - json_key: ((extension/extensions-gcs-service-account-key-dev2)) - versioned_file: intermediates_release/diskquota/diskquota_ubuntu18_gpdb6.tar.gz - -- name: bin_diskquota_gpdb7_rhel8_intermediates_rel - type: gcs - source: - bucket: gp-extensions-ci - json_key: ((extension/extensions-gcs-service-account-key-dev2)) - versioned_file: intermediates_release/diskquota/diskquota_rhel8_gpdb7.tar.gz - -# For uploading to the release bucket -- name: bin_diskquota_gpdb6_rhel6_release - type: gcs - source: - bucket: pivotal-gpdb-concourse-resources-prod - json_key: ((concourse-gcs-resources-service-account-key)) - regexp: diskquota/released/gpdb6/diskquota-(.*).tar.gz - -- name: bin_diskquota_gpdb6_rhel7_release - type: gcs - source: - bucket: pivotal-gpdb-concourse-resources-prod - json_key: ((concourse-gcs-resources-service-account-key)) - regexp: diskquota/released/gpdb6/diskquota-(.*).tar.gz - -- name: bin_diskquota_gpdb6_rhel8_release - type: gcs - source: - bucket: pivotal-gpdb-concourse-resources-prod - json_key: ((concourse-gcs-resources-service-account-key)) - regexp: diskquota/released/gpdb6/diskquota-(.*).tar.gz - -- name: bin_diskquota_gpdb6_ubuntu18_release - type: gcs - source: - bucket: pivotal-gpdb-concourse-resources-prod - json_key: ((concourse-gcs-resources-service-account-key)) - regexp: diskquota/released/gpdb6/diskquota-(.*).tar.gz - -- name: bin_diskquota_gpdb7_rhel8_release - type: gcs - source: - bucket: pivotal-gpdb-concourse-resources-prod - json_key: ((concourse-gcs-resources-service-account-key)) - regexp: diskquota/released/gpdb7/diskquota-(.*).tar.gz - -# Other dependencies -- name: bin_cmake - type: gcs - source: - bucket: gp-extensions-ci - json_key: ((extension/extensions-gcs-service-account-key-dev2)) - regexp: dependencies/cmake-(.*)-linux-x86_64.sh diff --git a/concourse/pipeline/trigger_def.lib.yml b/concourse/pipeline/trigger_def.lib.yml deleted file mode 100644 index 607cb7a3fa3..00000000000 --- a/concourse/pipeline/trigger_def.lib.yml +++ /dev/null @@ -1,101 +0,0 @@ -#@ load("base.lib.yml", "add_res_by_name") - -#! PR trigger. For pull request pipelines -#@ def pr_trigger(res_map): -#@ add_res_by_name(res_map, "diskquota_pr") -auto_trigger: true -to_get: - - get: diskquota_src - resource: diskquota_pr - params: - fetch_tags: true -to_put_pre: - - put: diskquota_pr - params: - path: diskquota_src - status: pending -to_put_post: #@ [] -on_failure: - put: diskquota_pr - params: - path: diskquota_src - status: failure -on_error: - put: diskquota_pr - params: - path: diskquota_src - status: failure -on_success: - put: diskquota_pr - params: - path: diskquota_src - status: success -#@ end - -#! Commit trigger. For master pipelines -#@ def commit_trigger(res_map): -#@ add_res_by_name(res_map, "diskquota_commit") -#@ add_res_by_name(res_map, "slack_notify_extensions") -auto_trigger: true -to_get: -- get: diskquota_src - resource: diskquota_commit -to_put_pre: #@ [] -to_put_post: #@ [] -#! To set the github commit status, https://github.com/Pix4D/cogito is a good choice. -#! Unfortunately it doesn't work with Concourse 5. -on_success: -on_failure: - put: slack_notify_extensions - params: - alert_type: failed -on_error: - put: slack_notify_extensions - params: - alert_type: errored -#@ end - -#! Commit trigger. For dev pipelines. 
No webhook -#@ def commit_dev_trigger(res_map): -#@ add_res_by_name(res_map, "diskquota_commit_dev") -auto_trigger: true -to_get: -- get: diskquota_src - resource: diskquota_commit_dev -to_put_pre: #@ [] -to_put_post: #@ [] -#! To set the github commit status, https://github.com/Pix4D/cogito is a good choice. -#! Unfortunately it doesn't work with Concourse 5. -on_success: -on_failure: -on_error: -#@ end - -#! Commit trigger. For release pipelines -#@ def release_trigger(res_map): -#@ add_res_by_name(res_map, "diskquota_commit") -#@ add_res_by_name(res_map, "slack_notify_extensions") -auto_trigger: true -to_get: -- get: diskquota_src - resource: diskquota_commit -to_put_pre: #@ [] -to_put_post: -- put: diskquota_commit - params: - repository: diskquota_src - tag: diskquota_src/VERSION -on_success: - put: slack_notify_extensions - params: - alert_type: success - text: A new diskquota release has been pushed! -on_failure: - put: slack_notify_extensions - params: - alert_type: failed -on_error: - put: slack_notify_extensions - params: - alert_type: errored -#@ end diff --git a/concourse/pre_build.sh b/concourse/pre_build.sh new file mode 100755 index 00000000000..9d9adf90a7b --- /dev/null +++ b/concourse/pre_build.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +set -ex + +# shellcheck source=/dev/null +source "$CI_REPO_DIR/common/entry_common.sh" + +install_cmake diff --git a/concourse/pre_test.sh b/concourse/pre_test.sh new file mode 100755 index 00000000000..6e7319eb8e4 --- /dev/null +++ b/concourse/pre_test.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -ex + +# shellcheck source=/dev/null +source "$CI_REPO_DIR/common/entry_common.sh" + +install_cmake + +start_gpdb + +create_fake_gpdb_src diff --git a/concourse/scripts/entry.sh b/concourse/scripts/entry.sh deleted file mode 100755 index b3c70ddd653..00000000000 --- a/concourse/scripts/entry.sh +++ /dev/null @@ -1,182 +0,0 @@ -#!/bin/bash - -# Entry point for GPDB source & cluster related tasks. -# This script sets up a basic build/test environment, including: -# - Create a gpadmin user -# - Copy all files from /tmp/build/xxx (concourse WORKDIR) to /home/gpadmin/ and chown to gpadmin -# - Install some dependencies that don't exist in the build/test image. -# - Special setup for individual tasks which need root permission. -# - At the end, call the task script with gpadmin permission. -# -# Simple rules: -# 1. Any root operations should happen in this script. -# 2. Task script only requires gpadmin permission. -# 3. Since everything has been copied to the /home/gpadmin directory, use absolute paths as much -# as possible in the task script; it will reduce the confusion when we fly into the concourse -# container. -# 4. Bash functions should be idempotent as much as possible to make fly hijack debugging easier. - -set -eox - -if [[ ! ${PWD} =~ /tmp/build/[0-9a-z]* ]]; then - echo "This script should always be started from concourse WORKDIR." -fi - -# Internal utility functions -_determine_os() { - local name version - if [ -f /etc/redhat-release ]; then - name="centos" - version=$(sed /dev/stderr - exit 1 - fi - echo "${name}${version}" -} - -# Global ENV defines -# /tmp/build/xxxxx. It should not be used in normal conditions. Use /home/gpadmin instead. -# Everything has been linked there. -export CONCOURSE_WORK_DIR=${PWD} - - -# Dependency installers -# Ideally all dependencies should exist in the docker image. Use this script to install them only -# if it is more difficult to change it in the image side.
-# Download the dependencies with concourse resources as much as possible, then we could benefit from -# concourse's resource cache system. -install_cmake() { - # cmake_new to avoid name collision with the docker image. - local cmake_home="/opt/cmake_new" - if [ -e "${cmake_home}" ]; then - echo "cmake might have been installed in ${cmake_home}" - return - fi - echo "Installing cmake to ${cmake_home}..." - pushd bin_cmake - mkdir -p "${cmake_home}" - sh cmake-*-linux-x86_64.sh --skip-license --prefix="${cmake_home}" - popd - echo "export PATH=${cmake_home}/bin:$PATH" >> /home/gpadmin/.bashrc -} - -# Create gpadmin user and chown all files in the PWD. All files will be linked to /home/gpadmin. -# All of our work should be started from there. -setup_gpadmin() { - # If the gpadmin user exists, quit - if grep -c '^gpadmin:' /etc/passwd; then - return - fi - - # If the image has sshd, then we call gpdb's setup_gpadmin_user.sh to create the gpadmin user - # and setup the ssh. - # Otherwise, create the gpadmin user only. - if [ -f /etc/ssh/sshd_config ]; then - local gpdb_concourse_dir="${CONCOURSE_WORK_DIR}/gpdb_src/concourse/scripts" - "${gpdb_concourse_dir}/setup_gpadmin_user.bash" - else - local test_os=$(_determine_os) - # Below is copied from setup_gpadmin_user.bash - groupadd supergroup - case "$test_os" in - centos*) - /usr/sbin/useradd -G supergroup,tty gpadmin - ;; - ubuntu*) - /usr/sbin/useradd -G supergroup,tty gpadmin -s /bin/bash - ;; - sles*) - # create a default group gpadmin, and add user gpadmin to group gpadmin, supergroup, - # tty - /usr/sbin/useradd -U -G supergroup,tty gpadmin - ;; - photon*) - /usr/sbin/useradd -U -G supergroup,tty,root gpadmin - ;; - *) echo "Unknown OS: $test_os"; exit 1 ;; - esac - fi - mkdir -p /home/gpadmin - chown gpadmin:gpadmin /home/gpadmin - - chown -R gpadmin:gpadmin /tmp/build - ln -s "${CONCOURSE_WORK_DIR}"/* /home/gpadmin -} - -# Extract gpdb binary -function install_gpdb() { - [ ! -d /usr/local/greenplum-db-devel ] && mkdir -p /usr/local/greenplum-db-devel - tar -xzf "${CONCOURSE_WORK_DIR}"/bin_gpdb/*.tar.gz -C /usr/local/greenplum-db-devel - chown -R gpadmin:gpadmin /usr/local/greenplum-db-devel -} - -## Currently, the isolation2 testing framework relies on pg_isolation2_regress, so we -## should build it from source. However, in concourse, the gpdb_bin is fetched -## from a remote machine, so the $(abs_top_srcdir) variable points to a non-existing -## location; we fix this issue by creating a symbolic link for it. -function create_fake_gpdb_src() { - local fake_gpdb_src - fake_gpdb_src="$(\ - grep -rhw '/usr/local/greenplum-db-devel' -e 'abs_top_srcdir = .*' |\ - head -n 1 | awk '{ print $NF; }')" - - if [ -d "${fake_gpdb_src}" ]; then - echo "Fake gpdb source directory has been configured." - return - fi - - pushd /home/gpadmin/gpdb_src - ./configure --prefix=/usr/local/greenplum-db-devel \ - --without-zstd \ - --disable-orca --disable-gpcloud --enable-debug-extensions - popd - - local fake_root - fake_root=$(dirname "${fake_gpdb_src}") - mkdir -p "${fake_root}" - ln -s /home/gpadmin/gpdb_src "${fake_gpdb_src}" -} - -# Setup common environment
setup_gpadmin -install_cmake -install_gpdb - -# Do the special setup with root permission for each task, then run the real task script with -# gpadmin. bashrc won't be read by 'su', it needs to be sourced explicitly.
-case "$1" in - build) - su gpadmin -c \ - "source /home/gpadmin/.bashrc &&\ - /home/gpadmin/diskquota_src/concourse/scripts/build_diskquota.sh" - ;; - test) - # Build task output is diskquota_artifacts, which is different from the test task input - # diskquota_bin. Ideally we can use the same name for input and output. But that will cause - # compatibility issues with the 1.x pipeline script. - ln -s /home/gpadmin/bin_diskquota /home/gpadmin/diskquota_artifacts - create_fake_gpdb_src - # Create GPDB cluster - source "/home/gpadmin/gpdb_src/concourse/scripts/common.bash" - make_cluster - # To make fly debug easier - echo "source /usr/local/greenplum-db-devel/greenplum_path.sh" >> /home/gpadmin/.bashrc - su gpadmin -c \ - "source /home/gpadmin/.bashrc &&\ - /home/gpadmin/diskquota_src/concourse/scripts/test_diskquota.sh" - ;; - *) - echo "Unknown target task $1" - exit 1 - ;; -esac diff --git a/concourse/scripts/upgrade_extension.sh b/concourse/scripts/upgrade_extension.sh deleted file mode 100755 index 2f1e4f94f33..00000000000 --- a/concourse/scripts/upgrade_extension.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/bin/bash -l - -set -exo pipefail - -CWDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" -TOP_DIR=${CWDIR}/../../../ -GPDB_CONCOURSE_DIR=${TOP_DIR}/gpdb_src/concourse/scripts -CUT_NUMBER=6 - -source "${GPDB_CONCOURSE_DIR}/common.bash" -source "${TOP_DIR}/diskquota_src/concourse/scripts/test_common.sh" - -# those two functions are called by upgrade_test -function install_old_version_diskquota() { - tar -xzf ../../bin_diskquota_old/*.tar.gz -C /usr/local/greenplum-db-devel -} - -function install_new_version_diskquota() { - # the current dir is upgrade_test - tar -xzf ../../bin_diskquota_new/*.tar.gz -C /usr/local/greenplum-db-devel -} - -function _main() { - time install_gpdb - time setup_gpadmin_user - - time make_cluster - if [ "${DISKQUOTA_OS}" == "ubuntu18.04" ]; then - CUT_NUMBER=6 - fi - # first install an old version of diskquota to start diskquota - tar -xzf bin_diskquota_old/*.tar.gz -C /usr/local/greenplum-db-devel - # export the install_old_version_diskquota and install_new_version_diskquota functions, because they will be called by upgrade_test - export -f install_old_version_diskquota - export -f install_new_version_diskquota - time test ${TOP_DIR}/diskquota_src/upgrade_test false -} - -_main "$@" diff --git a/concourse/tasks/build_diskquota.yml b/concourse/tasks/build_diskquota.yml deleted file mode 100644 index cacf0fb2c9b..00000000000 --- a/concourse/tasks/build_diskquota.yml +++ /dev/null @@ -1,20 +0,0 @@ -platform: linux -image_resource: - type: docker-image -inputs: - - name: bin_gpdb - - name: diskquota_src - - name: gpdb_src - - name: bin_cmake - - name: last_released_diskquota_bin - -outputs: - - name: diskquota_artifacts - -run: - path: diskquota_src/concourse/scripts/entry.sh - args: - - build -params: - DISKQUOTA_OS: - BUILD_TYPE: diff --git a/concourse/tasks/test_diskquota.yml b/concourse/tasks/test_diskquota.yml deleted file mode 100644 index ed9bfdc2a60..00000000000 --- a/concourse/tasks/test_diskquota.yml +++ /dev/null @@ -1,16 +0,0 @@ -platform: linux -image_resource: - type: docker-image -inputs: - - name: bin_gpdb - - name: diskquota_src - - name: bin_diskquota - - name: gpdb_src - - name: bin_cmake - -run: - path: diskquota_src/concourse/scripts/entry.sh - args: - - test -params: - DISKQUOTA_OS: diff --git a/concourse/tasks/upgrade_extension.yml b/concourse/tasks/upgrade_extension.yml deleted file mode 100644 index d0d1bf9d14d..00000000000 ---
a/concourse/tasks/upgrade_extension.yml +++ /dev/null @@ -1,16 +0,0 @@ -platform: linux -image_resource: - type: docker-image -inputs: - - name: bin_gpdb - - name: bin_diskquota_old - - name: bin_diskquota_new - - name: gpdb_src - - name: diskquota_src - -run: - path: diskquota_src/concourse/scripts/upgrade_extension.sh -params: - DISKQUOTA_OS: - OLD_VERSION: - NEW_VERSION: diff --git a/concourse/scripts/test_diskquota.sh b/concourse/test.sh similarity index 93% rename from concourse/scripts/test_diskquota.sh rename to concourse/test.sh index 686d279c12c..76c57b91246 100755 --- a/concourse/scripts/test_diskquota.sh +++ b/concourse/test.sh @@ -19,7 +19,7 @@ function _main() { local tmp_dir="$(mktemp -d)" tar -xzf /home/gpadmin/bin_diskquota/diskquota-*-*.tar.gz -C "$tmp_dir" pushd "$tmp_dir" - ./install_gpdb_component + ./install_gpdb_component popd source /home/gpadmin/gpdb_src/gpAux/gpdemo/gpdemo-env.sh @@ -28,7 +28,7 @@ function _main() { make -C src/test/isolation2 install popd - pushd /home/gpadmin/diskquota_artifacts + pushd /home/gpadmin/bin_diskquota # Show regress diff if test fails export SHOW_REGRESS_DIFF=1 time cmake --build . --target installcheck From 240ad95facfa55ad36fc091d8f5e5d899027eb31 Mon Sep 17 00:00:00 2001 From: Zhang Hao Date: Thu, 19 Oct 2023 14:30:32 +0800 Subject: [PATCH 317/330] Build diskquota for rocky9. (#386) - skip adding the last version to the current build. - skip upgrade test. --- concourse/build.sh | 20 ++++++++++++++------ concourse/test.sh | 6 ++++++ 2 files changed, 20 insertions(+), 6 deletions(-) diff --git a/concourse/build.sh b/concourse/build.sh index f752d0434b4..97d9acccf09 100755 --- a/concourse/build.sh +++ b/concourse/build.sh @@ -11,12 +11,20 @@ function pkg() { export CXX="$(which g++)" pushd /home/gpadmin/bin_diskquota - local last_release_path - last_release_path=$(readlink -eq /home/gpadmin/last_released_diskquota_bin/diskquota-*.tar.gz) - cmake /home/gpadmin/diskquota_src \ - -DDISKQUOTA_LAST_RELEASE_PATH="${last_release_path}" \ - -DCMAKE_BUILD_TYPE="${BUILD_TYPE}" - cmake --build . --target create_artifact + if [[ $OS_NAME == "rhel9" ]] + then + cmake /home/gpadmin/diskquota_src \ + -DCMAKE_BUILD_TYPE="${BUILD_TYPE}" \ + -DDISKQUOTA_DDL_CHANGE_CHECK=off + cmake --build . --target create_artifact + else + local last_release_path + last_release_path=$(readlink -eq /home/gpadmin/last_released_diskquota_bin/diskquota-*.tar.gz) + cmake /home/gpadmin/diskquota_src \ + -DDISKQUOTA_LAST_RELEASE_PATH="${last_release_path}" \ + -DCMAKE_BUILD_TYPE="${BUILD_TYPE}" + cmake --build . --target create_artifact + fi popd } diff --git a/concourse/test.sh b/concourse/test.sh index 76c57b91246..6ed0cffba5d 100755 --- a/concourse/test.sh +++ b/concourse/test.sh @@ -35,11 +35,17 @@ function _main() { # Run test again with standby master activate_standby time cmake --build . --target installcheck + if [[ $OS_NAME != "rhel9" ]] + then # Run upgrade test (with standby master) time cmake --build . --target upgradecheck + fi popd + if [[ $OS_NAME != "rhel9" ]] + then time /home/gpadmin/diskquota_src/upgrade_test/alter_test.sh + fi } _main From 5afcda5b1c47edeb039b62dcd2df0b4d6deb2e52 Mon Sep 17 00:00:00 2001 From: Zhang Hao Date: Thu, 19 Oct 2023 15:50:38 +0800 Subject: [PATCH 318/330] Fix flaky test test_relation_size.
(#387) Co-authored-by: Chen Mulong Co-authored-by: Xing Guo --- .../isolation2/expected7/test_relation_size.out | 17 +++++++++++++++++ tests/isolation2/sql/test_relation_size.sql | 5 +++++ 2 files changed, 22 insertions(+) diff --git a/tests/isolation2/expected7/test_relation_size.out b/tests/isolation2/expected7/test_relation_size.out index 3ddafe8fda5..ee2e4241e82 100644 --- a/tests/isolation2/expected7/test_relation_size.out +++ b/tests/isolation2/expected7/test_relation_size.out @@ -1,3 +1,14 @@ +SELECT diskquota.pause(); + pause +------- + +(1 row) +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + -- -- 1. Test that when a relation is dropped before diskquota.relation_size() -- applying stat(2) on the physical file, diskquota.relation_size() consumes @@ -85,3 +96,9 @@ SELECT pg_relation_size('t_ao'); (1 row) DROP TABLE t_ao; DROP TABLE + +SELECT diskquota.resume(); + resume +-------- + +(1 row) diff --git a/tests/isolation2/sql/test_relation_size.sql b/tests/isolation2/sql/test_relation_size.sql index c8817f52e72..54ea209d5c0 100644 --- a/tests/isolation2/sql/test_relation_size.sql +++ b/tests/isolation2/sql/test_relation_size.sql @@ -1,3 +1,6 @@ +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); + -- -- 1. Test that when a relation is dropped before diskquota.relation_size() -- applying stat(2) on the physical file, diskquota.relation_size() consumes @@ -43,3 +46,5 @@ CREATE TABLE t_ao(i int) WITH (appendonly=true) DISTRIBUTED BY (i); SELECT diskquota.relation_size('t_ao'); SELECT pg_relation_size('t_ao'); DROP TABLE t_ao; + +SELECT diskquota.resume(); From a8c0bb3a3615e0d048e3e804afdd07c45fc665a1 Mon Sep 17 00:00:00 2001 From: Zhang Hao Date: Thu, 19 Oct 2023 16:02:06 +0800 Subject: [PATCH 319/330] Fix wrong test result on GP6. (#388) --- .../isolation2/expected/test_relation_size.out | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/tests/isolation2/expected/test_relation_size.out b/tests/isolation2/expected/test_relation_size.out index 45e9a9cc149..65efe5006ff 100644 --- a/tests/isolation2/expected/test_relation_size.out +++ b/tests/isolation2/expected/test_relation_size.out @@ -1,3 +1,14 @@ +SELECT diskquota.pause(); + pause +------- + +(1 row) +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + -- -- 1. 
Test that when a relation is dropped before diskquota.relation_size() -- applying stat(2) on the physical file, diskquota.relation_size() consumes @@ -85,3 +96,9 @@ SELECT pg_relation_size('t_ao'); (1 row) DROP TABLE t_ao; DROP + +SELECT diskquota.resume(); + resume +-------- + +(1 row) From dd1020a1b5d85f3ba970961922e088be955ec581 Mon Sep 17 00:00:00 2001 From: Zhang Hao Date: Thu, 19 Oct 2023 17:38:36 +0800 Subject: [PATCH 320/330] Fix flaky test test_dbname_encoding (#389) --- tests/regress/expected/test_dbname_encoding.out | 12 ++++++++++++ tests/regress/sql/test_dbname_encoding.sql | 2 ++ 2 files changed, 14 insertions(+) diff --git a/tests/regress/expected/test_dbname_encoding.out b/tests/regress/expected/test_dbname_encoding.out index d7b31373461..67e2f62d4ed 100644 --- a/tests/regress/expected/test_dbname_encoding.out +++ b/tests/regress/expected/test_dbname_encoding.out @@ -23,6 +23,18 @@ WHERE t (1 row) +SELECT diskquota.pause(); + pause +------- + +(1 row) + +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + DROP EXTENSION diskquota; \c contrib_regression DROP DATABASE 数据库1; diff --git a/tests/regress/sql/test_dbname_encoding.sql b/tests/regress/sql/test_dbname_encoding.sql index 408b6a0a5f1..6ae65b3ea15 100644 --- a/tests/regress/sql/test_dbname_encoding.sql +++ b/tests/regress/sql/test_dbname_encoding.sql @@ -16,6 +16,8 @@ WHERE ) > 0 AND position(current_database() in logmessage) > 0; +SELECT diskquota.pause(); +SELECT diskquota.wait_for_worker_new_epoch(); DROP EXTENSION diskquota; \c contrib_regression DROP DATABASE 数据库1; \ No newline at end of file From 9f2b5043c5e47842108ad827a25b1ae4cb5c03ee Mon Sep 17 00:00:00 2001 From: Zhang Hao Date: Fri, 20 Oct 2023 11:20:09 +0800 Subject: [PATCH 321/330] Refactor quota info map (#380) Merge all types of quota info maps into one in each bgworker. 
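As an illustration only, not part of the patch itself: after this refactor every quota kind lives in the single shared quota_info_map, addressed through a zero-initialized composite key. The minimal sketch below mirrors the update_size_for_quota() logic in the diff that follows; namespaceoid and delta are hypothetical placeholders, and the shared-memory probe counter maintained by put_quota_map_entry() is omitted.

/* Minimal sketch of the unified lookup, assuming the structures added by
 * this patch. The key must be zero-initialized because the unused trailing
 * keys[] slots participate in hashing and comparison. */
QuotaInfoEntryKey key = {0};
QuotaInfoEntry   *entry;
bool              found;

key.type    = NAMESPACE_QUOTA;
key.keys[0] = namespaceoid; /* quota_key_num[NAMESPACE_QUOTA] == 1 */
key.segid   = -1;           /* -1 addresses the cluster-wide slot */

entry = hash_search(quota_info_map, &key, HASH_ENTER, &found);
if (!found)
{
	entry->size  = 0;
	entry->limit = -1; /* -1 means no quota limit configured yet */
}
entry->size += delta;

Since the quota type is now part of the key, this one dynahash table replaces the former per-type quota_info[type].map array, and a single sequential scan can visit every quota kind at once.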
--- src/diskquota.c | 4 +- src/diskquota.h | 25 +++++ src/quotamodel.c | 233 ++++++++++++++++++----------------------------- 3 files changed, 115 insertions(+), 147 deletions(-) diff --git a/src/diskquota.c b/src/diskquota.c index 45df2f80d48..92389eac847 100644 --- a/src/diskquota.c +++ b/src/diskquota.c @@ -92,7 +92,7 @@ static int num_db = 0; /* how many TableSizeEntry are maintained in all the table_size_map in shared memory*/ pg_atomic_uint32 *diskquota_table_size_entry_num; -/* how many QuotaMapEntry are maintained in all the quota_info[type].map in shared memory*/ +/* how many QuotaInfoEntry are maintained in all the quota_info_map in shared memory*/ pg_atomic_uint32 *diskquota_quota_info_entry_num; static DiskquotaLauncherShmemStruct *DiskquotaLauncherShmem; @@ -412,7 +412,7 @@ define_guc_variables(void) DefineCustomIntVariable("diskquota.max_monitored_databases", "Max number of database on the cluster.", NULL, &diskquota_max_monitored_databases, 50, 1, 1024, PGC_POSTMASTER, 0, NULL, NULL, NULL); DefineCustomIntVariable("diskquota.max_quotas", "Max number of quotas on the cluster.", NULL, &diskquota_max_quotas, - 1024 * 1024, 1024 * NUM_QUOTA_TYPES, INT_MAX, PGC_POSTMASTER, 0, NULL, NULL, NULL); + 1024 * 1024, 1024 * INIT_QUOTA_MAP_ENTRIES, INT_MAX, PGC_POSTMASTER, 0, NULL, NULL, NULL); } /* ---- Functions for disk quota worker process ---- */ diff --git a/src/diskquota.h b/src/diskquota.h index 70eaf8d3bf9..b9d90ca08ab 100644 --- a/src/diskquota.h +++ b/src/diskquota.h @@ -38,6 +38,14 @@ #define MAX_NUM_TABLE_SIZE_ENTRIES (diskquota_max_table_segments / SEGMENT_SIZE_ARRAY_LENGTH) /* length of segment size array in TableSizeEntry */ #define SEGMENT_SIZE_ARRAY_LENGTH 100 +/* max number of keys in QuotaInfoEntryKey */ +#define MAX_NUM_KEYS_QUOTA_MAP 8 +/* init number of QuotaInfoEntry in quota_info_map */ +#define INIT_QUOTA_MAP_ENTRIES 128 +#define AVG_QUOTA_MAP_ENTRIES (diskquota_max_quotas / diskquota_max_monitored_databases) +/* max number of QuotaInfoEntry in quota_info_map */ +#define MAX_QUOTA_MAP_ENTRIES (AVG_QUOTA_MAP_ENTRIES < 1024 ? 1024 : AVG_QUOTA_MAP_ENTRIES) + typedef enum { DISKQUOTA_TAG_HASH = 0, @@ -88,6 +96,23 @@ typedef enum NUM_QUOTA_TYPES, } QuotaType; +/* + * table disk size and corresponding schema, owner and tablespace + */ +typedef struct QuotaInfoEntryKey +{ + QuotaType type; + Oid keys[MAX_NUM_KEYS_QUOTA_MAP]; + int16 segid; +} QuotaInfoEntryKey; + +typedef struct QuotaInfoEntry +{ + QuotaInfoEntryKey key; + int64 size; + int64 limit; +} QuotaInfoEntry; + typedef enum { FETCH_ACTIVE_OID, /* fetch active table list */ diff --git a/src/quotamodel.c b/src/quotamodel.c index ae04948ac65..93c80e608bc 100644 --- a/src/quotamodel.c +++ b/src/quotamodel.c @@ -50,16 +50,10 @@ #define INIT_DISK_QUOTA_REJECT_ENTRIES 8192 /* per database level max size of rejectmap */ #define MAX_LOCAL_DISK_QUOTA_REJECT_ENTRIES 8192 -#define MAX_NUM_KEYS_QUOTA_MAP 8 /* Number of attributes in quota configuration records. */ #define NUM_QUOTA_CONFIG_ATTRS 6 /* Number of entries for diskquota.table_size update SQL */ #define SQL_MAX_VALUES_NUMBER 1000000 -/* Inital number of entries for hash table in quota_info */ -#define INIT_QUOTA_MAP_ENTRIES (128) -#define AVG_QUOTA_MAP_ENTRIES (diskquota_max_quotas / (diskquota_max_monitored_databases * NUM_QUOTA_TYPES)) -/* Number of entries for hash table in quota_info */ -#define MAX_QUOTA_MAP_ENTRIES (AVG_QUOTA_MAP_ENTRIES < 1024 ? 1024 : AVG_QUOTA_MAP_ENTRIES) /* TableSizeEntry macro function */ /* Use the top bit of totalsize as a flush flag. 
If this bit is set, the size should be flushed into @@ -136,46 +130,21 @@ typedef enum } TableSizeEntryFlag; /* - * table disk size and corresponding schema and owner + * quota_key_num array contains the number of keys for each type of quota. + * |----------------------------|---------------| + * | Quota Type | Number of Key | + * |----------------------------|---------------| + * | NAMESPACE_QUOTA | 1 | + * | ROLE_QUOTA | 1 | + * | NAMESPACE_TABLESPACE_QUOTA | 2 | + * | ROLE_TABLESPACE_QUOTA | 2 | + * | TABLESPACE_QUOTA | 1 | + * |----------------------------|---------------| */ -struct QuotaMapEntryKey -{ - Oid keys[MAX_NUM_KEYS_QUOTA_MAP]; - int16 segid; -}; - -struct QuotaMapEntry -{ - Oid keys[MAX_NUM_KEYS_QUOTA_MAP]; - int16 segid; - int64 size; - int64 limit; -}; - -struct QuotaInfo -{ - char *map_name; - unsigned int num_keys; - Oid *sys_cache; - HTAB *map; -}; - -struct QuotaInfo quota_info[NUM_QUOTA_TYPES] = { - [NAMESPACE_QUOTA] = {.map_name = "Namespace map", - .num_keys = 1, - .sys_cache = (Oid[]){NAMESPACEOID}, - .map = NULL}, - [ROLE_QUOTA] = {.map_name = "Role map", .num_keys = 1, .sys_cache = (Oid[]){AUTHOID}, .map = NULL}, - [NAMESPACE_TABLESPACE_QUOTA] = {.map_name = "Tablespace-namespace map", - .num_keys = 2, - .sys_cache = (Oid[]){NAMESPACEOID, TABLESPACEOID}, - .map = NULL}, - [ROLE_TABLESPACE_QUOTA] = {.map_name = "Tablespace-role map", - .num_keys = 2, - .sys_cache = (Oid[]){AUTHOID, TABLESPACEOID}, - .map = NULL}, - [TABLESPACE_QUOTA] = { - .map_name = "Tablespace map", .num_keys = 1, .sys_cache = (Oid[]){TABLESPACEOID}, .map = NULL}}; +uint16 quota_key_num[NUM_QUOTA_TYPES] = {1, 1, 2, 2, 1}; +Oid quota_key_caches[NUM_QUOTA_TYPES][MAX_NUM_KEYS_QUOTA_MAP] = { + {NAMESPACEOID}, {AUTHOID}, {NAMESPACEOID, TABLESPACEOID}, {AUTHOID, TABLESPACEOID}, {TABLESPACEOID}}; +HTAB *quota_info_map; /* global rejectmap for which exceed their quota limit */ struct RejectMapEntry @@ -226,12 +195,11 @@ static shmem_startup_hook_type prev_shmem_startup_hook = NULL; /* functions to maintain the quota maps */ static void update_size_for_quota(int64 size, QuotaType type, Oid *keys, int16 segid); static void update_limit_for_quota(int64 limit, float segratio, QuotaType type, Oid *keys); -static void remove_quota(QuotaType type, Oid *keys, int16 segid); static void add_quota_to_rejectmap(QuotaType type, Oid targetOid, Oid tablespaceoid, bool segexceeded); -static void check_quota_map(QuotaType type); -static void clear_all_quota_maps(void); +static void refresh_quota_info_map(void); +static void clean_all_quota_limit(void); static void transfer_table_for_quota(int64 totalsize, QuotaType type, Oid *old_keys, Oid *new_keys, int16 segid); -static struct QuotaMapEntry *put_quota_map_entry(HTAB *quota_info_map, struct QuotaMapEntryKey *key, bool *found); +static QuotaInfoEntry *put_quota_map_entry(QuotaInfoEntryKey *key, bool *found); /* functions to refresh disk quota model*/ static void refresh_disk_quota_usage(bool is_init); @@ -255,15 +223,15 @@ static void reset_table_size_entry_flag(TableSizeEntry *entry, TableSizeEntryFla static void set_table_size_entry_flag(TableSizeEntry *entry, TableSizeEntryFlag flag); /* - * put QuotaInfoEntry into quota_info_map and return this entry.
+ * return NULL: no free SHM for quota_info_map * found cannot be NULL */ -static struct QuotaMapEntry * -put_quota_map_entry(HTAB *quota_info_map, struct QuotaMapEntryKey *key, bool *found) +static QuotaInfoEntry * +put_quota_map_entry(QuotaInfoEntryKey *key, bool *found) { - struct QuotaMapEntry *entry; - uint32 counter = pg_atomic_read_u32(diskquota_quota_info_entry_num); + QuotaInfoEntry *entry; + uint32 counter = pg_atomic_read_u32(diskquota_quota_info_entry_num); if (counter >= diskquota_max_quotas) { entry = hash_search(quota_info_map, key, HASH_FIND, found); @@ -296,19 +264,20 @@ put_quota_map_entry(HTAB *quota_info_map, struct QuotaMapEntryKey *key, bool *fo static void update_size_for_quota(int64 size, QuotaType type, Oid *keys, int16 segid) { - bool found; - struct QuotaMapEntryKey key = {0}; - memcpy(key.keys, keys, quota_info[type].num_keys * sizeof(Oid)); - key.segid = segid; - struct QuotaMapEntry *entry = put_quota_map_entry(quota_info[type].map, &key, &found); + bool found; + QuotaInfoEntry *entry; + QuotaInfoEntryKey key = {0}; + + memcpy(key.keys, keys, quota_key_num[type] * sizeof(Oid)); + key.type = type; + key.segid = segid; + entry = put_quota_map_entry(&key, &found); /* If the number of quota exceeds the limit, entry will be NULL */ if (entry == NULL) return; if (!found) { entry->size = 0; entry->limit = -1; - memcpy(entry->keys, keys, quota_info[type].num_keys * sizeof(Oid)); - entry->segid = key.segid; } entry->size += size; } @@ -320,40 +289,26 @@ update_limit_for_quota(int64 limit, float segratio, QuotaType type, Oid *keys) bool found; for (int i = -1; i < SEGCOUNT; i++) { - struct QuotaMapEntryKey key = {0}; - memcpy(key.keys, keys, quota_info[type].num_keys * sizeof(Oid)); - key.segid = i; - struct QuotaMapEntry *entry = put_quota_map_entry(quota_info[type].map, &key, &found); + QuotaInfoEntry *entry; + QuotaInfoEntryKey key = {0}; + + memcpy(key.keys, keys, quota_key_num[type] * sizeof(Oid)); + key.type = type; + key.segid = i; + entry = put_quota_map_entry(&key, &found); /* If the number of quota exceeds the limit, entry will be NULL */ if (entry == NULL) continue; if (!found) { entry->size = 0; - memcpy(entry->keys, keys, quota_info[type].num_keys * sizeof(Oid)); - entry->segid = key.segid; } if (key.segid == -1) - { entry->limit = limit; - } else - { entry->limit = round((limit / SEGCOUNT) * segratio); - } } } -/* remove a entry quota from the map */ -static void -remove_quota(QuotaType type, Oid *keys, int16 segid) -{ - struct QuotaMapEntryKey key = {0}; - memcpy(key.keys, keys, quota_info[type].num_keys * sizeof(Oid)); - key.segid = segid; - hash_search(quota_info[type].map, &key, HASH_REMOVE, NULL); - pg_atomic_fetch_sub_u32(diskquota_quota_info_entry_num, 1); -} - /* * Compare the disk quota limit and current usage of a database object. * Put them into local rejectmap if quota limit is exceeded. @@ -380,23 +335,24 @@ add_quota_to_rejectmap(QuotaType type, Oid targetOid, Oid tablespaceoid, bool se * the quota limit, if it does, add it to the rejectmap. 
*/ static void -check_quota_map(QuotaType type) +refresh_quota_info_map(void) { - HeapTuple tuple; - HASH_SEQ_STATUS iter; - struct QuotaMapEntry *entry; - - hash_seq_init(&iter, quota_info[type].map); + HeapTuple tuple; + HASH_SEQ_STATUS iter; + QuotaInfoEntry *entry; + hash_seq_init(&iter, quota_info_map); while ((entry = hash_seq_search(&iter)) != NULL) { - bool removed = false; - for (int i = 0; i < quota_info[type].num_keys; ++i) + bool removed = false; + QuotaType type = entry->key.type; + for (int i = 0; i < quota_key_num[type]; ++i) { - tuple = SearchSysCache1(quota_info[type].sys_cache[i], ObjectIdGetDatum(entry->keys[i])); + tuple = SearchSysCache1(quota_key_caches[type][i], ObjectIdGetDatum(entry->key.keys[i])); if (!HeapTupleIsValid(tuple)) { - remove_quota(type, entry->keys, entry->segid); + hash_search(quota_info_map, &entry->key, HASH_REMOVE, NULL); + pg_atomic_fetch_sub_u32(diskquota_quota_info_entry_num, 1); removed = true; break; } @@ -406,15 +362,15 @@ check_quota_map(QuotaType type) { if (entry->size >= entry->limit) { - Oid targetOid = entry->keys[0]; + Oid targetOid = entry->key.keys[0]; /* when quota type is not NAMESPACE_TABLESPACE_QUOTA or ROLE_TABLESPACE_QUOTA, the tablespaceoid * is set to be InvalidOid, so when we get it from map, also set it to be InvalidOid */ Oid tablespaceoid = (type == NAMESPACE_TABLESPACE_QUOTA) || (type == ROLE_TABLESPACE_QUOTA) - ? entry->keys[1] + ? entry->key.keys[1] : InvalidOid; - bool segmentExceeded = entry->segid == -1 ? false : true; + bool segmentExceeded = entry->key.segid == -1 ? false : true; add_quota_to_rejectmap(type, targetOid, tablespaceoid, segmentExceeded); } } @@ -430,17 +386,14 @@ transfer_table_for_quota(int64 totalsize, QuotaType type, Oid *old_keys, Oid *ne } static void -clear_all_quota_maps(void) +clean_all_quota_limit(void) { - for (QuotaType type = 0; type < NUM_QUOTA_TYPES; ++type) + HASH_SEQ_STATUS iter; + QuotaInfoEntry *entry; + hash_seq_init(&iter, quota_info_map); + while ((entry = hash_seq_search(&iter)) != NULL) { - HASH_SEQ_STATUS iter = {0}; - hash_seq_init(&iter, quota_info[type].map); - struct QuotaMapEntry *entry = NULL; - while ((entry = hash_seq_search(&iter)) != NULL) - { - entry->limit = -1; - } + entry->limit = -1; } } @@ -584,13 +537,11 @@ DiskQuotaShmemSize(void) if (IS_QUERY_DISPATCHER()) { - int num_quota_info_map = diskquota_max_monitored_databases * NUM_QUOTA_TYPES; - size = add_size(size, diskquota_launcher_shmem_size()); size = add_size(size, sizeof(pg_atomic_uint32)); size = add_size(size, diskquota_worker_shmem_size() * diskquota_max_monitored_databases); - size = add_size(size, - num_quota_info_map * hash_estimate_size(MAX_QUOTA_MAP_ENTRIES, sizeof(struct QuotaMapEntry))); + size = add_size(size, hash_estimate_size(MAX_QUOTA_MAP_ENTRIES, sizeof(QuotaInfoEntry)) * + diskquota_max_monitored_databases); } return size; @@ -624,17 +575,14 @@ init_disk_quota_model(uint32 id) DiskquotaShmemInitHash(str.data, MAX_LOCAL_DISK_QUOTA_REJECT_ENTRIES, MAX_LOCAL_DISK_QUOTA_REJECT_ENTRIES, &hash_ctl, HASH_ELEM, DISKQUOTA_TAG_HASH); - /* for quota_info */ + /* for quota_info_map */ + format_name("QuotaInfoMap", id, &str); + memset(&hash_ctl, 0, sizeof(hash_ctl)); + hash_ctl.entrysize = sizeof(QuotaInfoEntry); + hash_ctl.keysize = sizeof(QuotaInfoEntryKey); + quota_info_map = DiskquotaShmemInitHash(str.data, INIT_QUOTA_MAP_ENTRIES, MAX_QUOTA_MAP_ENTRIES, &hash_ctl, + HASH_ELEM, DISKQUOTA_TAG_HASH); - for (QuotaType type = 0; type < NUM_QUOTA_TYPES; ++type) - { - 
format_name(quota_info[type].map_name, id, &str); - memset(&hash_ctl, 0, sizeof(hash_ctl)); - hash_ctl.entrysize = sizeof(struct QuotaMapEntry); - hash_ctl.keysize = sizeof(struct QuotaMapEntryKey); - quota_info[type].map = DiskquotaShmemInitHash(str.data, INIT_QUOTA_MAP_ENTRIES, MAX_QUOTA_MAP_ENTRIES, - &hash_ctl, HASH_ELEM, DISKQUOTA_TAG_HASH); - } pfree(str.data); } @@ -653,10 +601,10 @@ init_disk_quota_model(uint32 id) void vacuum_disk_quota_model(uint32 id) { - HASH_SEQ_STATUS iter; - TableSizeEntry *tsentry = NULL; - LocalRejectMapEntry *localrejectentry; - struct QuotaMapEntry *qentry; + HASH_SEQ_STATUS iter; + TableSizeEntry *tsentry = NULL; + LocalRejectMapEntry *localrejectentry; + QuotaInfoEntry *qentry; HASHCTL hash_ctl; StringInfoData str; @@ -690,23 +638,20 @@ vacuum_disk_quota_model(uint32 id) hash_search(local_disk_quota_reject_map, &localrejectentry->keyitem, HASH_REMOVE, NULL); } - /* quota_info */ - - for (QuotaType type = 0; type < NUM_QUOTA_TYPES; ++type) + /* quota_info_map */ + format_name("QuotaInfoMap", id, &str); + memset(&hash_ctl, 0, sizeof(hash_ctl)); + hash_ctl.entrysize = sizeof(QuotaInfoEntry); + hash_ctl.keysize = sizeof(QuotaInfoEntryKey); + quota_info_map = DiskquotaShmemInitHash(str.data, INIT_QUOTA_MAP_ENTRIES, MAX_QUOTA_MAP_ENTRIES, &hash_ctl, + HASH_ELEM, DISKQUOTA_TAG_HASH); + hash_seq_init(&iter, quota_info_map); + while ((qentry = hash_seq_search(&iter)) != NULL) { - format_name(quota_info[type].map_name, id, &str); - memset(&hash_ctl, 0, sizeof(hash_ctl)); - hash_ctl.entrysize = sizeof(struct QuotaMapEntry); - hash_ctl.keysize = sizeof(struct QuotaMapEntryKey); - quota_info[type].map = DiskquotaShmemInitHash(str.data, 1024L, MAX_QUOTA_MAP_ENTRIES, &hash_ctl, HASH_ELEM, - DISKQUOTA_TAG_HASH); - hash_seq_init(&iter, quota_info[type].map); - while ((qentry = hash_seq_search(&iter)) != NULL) - { - hash_search(quota_info[type].map, &qentry->keys, HASH_REMOVE, NULL); - pg_atomic_fetch_sub_u32(diskquota_quota_info_entry_num, 1); - } + hash_search(quota_info_map, &qentry->key, HASH_REMOVE, NULL); + pg_atomic_fetch_sub_u32(diskquota_quota_info_entry_num, 1); } + pfree(str.data); } @@ -877,10 +822,8 @@ refresh_disk_quota_usage(bool is_init) /* TODO: if we can skip the following steps when there is no active table */ /* recalculate the disk usage of table, schema and role */ calculate_table_disk_usage(is_init, local_active_table_stat_map); - for (QuotaType type = 0; type < NUM_QUOTA_TYPES; ++type) - { - check_quota_map(type); - } + /* refresh quota_info_map */ + refresh_quota_info_map(); /* flush local table_size_map to user table table_size */ flush_to_table_size(); /* copy local reject map back to shared reject map */ @@ -1510,7 +1453,7 @@ do_load_quotas(void) * quota.config. A flag in shared memory could be used to detect the quota * config change. */ - clear_all_quota_maps(); + clean_all_quota_limit(); /* * read quotas from diskquota.quota_config and target table @@ -1579,11 +1522,11 @@ do_load_quotas(void) if (spcOid == InvalidOid) { - if (quota_info[quotaType].num_keys != 1) + if (quotaType == NAMESPACE_TABLESPACE_QUOTA || quotaType == ROLE_TABLESPACE_QUOTA) { ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), errmsg("[diskquota] tablespace Oid MUST NOT be NULL for quota type: %d. 
num_keys: %d", - quotaType, quota_info[quotaType].num_keys))); + quotaType, quota_key_num[quotaType]))); } update_limit_for_quota(quota_limit_mb * (1 << 20), segratio, quotaType, (Oid[]){targetOid}); } From ca155045e781e8656b5df32a9a436408b3f50faa Mon Sep 17 00:00:00 2001 From: Chen Mulong Date: Fri, 20 Oct 2023 19:00:43 +0800 Subject: [PATCH 322/330] [CMake] Fix package name for Rocky9/RHEL9 (#391) The package name is incorrect when releasing a package on Rocky9/RHEL9 platforms (--unknown_x86_64.tar.gz). This patch fixes it to --rhel9_x86_64.tar.gz. Cherry-pick https://github.com/pivotal/timestamp9/pull/41 --- cmake/Distro.cmake | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/cmake/Distro.cmake b/cmake/Distro.cmake index 14f18f8c6f7..bf7bcbf687d 100644 --- a/cmake/Distro.cmake +++ b/cmake/Distro.cmake @@ -15,13 +15,17 @@ if(NOT DISTRO_NAME) string(REGEX MATCH "CentOS Linux release 7.*" matched7 "${rh_release}") string(REGEX MATCH "Red Hat Enterprise Linux release 8.*" matched_rhel8 "${rh_release}") string(REGEX MATCH "CentOS Linux release 8.*" matched_centos8 "${rh_release}") - string(REGEX MATCH "Rocky Linux release 8.*" matched_rocky8 "${rh_release}") + string(REGEX MATCH "Rocky Linux release 8.*" matched_rocky8 "${rh_release}") + string(REGEX MATCH "Red Hat Enterprise Linux release 9.*" matched_rhel9 "${rh_release}") + string(REGEX MATCH "Rocky Linux release 9.*" matched_rocky9 "${rh_release}") if (matched6) set(DISTRO_NAME rhel6) elseif(matched7) set(DISTRO_NAME rhel7) elseif(matched_rhel8 OR matched_centos8 OR matched_rocky8) set(DISTRO_NAME rhel8) + elseif(matched_rhel9 OR matched_rocky9) + set(DISTRO_NAME rhel9) endif() elseif(EXISTS "/etc/os-release") file(READ /etc/os-release os_release) From 49a9a2df1279dd0ce37d7a8b03920ece3e5136ca Mon Sep 17 00:00:00 2001 From: Zhang Hao Date: Sun, 22 Oct 2023 20:48:33 +0800 Subject: [PATCH 323/330] Rename diskquota.max_quotas to diskquota.max_quota_probe (#390) --- src/diskquota.c | 7 ++++--- src/diskquota.h | 2 +- src/quotamodel.c | 14 +++++++------- 3 files changed, 12 insertions(+), 11 deletions(-) diff --git a/src/diskquota.c b/src/diskquota.c index 92389eac847..f714a4f0e47 100644 --- a/src/diskquota.c +++ b/src/diskquota.c @@ -77,7 +77,7 @@ bool diskquota_hardlimit = false; int diskquota_max_workers = 10; int diskquota_max_table_segments = 0; int diskquota_max_monitored_databases = 0; -int diskquota_max_quotas = 0; +int diskquota_max_quota_probes = 0; DiskQuotaLocks diskquota_locks; ExtensionDDLMessage *extension_ddl_message = NULL; @@ -411,8 +411,9 @@ define_guc_variables(void) INT_MAX, PGC_POSTMASTER, 0, NULL, NULL, NULL); DefineCustomIntVariable("diskquota.max_monitored_databases", "Max number of database on the cluster.", NULL, &diskquota_max_monitored_databases, 50, 1, 1024, PGC_POSTMASTER, 0, NULL, NULL, NULL); - DefineCustomIntVariable("diskquota.max_quotas", "Max number of quotas on the cluster.", NULL, &diskquota_max_quotas, - 1024 * 1024, 1024 * INIT_QUOTA_MAP_ENTRIES, INT_MAX, PGC_POSTMASTER, 0, NULL, NULL, NULL); + DefineCustomIntVariable("diskquota.max_quota_probes", "Max number of quotas on the cluster.", NULL, + &diskquota_max_quota_probes, 1024 * 1024, 1024 * INIT_QUOTA_MAP_ENTRIES, INT_MAX, + PGC_POSTMASTER, 0, NULL, NULL, NULL); } /* ---- Functions for disk quota worker process ---- */ diff --git a/src/diskquota.h b/src/diskquota.h index b9d90ca08ab..9ded6856655 100644 --- a/src/diskquota.h +++ b/src/diskquota.h @@ -42,7 +42,7 @@ #define MAX_NUM_KEYS_QUOTA_MAP 8 /* init number of QuotaInfoEntry in
quota_info_map */ #define INIT_QUOTA_MAP_ENTRIES 128 -#define AVG_QUOTA_MAP_ENTRIES (diskquota_max_quotas / diskquota_max_monitored_databases) +#define AVG_QUOTA_MAP_ENTRIES (diskquota_max_quota_probes / diskquota_max_monitored_databases) /* max number of QuotaInfoEntry in quota_info_map */ #define MAX_QUOTA_MAP_ENTRIES (AVG_QUOTA_MAP_ENTRIES < 1024 ? 1024 : AVG_QUOTA_MAP_ENTRIES) diff --git a/src/quotamodel.c b/src/quotamodel.c index 93c80e608bc..5a618f71eac 100644 --- a/src/quotamodel.c +++ b/src/quotamodel.c @@ -87,7 +87,7 @@ int SEGCOUNT = 0; extern int diskquota_max_table_segments; extern pg_atomic_uint32 *diskquota_table_size_entry_num; extern int diskquota_max_monitored_databases; -extern int diskquota_max_quotas; +extern int diskquota_max_quota_probes; extern pg_atomic_uint32 *diskquota_quota_info_entry_num; /* @@ -232,7 +232,7 @@ put_quota_map_entry(QuotaInfoEntryKey *key, bool *found) { QuotaInfoEntry *entry; uint32 counter = pg_atomic_read_u32(diskquota_quota_info_entry_num); - if (counter >= diskquota_max_quotas) + if (counter >= diskquota_max_quota_probes) { entry = hash_search(quota_info_map, key, HASH_FIND, found); /* @@ -248,12 +248,12 @@ put_quota_map_entry(QuotaInfoEntryKey *key, bool *found) if (!(*found)) { counter = pg_atomic_add_fetch_u32(diskquota_quota_info_entry_num, 1); - if (counter >= diskquota_max_quotas) + if (counter >= diskquota_max_quota_probes) { - ereport(WARNING, (errmsg("[diskquota] the number of quota exceeds the limit, please increase " - "the GUC value for diskquota.max_quotas. Current " - "diskquota.max_quotas value: %d", - diskquota_max_quotas))); + ereport(WARNING, (errmsg("[diskquota] the number of quota probe exceeds the limit, please " + "increase the GUC value for diskquota.max_quota_probes. Current " + "diskquota.max_quota_probes value: %d", + diskquota_max_quota_probes))); } } } From 9fd7a2232f25ce8810c6df57c8bca9fd3da5549d Mon Sep 17 00:00:00 2001 From: Zhang Hao Date: Tue, 7 Nov 2023 16:06:01 +0800 Subject: [PATCH 324/330] Fix wrong shmem allocation. (#401) - DDL message is only used on master, so it is unnecessary to allocate the memory on segments. - The launcher shmem should not be initialized on segments. --- src/quotamodel.c | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/src/quotamodel.c b/src/quotamodel.c index 5a618f71eac..34709e142aa 100644 --- a/src/quotamodel.c +++ b/src/quotamodel.c @@ -446,9 +446,6 @@ disk_quota_shmem_startup(void) * to store out-of-quota rejectmap. active_tables_map is used to store * active tables whose disk usage is changed. */ - extension_ddl_message = ShmemInitStruct("disk_quota_extension_ddl_message", sizeof(ExtensionDDLMessage), &found); - if (!found) memset((void *)extension_ddl_message, 0, sizeof(ExtensionDDLMessage)); - memset(&hash_ctl, 0, sizeof(hash_ctl)); hash_ctl.keysize = sizeof(RejectMapEntry); hash_ctl.entrysize = sizeof(GlobalRejectMapEntry); @@ -467,7 +464,16 @@ disk_quota_shmem_startup(void) monitored_dbid_cache = DiskquotaShmemInitHash("table oid cache which shoud tracking", diskquota_max_monitored_databases, diskquota_max_monitored_databases, &hash_ctl, HASH_ELEM, DISKQUOTA_OID_HASH); - init_launcher_shmem(); + + /* only initialize ddl_message and launcher memory on master/standby. 
*/ + if (IS_QUERY_DISPATCHER()) + { + extension_ddl_message = + ShmemInitStruct("disk_quota_extension_ddl_message", sizeof(ExtensionDDLMessage), &found); + if (!found) memset((void *)extension_ddl_message, 0, sizeof(ExtensionDDLMessage)); + + init_launcher_shmem(); + } LWLockRelease(AddinShmemInitLock); } @@ -525,8 +531,8 @@ diskquota_worker_shmem_size() static Size DiskQuotaShmemSize(void) { - Size size; - size = sizeof(ExtensionDDLMessage); + Size size = 0; + size = add_size(size, hash_estimate_size(MAX_DISK_QUOTA_REJECT_ENTRIES, sizeof(GlobalRejectMapEntry))); size = add_size(size, hash_estimate_size(diskquota_max_active_tables, sizeof(DiskQuotaActiveTableEntry))); size = add_size(size, hash_estimate_size(diskquota_max_active_tables, sizeof(DiskQuotaRelationCacheEntry))); @@ -537,6 +543,7 @@ DiskQuotaShmemSize(void) if (IS_QUERY_DISPATCHER()) { + size = add_size(size, sizeof(ExtensionDDLMessage)); size = add_size(size, diskquota_launcher_shmem_size()); size = add_size(size, sizeof(pg_atomic_uint32)); size = add_size(size, diskquota_worker_shmem_size() * diskquota_max_monitored_databases); From 27be29f4a31ae757fb4e2a0b58df19fddd993a91 Mon Sep 17 00:00:00 2001 From: Dennis Kovalenko Date: Wed, 10 Jan 2024 12:58:25 +0400 Subject: [PATCH 325/330] Free tuple after use (#414) refresh_rejectmap looks for a tuple using SearchSysCacheCopy1, which retrieves a copy of the tuple, allocating memory for it. However, refresh_rejectmap didn't free these tuple copies after use. If lots of oids were passed, diskquota could work incorrectly because of a huge memory leak. This patch frees these tuples and prevents memory leaks. --- src/quotamodel.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/quotamodel.c b/src/quotamodel.c index 34709e142aa..836ce8f0736 100644 --- a/src/quotamodel.c +++ b/src/quotamodel.c @@ -2028,6 +2028,8 @@ refresh_rejectmap(PG_FUNCTION_ARGS) memcpy(&blocked_filenode_entry->auxblockinfo, &keyitem, sizeof(RejectMapEntry)); blocked_filenode_entry->segexceeded = rejectmapentry->segexceeded; } + + heap_freetuple(curr_tuple); } } /* @@ -2037,6 +2039,8 @@ refresh_rejectmap(PG_FUNCTION_ARGS) break; } } + + heap_freetuple(tuple); } else { From 1aa37985382bf62990a70a0c4cf3a84be9b0e020 Mon Sep 17 00:00:00 2001 From: yihong Date: Mon, 22 Jan 2024 16:40:09 +0800 Subject: [PATCH 326/330] fix: update actions (#417) fix: update actions Signed-off-by: yihong0618 --- .github/workflows/check.yml | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index 293a7b94e8b..2b5305a7b8f 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -10,13 +10,16 @@ on: - "*.md" - "*.sql" +concurrency: + group: ${{ github.event.number || github.run_id }} + cancel-in-progress: true + jobs: build: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: use clang-format 13 run: pip3 install clang-format==13.0.1 - name: Check clang code style run: git ls-files *.{c,h} | xargs clang-format -i --style=file && git diff --exit-code - From 467fcf68254f678e19aaacc4cac9d84146501148 Mon Sep 17 00:00:00 2001 From: Georgy Shelkovy Date: Thu, 7 Mar 2024 11:18:39 +0500 Subject: [PATCH 327/330] Invalidate diskquota.table_size entries during startup (#406) Diskquota calculates sizes and stores information in the diskquota.table_size table periodically, with a pause of diskquota.naptime (2 seconds by default) between refreshes.
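As a minimal illustration (assuming nothing about diskquota.table_size beyond the tableid and segid columns used below, where segid = -1 is a table's cluster-wide row), the stale entries this patch is concerned with are the rows that outlive their tables; the new isolation tests run essentially this check and expect zero rows after a restart:

```sql
-- Stale bookkeeping: rows still in diskquota.table_size whose table
-- no longer exists in pg_catalog.pg_class.
SELECT ts.tableid
FROM diskquota.table_size ts
WHERE NOT EXISTS (SELECT 1 FROM pg_catalog.pg_class c WHERE c.oid = ts.tableid)
  AND ts.segid = -1;
```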
If we restart the cluster during this pause, then diskquota will lose all changes that have occurred since the last save to the diskquota.table_size table. We could create temporary tables, wait until they are flushed to the diskquota.table_size table, restart the cluster, and diskquota would still remember the information about the temporary tables. Or we could delete the tables, restart the cluster, and diskquota would again remember information about the deleted tables. This happens because at the start of the cluster, diskquota remembers all the information written to the diskquota.table_size table, but does not check whether some tables have already been deleted. As a solution, we invalidate diskquota.table_size during diskquota worker startup in addition to pg_class validation. The remaining problem: the incorrect table size cannot be refreshed until the corresponding table becomes an active table. Solution: call diskquota.init_table_size_table(). --- src/diskquota.h | 2 +- src/diskquota_utility.c | 18 +++-- src/quotamodel.c | 28 +++++++- .../expected/test_dropped_table.out | 72 +++++++++++++++++++ .../expected/test_temporary_table.out | 70 ++++++++++++++++++ .../expected7/test_dropped_table.out | 72 +++++++++++++++++++ .../expected7/test_temporary_table.out | 70 ++++++++++++++++++ tests/isolation2/isolation2_schedule | 2 + tests/isolation2/sql/test_dropped_table.sql | 29 ++++++++ tests/isolation2/sql/test_temporary_table.sql | 28 ++++++++ 10 files changed, 384 insertions(+), 7 deletions(-) create mode 100644 tests/isolation2/expected/test_dropped_table.out create mode 100644 tests/isolation2/expected/test_temporary_table.out create mode 100644 tests/isolation2/expected7/test_dropped_table.out create mode 100644 tests/isolation2/expected7/test_temporary_table.out create mode 100644 tests/isolation2/sql/test_dropped_table.sql create mode 100644 tests/isolation2/sql/test_temporary_table.sql diff --git a/src/diskquota.h b/src/diskquota.h index 9ded6856655..b3d3481c86f 100644 --- a/src/diskquota.h +++ b/src/diskquota.h @@ -290,7 +290,7 @@ extern bool diskquota_hardlimit; extern int SEGCOUNT; extern int worker_spi_get_extension_version(int *major, int *minor); extern void truncateStringInfo(StringInfo str, int nchars); -extern List *get_rel_oid_list(void); +extern List *get_rel_oid_list(bool is_init); extern int64 calculate_relation_size_all_forks(RelFileNodeBackend *rnode, char relstorage, Oid relam); extern Relation diskquota_relation_open(Oid relid); extern bool get_rel_name_namespace(Oid relid, Oid *nsOid, char *relname); diff --git a/src/diskquota_utility.c b/src/diskquota_utility.c index 00dab97b520..28b874e8ade 100644 --- a/src/diskquota_utility.c +++ b/src/diskquota_utility.c @@ -113,8 +113,6 @@ static float4 get_per_segment_ratio(Oid spcoid); static bool to_delete_quota(QuotaType type, int64 quota_limit_mb, float4 segratio); static void check_role(Oid roleoid, char *rolname, int64 quota_limit_mb); -List *get_rel_oid_list(void); - /* ---- Help Functions to set quota limit. ---- */ /* * Initialize table diskquota.table_size. @@ -1296,17 +1294,24 @@ worker_spi_get_extension_version(int *major, int *minor) * Get the list of oids of the tables which diskquota * needs to care about in the database. * Firstly the all the table oids which relkind is 'r' - * or 'm' and not system table. + * or 'm' and not system table. On init stage, oids from + * diskquota.table_size are added to invalidate them. * Then, fetch the indexes of those tables.
*/ List * -get_rel_oid_list(void) +get_rel_oid_list(bool is_init) { List *oidlist = NIL; int ret; - ret = SPI_execute_with_args("select oid from pg_class where oid >= $1 and (relkind='r' or relkind='m')", 1, +#define SELECT_FROM_PG_CATALOG_PG_CLASS "select oid from pg_catalog.pg_class where oid >= $1 and relkind in ('r', 'm')" + + ret = SPI_execute_with_args(is_init ? SELECT_FROM_PG_CATALOG_PG_CLASS + " union distinct" + " select tableid from diskquota.table_size where segid = -1" + : SELECT_FROM_PG_CATALOG_PG_CLASS, + 1, (Oid[]){ OIDOID, }, @@ -1314,6 +1319,9 @@ get_rel_oid_list(void) ObjectIdGetDatum(FirstNormalObjectId), }, NULL, false, 0); + +#undef SELECT_FROM_PG_CATALOG_PG_CLASS + if (ret != SPI_OK_SELECT) elog(ERROR, "cannot fetch in pg_class. error code %d", ret); TupleDesc tupdesc = SPI_tuptable->tupdesc; diff --git a/src/quotamodel.c b/src/quotamodel.c index 836ce8f0736..764b638b0c7 100644 --- a/src/quotamodel.c +++ b/src/quotamodel.c @@ -222,6 +222,8 @@ static bool get_table_size_entry_flag(TableSizeEntry *entry, TableSizeEntryFlag static void reset_table_size_entry_flag(TableSizeEntry *entry, TableSizeEntryFlag flag); static void set_table_size_entry_flag(TableSizeEntry *entry, TableSizeEntryFlag flag); +static void delete_from_table_size_map(char *str); + /* * put QuotaInfoEntry into quota_info_map and return this entry. * return NULL: no free SHM for quota_info_map @@ -918,6 +920,10 @@ calculate_table_disk_usage(bool is_init, HTAB *local_active_table_stat_map) TableEntryKey active_table_key; List *oidlist; ListCell *l; + int delete_entries_num = 0; + StringInfoData delete_statement; + + initStringInfo(&delete_statement); /* * unset is_exist flag for tsentry in table_size_map this is used to @@ -934,7 +940,7 @@ calculate_table_disk_usage(bool is_init, HTAB *local_active_table_stat_map) * calculate the file size for active table and update namespace_size_map * and role_size_map */ - oidlist = get_rel_oid_list(); + oidlist = get_rel_oid_list(is_init); oidlist = merge_uncommitted_table_to_oidlist(oidlist); @@ -968,6 +974,23 @@ calculate_table_disk_usage(bool is_init, HTAB *local_active_table_stat_map) { elog(WARNING, "cache lookup failed for relation %u", relOid); LWLockRelease(diskquota_locks.relation_cache_lock); + + if (!is_init) continue; + + for (int i = -1; i < SEGCOUNT; i++) + { + appendStringInfo(&delete_statement, "%s(%u,%d)", (delete_entries_num == 0) ? 
" " : ", ", relOid, i); + + delete_entries_num++; + + if (delete_entries_num > SQL_MAX_VALUES_NUMBER) + { + delete_from_table_size_map(delete_statement.data); + resetStringInfo(&delete_statement); + delete_entries_num = 0; + } + } + continue; } relnamespace = relation_entry->namespaceoid; @@ -1107,6 +1130,9 @@ calculate_table_disk_usage(bool is_init, HTAB *local_active_table_stat_map) } } + if (delete_entries_num) delete_from_table_size_map(delete_statement.data); + + pfree(delete_statement.data); list_free(oidlist); /* diff --git a/tests/isolation2/expected/test_dropped_table.out b/tests/isolation2/expected/test_dropped_table.out new file mode 100644 index 00000000000..0af4cabd337 --- /dev/null +++ b/tests/isolation2/expected/test_dropped_table.out @@ -0,0 +1,72 @@ +-- Ensure diskquota does not save information about dropped table during restart cluster by invalidates it at startup + +!\retcode gpconfig -c diskquota.naptime -v 5 --skipvalidation; +-- start_ignore +-- end_ignore +(exited with code 0) +!\retcode gpstop -u; +-- start_ignore +-- end_ignore +(exited with code 0) + +1: CREATE SCHEMA dropped_schema; +CREATE +1: SET search_path TO dropped_schema; +SET +1: SELECT diskquota.set_schema_quota('dropped_schema', '1 MB'); + set_schema_quota +------------------ + +(1 row) +1: SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +1: CREATE TABLE dropped_table(id int) DISTRIBUTED BY (id); +CREATE +1: INSERT INTO dropped_table SELECT generate_series(1, 10000); +INSERT 10000 +-- Wait for the diskquota bgworker refreshing the size of 'dropped_table'. +1: SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +1: DROP TABLE dropped_table; +DROP +1q: ... + +-- Restart cluster fastly +!\retcode gpstop -afr; +-- start_ignore +-- end_ignore +(exited with code 0) + +-- Indicates that there is no dropped table in pg_catalog.pg_class +1: SELECT oid FROM pg_catalog.pg_class WHERE relname = 'dropped_table'; + oid +----- +(0 rows) +-- Indicates that there are no entries in diskquota.table_size that are not present in pg_catalog.pg_class +1: SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +1: SELECT tableid FROM diskquota.table_size WHERE NOT EXISTS (SELECT 1 FROM pg_catalog.pg_class WHERE tableid = oid) AND segid = -1; + tableid +--------- +(0 rows) +1: DROP SCHEMA dropped_schema CASCADE; +DROP +1q: ... 
+ +!\retcode gpconfig -c diskquota.naptime -v 0 --skipvalidation; +-- start_ignore +-- end_ignore +(exited with code 0) +!\retcode gpstop -u; +-- start_ignore +-- end_ignore +(exited with code 0) diff --git a/tests/isolation2/expected/test_temporary_table.out b/tests/isolation2/expected/test_temporary_table.out new file mode 100644 index 00000000000..8fa95c5e291 --- /dev/null +++ b/tests/isolation2/expected/test_temporary_table.out @@ -0,0 +1,70 @@ +-- Ensure diskquota does not save information about temporary table during restart cluster by invalidates it at startup + +!\retcode gpconfig -c diskquota.naptime -v 5 --skipvalidation; +-- start_ignore +-- end_ignore +(exited with code 0) +!\retcode gpstop -u; +-- start_ignore +-- end_ignore +(exited with code 0) + +1: CREATE SCHEMA temporary_schema; +CREATE +1: SET search_path TO temporary_schema; +SET +1: SELECT diskquota.set_schema_quota('temporary_schema', '1 MB'); + set_schema_quota +------------------ + +(1 row) +1: SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +1: CREATE TEMPORARY TABLE temporary_table(id int) DISTRIBUTED BY (id); +CREATE +1: INSERT INTO temporary_table SELECT generate_series(1, 10000); +INSERT 10000 +-- Wait for the diskquota bgworker refreshing the size of 'temporary_table'. +1: SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +1q: ... + +-- Restart cluster fastly +!\retcode gpstop -afr; +-- start_ignore +-- end_ignore +(exited with code 0) + +-- Indicates that there is no temporary table in pg_catalog.pg_class +1: SELECT oid FROM pg_catalog.pg_class WHERE relname = 'temporary_table'; + oid +----- +(0 rows) +-- Indicates that there are no entries in diskquota.table_size that are not present in pg_catalog.pg_class +1: SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +1: SELECT tableid FROM diskquota.table_size WHERE NOT EXISTS (SELECT 1 FROM pg_catalog.pg_class WHERE tableid = oid) AND segid = -1; + tableid +--------- +(0 rows) +1: DROP SCHEMA temporary_schema CASCADE; +DROP +1q: ... + +!\retcode gpconfig -c diskquota.naptime -v 0 --skipvalidation; +-- start_ignore +-- end_ignore +(exited with code 0) +!\retcode gpstop -u; +-- start_ignore +-- end_ignore +(exited with code 0) diff --git a/tests/isolation2/expected7/test_dropped_table.out b/tests/isolation2/expected7/test_dropped_table.out new file mode 100644 index 00000000000..443e04bd91b --- /dev/null +++ b/tests/isolation2/expected7/test_dropped_table.out @@ -0,0 +1,72 @@ +-- Ensure diskquota does not save information about dropped table during restart cluster by invalidates it at startup + +!\retcode gpconfig -c diskquota.naptime -v 5 --skipvalidation; +-- start_ignore +-- end_ignore +(exited with code 0) +!\retcode gpstop -u; +-- start_ignore +-- end_ignore +(exited with code 0) + +1: CREATE SCHEMA dropped_schema; +CREATE SCHEMA +1: SET search_path TO dropped_schema; +SET +1: SELECT diskquota.set_schema_quota('dropped_schema', '1 MB'); + set_schema_quota +------------------ + +(1 row) +1: SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +1: CREATE TABLE dropped_table(id int) DISTRIBUTED BY (id); +CREATE TABLE +1: INSERT INTO dropped_table SELECT generate_series(1, 10000); +INSERT 0 10000 +-- Wait for the diskquota bgworker refreshing the size of 'dropped_table'. 
+1: SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +1: DROP TABLE dropped_table; +DROP TABLE +1q: ... + +-- Restart cluster fastly +!\retcode gpstop -afr; +-- start_ignore +-- end_ignore +(exited with code 0) + +-- Indicates that there is no dropped table in pg_catalog.pg_class +1: SELECT oid FROM pg_catalog.pg_class WHERE relname = 'dropped_table'; + oid +----- +(0 rows) +-- Indicates that there are no entries in diskquota.table_size that are not present in pg_catalog.pg_class +1: SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +1: SELECT tableid FROM diskquota.table_size WHERE NOT EXISTS (SELECT 1 FROM pg_catalog.pg_class WHERE tableid = oid) AND segid = -1; + tableid +--------- +(0 rows) +1: DROP SCHEMA dropped_schema CASCADE; +DROP SCHEMA +1q: ... + +!\retcode gpconfig -c diskquota.naptime -v 0 --skipvalidation; +-- start_ignore +-- end_ignore +(exited with code 0) +!\retcode gpstop -u; +-- start_ignore +-- end_ignore +(exited with code 0) diff --git a/tests/isolation2/expected7/test_temporary_table.out b/tests/isolation2/expected7/test_temporary_table.out new file mode 100644 index 00000000000..cc666691c36 --- /dev/null +++ b/tests/isolation2/expected7/test_temporary_table.out @@ -0,0 +1,70 @@ +-- Ensure diskquota does not save information about temporary table during restart cluster by invalidates it at startup + +!\retcode gpconfig -c diskquota.naptime -v 5 --skipvalidation; +-- start_ignore +-- end_ignore +(exited with code 0) +!\retcode gpstop -u; +-- start_ignore +-- end_ignore +(exited with code 0) + +1: CREATE SCHEMA temporary_schema; +CREATE SCHEMA +1: SET search_path TO temporary_schema; +SET +1: SELECT diskquota.set_schema_quota('temporary_schema', '1 MB'); + set_schema_quota +------------------ + +(1 row) +1: SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +1: CREATE TEMPORARY TABLE temporary_table(id int) DISTRIBUTED BY (id); +CREATE TABLE +1: INSERT INTO temporary_table SELECT generate_series(1, 10000); +INSERT 0 10000 +-- Wait for the diskquota bgworker refreshing the size of 'temporary_table'. +1: SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +1q: ... + +-- Restart cluster fastly +!\retcode gpstop -afr; +-- start_ignore +-- end_ignore +(exited with code 0) + +-- Indicates that there is no temporary table in pg_catalog.pg_class +1: SELECT oid FROM pg_catalog.pg_class WHERE relname = 'temporary_table'; + oid +----- +(0 rows) +-- Indicates that there are no entries in diskquota.table_size that are not present in pg_catalog.pg_class +1: SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +1: SELECT tableid FROM diskquota.table_size WHERE NOT EXISTS (SELECT 1 FROM pg_catalog.pg_class WHERE tableid = oid) AND segid = -1; + tableid +--------- +(0 rows) +1: DROP SCHEMA temporary_schema CASCADE; +DROP SCHEMA +1q: ... 
+ +!\retcode gpconfig -c diskquota.naptime -v 0 --skipvalidation; +-- start_ignore +-- end_ignore +(exited with code 0) +!\retcode gpstop -u; +-- start_ignore +-- end_ignore +(exited with code 0) diff --git a/tests/isolation2/isolation2_schedule b/tests/isolation2/isolation2_schedule index 090c5cc58f2..5ed558d693a 100644 --- a/tests/isolation2/isolation2_schedule +++ b/tests/isolation2/isolation2_schedule @@ -5,6 +5,8 @@ test: test_relation_size test: test_rejectmap test: test_vacuum test: test_truncate +test: test_temporary_table +test: test_dropped_table test: test_postmaster_restart test: test_worker_timeout test: test_per_segment_config diff --git a/tests/isolation2/sql/test_dropped_table.sql b/tests/isolation2/sql/test_dropped_table.sql new file mode 100644 index 00000000000..56652a79043 --- /dev/null +++ b/tests/isolation2/sql/test_dropped_table.sql @@ -0,0 +1,29 @@ +-- Ensure diskquota does not save information about dropped table during restart cluster by invalidates it at startup + +!\retcode gpconfig -c diskquota.naptime -v 5 --skipvalidation; +!\retcode gpstop -u; + +1: CREATE SCHEMA dropped_schema; +1: SET search_path TO dropped_schema; +1: SELECT diskquota.set_schema_quota('dropped_schema', '1 MB'); +1: SELECT diskquota.wait_for_worker_new_epoch(); +1: CREATE TABLE dropped_table(id int) DISTRIBUTED BY (id); +1: INSERT INTO dropped_table SELECT generate_series(1, 10000); +-- Wait for the diskquota bgworker refreshing the size of 'dropped_table'. +1: SELECT diskquota.wait_for_worker_new_epoch(); +1: DROP TABLE dropped_table; +1q: + +-- Restart cluster fastly +!\retcode gpstop -afr; + +-- Indicates that there is no dropped table in pg_catalog.pg_class +1: SELECT oid FROM pg_catalog.pg_class WHERE relname = 'dropped_table'; +-- Indicates that there are no entries in diskquota.table_size that are not present in pg_catalog.pg_class +1: SELECT diskquota.wait_for_worker_new_epoch(); +1: SELECT tableid FROM diskquota.table_size WHERE NOT EXISTS (SELECT 1 FROM pg_catalog.pg_class WHERE tableid = oid) AND segid = -1; +1: DROP SCHEMA dropped_schema CASCADE; +1q: + +!\retcode gpconfig -c diskquota.naptime -v 0 --skipvalidation; +!\retcode gpstop -u; diff --git a/tests/isolation2/sql/test_temporary_table.sql b/tests/isolation2/sql/test_temporary_table.sql new file mode 100644 index 00000000000..381731791b0 --- /dev/null +++ b/tests/isolation2/sql/test_temporary_table.sql @@ -0,0 +1,28 @@ +-- Ensure diskquota does not save information about temporary table during restart cluster by invalidates it at startup + +!\retcode gpconfig -c diskquota.naptime -v 5 --skipvalidation; +!\retcode gpstop -u; + +1: CREATE SCHEMA temporary_schema; +1: SET search_path TO temporary_schema; +1: SELECT diskquota.set_schema_quota('temporary_schema', '1 MB'); +1: SELECT diskquota.wait_for_worker_new_epoch(); +1: CREATE TEMPORARY TABLE temporary_table(id int) DISTRIBUTED BY (id); +1: INSERT INTO temporary_table SELECT generate_series(1, 10000); +-- Wait for the diskquota bgworker refreshing the size of 'temporary_table'. 
+1: SELECT diskquota.wait_for_worker_new_epoch(); +1q: + +-- Restart cluster fastly +!\retcode gpstop -afr; + +-- Indicates that there is no temporary table in pg_catalog.pg_class +1: SELECT oid FROM pg_catalog.pg_class WHERE relname = 'temporary_table'; +-- Indicates that there are no entries in diskquota.table_size that are not present in pg_catalog.pg_class +1: SELECT diskquota.wait_for_worker_new_epoch(); +1: SELECT tableid FROM diskquota.table_size WHERE NOT EXISTS (SELECT 1 FROM pg_catalog.pg_class WHERE tableid = oid) AND segid = -1; +1: DROP SCHEMA temporary_schema CASCADE; +1q: + +!\retcode gpconfig -c diskquota.naptime -v 0 --skipvalidation; +!\retcode gpstop -u; From c863c4f07cd87e557dbf22d6b087d4e93c905825 Mon Sep 17 00:00:00 2001 From: Dianjin Wang Date: Tue, 9 Dec 2025 17:31:39 +0800 Subject: [PATCH 328/330] Remove legacy CI and unused files from diskquota Cleanup the invalid files after importing diskquota extension from upstream. The specific changes include: 1. Remove .github directory: The upstream GitHub Actions workflows are no longer applicable in the Cloudberry repository. 2. Remove .gitmessage, .editorconfig, .clang-format: Code style and commit templates should follow Cloudberry's main repository standards. 3. Remove SECURITY.md: Legacy security policy. 4. Remove concourse directory: Legacy CI scripts. This cleanup makes the extension structure cleaner and more consistent with other contrib modules. --- gpcontrib/diskquota/.clang-format | 40 ------ gpcontrib/diskquota/.editorconfig | 26 ---- .../diskquota/.github/workflows/check.yml | 25 ---- .../diskquota/.gitmessage/commit.template | 22 --- gpcontrib/diskquota/SECURITY.md | 125 ------------------ gpcontrib/diskquota/concourse/pre_build.sh | 8 -- gpcontrib/diskquota/concourse/pre_test.sh | 12 -- gpcontrib/diskquota/concourse/test.sh | 51 ------- 8 files changed, 309 deletions(-) delete mode 100644 gpcontrib/diskquota/.clang-format delete mode 100644 gpcontrib/diskquota/.editorconfig delete mode 100644 gpcontrib/diskquota/.github/workflows/check.yml delete mode 100644 gpcontrib/diskquota/.gitmessage/commit.template delete mode 100644 gpcontrib/diskquota/SECURITY.md delete mode 100755 gpcontrib/diskquota/concourse/pre_build.sh delete mode 100755 gpcontrib/diskquota/concourse/pre_test.sh delete mode 100755 gpcontrib/diskquota/concourse/test.sh diff --git a/gpcontrib/diskquota/.clang-format b/gpcontrib/diskquota/.clang-format deleted file mode 100644 index 63a1d6358fc..00000000000 --- a/gpcontrib/diskquota/.clang-format +++ /dev/null @@ -1,40 +0,0 @@ ---- -BasedOnStyle: Google - -ColumnLimit: 120 - -# How much whitespace? 
-UseTab: ForIndentation -TabWidth: 4 -IndentWidth: 4 -ContinuationIndentWidth: 8 - -SpacesBeforeTrailingComments: 1 - -# Line things up -AccessModifierOffset: -4 # outdent `public:`, etc - -DerivePointerAlignment: false -PointerAlignment: Right # char *foo, char &bar - -AlignConsecutiveAssignments: true -AlignConsecutiveDeclarations: true - -# Braces -AlwaysBreakAfterReturnType: TopLevelDefinitions -AllowShortFunctionsOnASingleLine: Inline -BreakBeforeBraces: Custom -BraceWrapping: - AfterStruct: true - AfterClass: true - AfterEnum: true - AfterFunction: true - AfterControlStatement: true - AfterNamespace: false - AfterExternBlock: false - BeforeCatch: true - SplitEmptyFunction: false - SplitEmptyRecord: false - BeforeElse: true - -SortIncludes: false diff --git a/gpcontrib/diskquota/.editorconfig b/gpcontrib/diskquota/.editorconfig deleted file mode 100644 index 053c91fd382..00000000000 --- a/gpcontrib/diskquota/.editorconfig +++ /dev/null @@ -1,26 +0,0 @@ -root = true - -[*.{c,cpp,h,y}] -indent_style = tab -indent_size = 4 - -[{GNUmakefile,Makefile}*] -indent_style = tab -indent_size = 4 - -[*.mk] -indent_style = tab -indent_size = 4 - -[*.py] -indent_style = space -indent_size = 4 - -[{*.cmake,CMakeLists.txt}] -indent_style = space -indent_size = 2 - -[cmake/**.cmake] -indent_style = space -indent_size = 4 - diff --git a/gpcontrib/diskquota/.github/workflows/check.yml b/gpcontrib/diskquota/.github/workflows/check.yml deleted file mode 100644 index 2b5305a7b8f..00000000000 --- a/gpcontrib/diskquota/.github/workflows/check.yml +++ /dev/null @@ -1,25 +0,0 @@ -name: Check - -on: - pull_request: - paths-ignore: - - "docs/**" - - "cmake/**" - - "test/**" - - "upgrade_test/**" - - "*.md" - - "*.sql" - -concurrency: - group: ${{ github.event.number || github.run_id }} - cancel-in-progress: true - -jobs: - build: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - name: use clang-format 13 - run: pip3 install clang-format==13.0.1 - - name: Check clang code style - run: git ls-files *.{c,h} | xargs clang-format -i --style=file && git diff --exit-code diff --git a/gpcontrib/diskquota/.gitmessage/commit.template b/gpcontrib/diskquota/.gitmessage/commit.template deleted file mode 100644 index bbc420e5ac8..00000000000 --- a/gpcontrib/diskquota/.gitmessage/commit.template +++ /dev/null @@ -1,22 +0,0 @@ -# Title: Summary, imperative, start upper case, don't end with a period -# No more than 50 chars. #### 50 chars is here: # - -# Remember blank line between title and body. - -# Body: Explain *what* and *why* (not *how*). Include task ID (tracker issue). -# Wrap at 72 chars. ################################## which is here: # - -# At the end: Include Co-authored-by for all contributors. -# Include at least one empty line before it. Format: -# Co-authored-by: name -# -# How to Write a Git Commit Message: -# https://chris.beams.io/posts/git-commit/ -# -# 1.Separate subject from body with a blank line -# 2. Limit the subject line to 50 characters -# 3. Capitalize the subject line -# 4. Do not end the subject line with a period -# 5. Use the imperative mood in the subject line -# 6. Wrap the body at 72 characters -# 7. Use the body to explain what and why vs. 
how diff --git a/gpcontrib/diskquota/SECURITY.md b/gpcontrib/diskquota/SECURITY.md deleted file mode 100644 index fabbb9d2eb9..00000000000 --- a/gpcontrib/diskquota/SECURITY.md +++ /dev/null @@ -1,125 +0,0 @@ -# Security Release Process - -Greenplum Database has adopted this security disclosure and response policy to -ensure we responsibly handle critical issues. - -## Reporting a Vulnerability - Private Disclosure Process - -Security is of the highest importance and all security vulnerabilities or -suspected security vulnerabilities should be reported to Greenplum Database -privately, to minimize attacks against current users of Greenplum Database -before they are fixed. Vulnerabilities will be investigated and patched on the -next patch (or minor) release as soon as possible. This information could be -kept entirely internal to the project. - -If you know of a publicly disclosed security vulnerability for Greenplum -Database, please **IMMEDIATELY** contact the Greenplum Database project team -(security@greenplum.org). - -**IMPORTANT: Do not file public issues on GitHub for security vulnerabilities!** - -To report a vulnerability or a security-related issue, please contact the email -address with the details of the vulnerability. The email will be fielded by the -Greenplum Database project team. Emails will be addressed promptly, including a -detailed plan to investigate the issue and any potential workarounds to perform -in the meantime. Do not report non-security-impacting bugs through this -channel. Use [GitHub issues](https://github.com/greenplum-db/gpdb/issues) -instead. - -## Proposed Email Content - -Provide a descriptive subject line and in the body of the email include the -following information: - -* Basic identity information, such as your name and your affiliation or company. -* Detailed steps to reproduce the vulnerability (POC scripts, screenshots, and - logs are all helpful to us). -* Description of the effects of the vulnerability on Greenplum Database and the - related hardware and software configurations, so that the Greenplum Database - project team can reproduce it. -* How the vulnerability affects Greenplum Database usage and an estimation of - the attack surface, if there is one. -* List other projects or dependencies that were used in conjunction with - Greenplum Database to produce the vulnerability. - -## When to report a vulnerability - -* When you think Greenplum Database has a potential security vulnerability. -* When you suspect a potential vulnerability but you are unsure that it impacts - Greenplum Database. -* When you know of or suspect a potential vulnerability on another project that - is used by Greenplum Database. - -## Patch, Release, and Disclosure - -The Greenplum Database project team will respond to vulnerability reports as -follows: - -1. The Greenplum project team will investigate the vulnerability and determine -its effects and criticality. -2. If the issue is not deemed to be a vulnerability, the Greenplum project team -will follow up with a detailed reason for rejection. -3. The Greenplum project team will initiate a conversation with the reporter -promptly. -4. If a vulnerability is acknowledged and the timeline for a fix is determined, -the Greenplum project team will work on a plan to communicate with the -appropriate community, including identifying mitigating steps that affected -users can take to protect themselves until the fix is rolled out. -5. 
The Greenplum project team will also create a -[CVSS](https://www.first.org/cvss/specification-document) using the [CVSS -Calculator](https://www.first.org/cvss/calculator/3.0). The Greenplum project -team makes the final call on the calculated CVSS; it is better to move quickly -than making the CVSS perfect. Issues may also be reported to -[Mitre](https://cve.mitre.org/) using this [scoring -calculator](https://nvd.nist.gov/vuln-metrics/cvss/v3-calculator). The CVE will -initially be set to private. -6. The Greenplum project team will work on fixing the vulnerability and perform -internal testing before preparing to roll out the fix. -7. A public disclosure date is negotiated by the Greenplum Database project -team, and the bug submitter. We prefer to fully disclose the bug as soon as -possible once a user mitigation or patch is available. It is reasonable to -delay disclosure when the bug or the fix is not yet fully understood, or the -solution is not well-tested. The timeframe for disclosure is from immediate -(especially if it’s already publicly known) to a few weeks. The Greenplum -Database project team holds the final say when setting a public disclosure -date. -8. Once the fix is confirmed, the Greenplum project team will patch the -vulnerability in the next patch or minor release, and backport a patch release -into earlier supported releases as necessary. Upon release of the patched -version of Greenplum Database, we will follow the **Public Disclosure -Process**. - -## Public Disclosure Process - -The Greenplum project team publishes a [public -advisory](https://github.com/greenplum-db/gpdb/security/advisories?state=published) -to the Greenplum Database community via GitHub. In most cases, additional -communication via Slack, Twitter, mailing lists, blog and other channels will -assist in educating Greenplum Database users and rolling out the patched -release to affected users. - -The Greenplum project team will also publish any mitigating steps users can -take until the fix can be applied to their Greenplum Database instances. - -## Mailing lists - -* Use security@greenplum.org to report security concerns to the Greenplum - Database project team, who uses the list to privately discuss security issues - and fixes prior to disclosure. - -## Confidentiality, integrity and availability - -We consider vulnerabilities leading to the compromise of data confidentiality, -elevation of privilege, or integrity to be our highest priority concerns. -Availability, in particular in areas relating to DoS and resource exhaustion, -is also a serious security concern. The Greenplum Database project team takes -all vulnerabilities, potential vulnerabilities, and suspected vulnerabilities -seriously and will investigate them in an urgent and expeditious manner. - -Note that we do not currently consider the default settings for Greenplum -Database to be secure-by-default. It is necessary for operators to explicitly -configure settings, role based access control, and other resource related -features in Greenplum Database to provide a hardened Greenplum Database -environment. We will not act on any security disclosure that relates to a lack -of safe defaults. Over time, we will work towards improved safe-by-default -configuration, taking into account backwards compatibility. 
diff --git a/gpcontrib/diskquota/concourse/pre_build.sh b/gpcontrib/diskquota/concourse/pre_build.sh deleted file mode 100755 index 9d9adf90a7b..00000000000 --- a/gpcontrib/diskquota/concourse/pre_build.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/usr/bin/env bash - -set -ex - -# shellcheck source=/dev/null -source "$CI_REPO_DIR/common/entry_common.sh" - -install_cmake diff --git a/gpcontrib/diskquota/concourse/pre_test.sh b/gpcontrib/diskquota/concourse/pre_test.sh deleted file mode 100755 index 6e7319eb8e4..00000000000 --- a/gpcontrib/diskquota/concourse/pre_test.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env bash - -set -ex - -# shellcheck source=/dev/null -source "$CI_REPO_DIR/common/entry_common.sh" - -install_cmake - -start_gpdb - -create_fake_gpdb_src diff --git a/gpcontrib/diskquota/concourse/test.sh b/gpcontrib/diskquota/concourse/test.sh deleted file mode 100755 index 6ed0cffba5d..00000000000 --- a/gpcontrib/diskquota/concourse/test.sh +++ /dev/null @@ -1,51 +0,0 @@ -#!/bin/bash -l - -set -exo pipefail - -function activate_standby() { - gpstop -may -M immediate - export MASTER_DATA_DIRECTORY=$(readlink /home/gpadmin/gpdb_src)/gpAux/gpdemo/datadirs/standby - if [[ $PGPORT -eq 6000 ]] - then - export PGPORT=6001 - else - export PGPORT=7001 - export COORDINATOR_DATA_DIRECTORY=$MASTER_DATA_DIRECTORY - fi - gpactivatestandby -a -f -d $MASTER_DATA_DIRECTORY -} - -function _main() { - local tmp_dir="$(mktemp -d)" - tar -xzf /home/gpadmin/bin_diskquota/diskquota-*-*.tar.gz -C "$tmp_dir" - pushd "$tmp_dir" - ./install_gpdb_component - popd - - source /home/gpadmin/gpdb_src/gpAux/gpdemo/gpdemo-env.sh - - pushd /home/gpadmin/gpdb_src - make -C src/test/isolation2 install - popd - - pushd /home/gpadmin/bin_diskquota - # Show regress diff if test fails - export SHOW_REGRESS_DIFF=1 - time cmake --build . --target installcheck - # Run test again with standby master - activate_standby - time cmake --build . --target installcheck - if [[ $OS_NAME != "rhel9" ]] - then - # Run upgrade test (with standby master) - time cmake --build . --target upgradecheck - fi - popd - - if [[ $OS_NAME != "rhel9" ]] - then - time /home/gpadmin/diskquota_src/upgrade_test/alter_test.sh - fi -} - -_main From fb654eac0fdc655a99b1ca11678de1103bd7d822 Mon Sep 17 00:00:00 2001 From: Dianjin Wang Date: Tue, 9 Dec 2025 18:32:41 +0800 Subject: [PATCH 329/330] Add diskquota support for Apache Cloudberry Integrate diskquota extension into Apache Cloudberry build system and adapt the codebase for Cloudberry 2.0+ (PostgreSQL 14 based). Main changes: Build system integration: * Add new Makefile for building with Cloudberry source tree ``` make make install make installcheck make clean ``` * Update gpcontrib/Makefile to include diskquota in build and installcheck * Simplify CMakeLists.txt by removing GP6 version conditionals * Add PG_SRC_DIR availability check for isolation2 tests Code modernization (remove GP6 compatibility code): * Remove GP_VERSION_NUM < 70000 conditionals throughout codebase * Replace deprecated APIs: heap_open -> table_open, heap_beginscan_catalog -> table_beginscan_catalog, heap_endscan -> table_endscan, etc. 
* Replace init_ps_display() with set_ps_display() for process status * Replace StrNCpy() with snprintf() for safer string handling * Remove WaitForBackgroundWorkerShutdown() polyfill (now in core) * Remove MemoryAccounting_Reset() calls (removed in GP7+) * Update tuple descriptor attribute access from pointer to direct access Documentation: * Rewrite README.md for Apache Cloudberry with updated build instructions Other improvements: * Update extension comment to be more descriptive * Ensure postgres.h is included first in all source files CI: * Add ic-diskquota to `build-cloudberry.yml` workflow * For `build-deb-cloudberry.yml`, the installation and configure prefixes are not consistent, which results in test errors. The ic-diskquota test will be added back once the deb workflow is updated. See: https://lists.apache.org/thread/1zd80r1hvpwwh5fjd5yqgcc7sr4f27qr --- .github/workflows/build-cloudberry.yml | 14 +- .github/workflows/build-deb-cloudberry.yml | 10 +- gpcontrib/Makefile | 7 +- gpcontrib/diskquota/CMakeLists.txt | 8 +- gpcontrib/diskquota/Makefile | 76 ++ gpcontrib/diskquota/README.md | 394 +++++----- gpcontrib/diskquota/cmake/Gpdb.cmake | 46 +- gpcontrib/diskquota/cmake/Regress.cmake | 7 +- .../control/ddl/diskquota--1.0--2.0.sql | 284 ------- .../diskquota/control/ddl/diskquota--1.0.sql | 71 -- .../control/ddl/diskquota--2.0--1.0.sql | 87 --- .../control/ddl/diskquota--2.0--2.1.sql | 210 ----- .../diskquota/control/ddl/diskquota--2.0.sql | 313 -------- .../control/ddl/diskquota--2.1--2.0.sql | 200 ----- .../control/ddl/diskquota--2.1--2.2.sql | 63 -- .../diskquota/control/ddl/diskquota--2.1.sql | 318 -------- .../control/ddl/diskquota--2.2--2.1.sql | 56 -- .../diskquota/control/ddl/diskquota.control | 2 +- gpcontrib/diskquota/src/diskquota.c | 106 +-- gpcontrib/diskquota/src/diskquota.h | 9 - gpcontrib/diskquota/src/diskquota_utility.c | 59 +- gpcontrib/diskquota/src/gp_activetable.c | 19 +- gpcontrib/diskquota/src/monitored_db.c | 3 +- gpcontrib/diskquota/src/quotamodel.c | 43 +- gpcontrib/diskquota/src/relation_cache.c | 2 - gpcontrib/diskquota/tests/CMakeLists.txt | 73 +- .../tests/isolation2/expected/config.out | 44 ++ .../isolation2/expected/reset_config.out | 13 + .../{expected7 => expected}/setup.out | 0 .../expected/test_dropped_table.out | 44 ++ .../expected/test_per_segment_config.out | 20 +- .../expected/test_postmaster_restart.out | 50 +- .../isolation2/expected/test_rejectmap.out | 48 +- .../expected/test_temporary_table.out | 44 ++ .../expected/test_worker_timeout.out | 26 + .../expected7/test_create_extension.out | 15 - .../expected7/test_drop_extension.out | 12 - .../expected7/test_dropped_table.out | 72 -- .../expected7/test_ereport_from_seg.out | 62 -- .../expected7/test_fast_quota_view.out | 182 ----- .../expected7/test_per_segment_config.out | 269 ------- .../expected7/test_postmaster_restart.out | 162 ---- .../isolation2/expected7/test_rejectmap.out | 738 ------------------ .../expected7/test_relation_cache.out | 70 -- .../expected7/test_relation_size.out | 104 --- .../expected7/test_temporary_table.out | 70 -- .../isolation2/expected7/test_truncate.out | 86 -- .../isolation2/expected7/test_vacuum.out | 99 --- .../sql/test_postmaster_restart.in.sql | 2 +- .../regress/expected/test_appendonly.out | 8 +- .../tests/regress/expected/test_column.out | 2 +- .../tests/regress/expected/test_copy.out | 2 +- .../regress/expected/test_ctas_pause.out | 4 +- .../tests/regress/expected/test_ctas_role.out | 10 +- .../regress/expected/test_ctas_schema.out | 8 +-
.../expected/test_ctas_tablespace_role.out | 8 +- .../expected/test_ctas_tablespace_schema.out | 8 +- .../regress/expected/test_delete_quota.out | 2 +- .../expected/test_drop_after_pause.out | 2 +- .../expected/test_drop_any_extension.out | 2 +- .../regress/expected/test_drop_table.out | 4 +- .../regress/expected/test_fast_disk_check.out | 4 +- .../expected/test_fetch_table_stat.out | 2 +- .../tests/regress/expected/test_index.out | 12 +- .../expected/test_init_table_size_table.out | 12 +- .../expected/test_insert_after_drop.out | 2 +- .../tests/regress/expected/test_partition.out | 2 +- .../expected/test_pause_and_resume.out | 2 +- .../test_pause_and_resume_multiple_db.out | 6 +- .../tests/regress/expected/test_rejectmap.out | 4 +- .../regress/expected/test_relation_cache.out | 2 +- .../regress/expected/test_relation_size.out | 8 +- .../tests/regress/expected/test_relkind.out | 2 +- .../tests/regress/expected/test_rename.out | 4 +- .../tests/regress/expected/test_reschema.out | 2 +- .../regress/expected/test_table_size.out | 2 +- .../tests/regress/expected/test_temp_role.out | 4 +- .../tests/regress/expected/test_toast.out | 2 +- .../tests/regress/expected/test_truncate.out | 4 +- .../expected/test_uncommitted_table_size.out | 14 +- .../tests/regress/expected/test_update.out | 2 +- .../tests/regress/expected/test_vacuum.out | 4 +- .../regress/expected/test_worker_schedule.out | 28 +- .../regress/expected7/test_appendonly.out | 78 -- .../expected7/test_init_table_size_table.out | 71 -- .../regress/expected7/test_rejectmap.out | 292 ------- .../regress/expected7/test_relation_cache.out | 127 --- .../expected7/test_uncommitted_table_size.out | 236 ------ .../diskquota/upgrade_test/CMakeLists.txt | 93 +-- .../diskquota/upgrade_test/alter_test.sh | 23 +- .../upgrade_test/expected/1.0_catalog.out | 135 ---- .../expected/1.0_cleanup_quota.out | 1 - .../upgrade_test/expected/1.0_install.out | 14 - .../expected/1.0_migrate_to_version_1.0.out | 12 - .../upgrade_test/expected/1.0_set_quota.out | 34 - .../1.0_test_in_2.0_quota_create_in_1.0.out | 10 - .../upgrade_test/expected/2.0_catalog.out | 272 ------- .../expected/2.0_cleanup_quota.out | 1 - .../upgrade_test/expected/2.0_install.out | 14 - .../expected/2.0_migrate_to_version_2.0.out | 10 - .../upgrade_test/expected/2.0_set_quota.out | 61 -- .../2.0_test_in_1.0_quota_create_in_2.0.out | 14 - .../2.0_test_in_2.1_quota_create_in_2.0.out | 16 - .../upgrade_test/expected/2.1_catalog.out | 303 ------- .../expected/2.1_cleanup_quota.out | 1 - .../upgrade_test/expected/2.1_install.out | 13 - .../expected/2.1_migrate_to_version_2.1.out | 10 - .../upgrade_test/expected/2.1_set_quota.out | 61 -- .../2.1_test_in_2.0_quota_create_in_2.1.out | 16 - .../2.1_test_in_2.2_quota_create_in_2.1.out | 16 - .../upgrade_test/expected/2.2_catalog.out | 18 +- .../expected/2.2_migrate_to_version_2.2.out | 6 +- .../upgrade_test/expected/2.2_set_quota.out | 6 +- .../2.2_test_in_2.1_quota_create_in_2.2.out | 16 - .../upgrade_test/expected/2.3_catalog.out | 18 +- .../expected/2.3_migrate_to_version_2.3.out | 6 +- .../upgrade_test/expected/2.3_set_quota.out | 8 +- .../diskquota/upgrade_test/expected/dummy.out | 0 .../upgrade_test/expected7/2.2_catalog.out | 308 -------- .../expected7/2.2_cleanup_quota.out | 1 - .../upgrade_test/expected7/2.2_install.out | 13 - .../expected7/2.2_migrate_to_version_2.2.out | 10 - .../upgrade_test/expected7/2.2_set_quota.out | 72 -- .../2.2_test_in_2.3_quota_create_in_2.2.out | 16 - .../upgrade_test/expected7/2.3_catalog.out | 308 -------- 
.../expected7/2.3_cleanup_quota.out | 1 - .../upgrade_test/expected7/2.3_install.out | 13 - .../expected7/2.3_migrate_to_version_2.3.out | 10 - .../upgrade_test/expected7/2.3_set_quota.out | 66 -- .../2.3_test_in_2.2_quota_create_in_2.3.out | 16 - .../diskquota/upgrade_test/schedule_1.0--2.0 | 8 - .../diskquota/upgrade_test/schedule_2.0--1.0 | 8 - .../diskquota/upgrade_test/schedule_2.0--2.1 | 8 - .../diskquota/upgrade_test/schedule_2.1--2.0 | 8 - .../diskquota/upgrade_test/schedule_2.1--2.2 | 8 - .../diskquota/upgrade_test/schedule_2.2--2.1 | 8 - .../upgrade_test/sql/1.0_catalog.sql | 80 -- .../upgrade_test/sql/1.0_cleanup_quota.sql | 1 - .../upgrade_test/sql/1.0_install.sql | 17 - .../sql/1.0_migrate_to_version_1.0.sql | 10 - .../sql/1.0_not_work_using_2.x_binary.sql | 21 - .../upgrade_test/sql/1.0_set_quota.sql | 25 - .../1.0_test_in_2.0_quota_create_in_1.0.sql | 11 - .../upgrade_test/sql/2.0_catalog.sql | 81 -- .../upgrade_test/sql/2.0_cleanup_quota.sql | 1 - .../upgrade_test/sql/2.0_install.sql | 17 - .../sql/2.0_migrate_to_version_2.0.sql | 8 - .../sql/2.0_not_work_using_1.x_binary.sql | 23 - .../upgrade_test/sql/2.0_set_quota.sql | 44 -- .../2.0_test_in_1.0_quota_create_in_2.0.sql | 16 - .../2.0_test_in_2.1_quota_create_in_2.0.sql | 17 - .../upgrade_test/sql/2.1_catalog.sql | 81 -- .../upgrade_test/sql/2.1_cleanup_quota.sql | 1 - .../upgrade_test/sql/2.1_install.sql | 17 - .../sql/2.1_migrate_to_version_2.1.sql | 8 - .../upgrade_test/sql/2.1_set_quota.sql | 44 -- .../2.1_test_in_2.0_quota_create_in_2.1.sql | 16 - .../2.1_test_in_2.2_quota_create_in_2.1.sql | 17 - .../diskquota/upgrade_test/sql/dummy.sql | 0 159 files changed, 831 insertions(+), 7515 deletions(-) create mode 100644 gpcontrib/diskquota/Makefile delete mode 100644 gpcontrib/diskquota/control/ddl/diskquota--1.0--2.0.sql delete mode 100644 gpcontrib/diskquota/control/ddl/diskquota--1.0.sql delete mode 100644 gpcontrib/diskquota/control/ddl/diskquota--2.0--1.0.sql delete mode 100644 gpcontrib/diskquota/control/ddl/diskquota--2.0--2.1.sql delete mode 100644 gpcontrib/diskquota/control/ddl/diskquota--2.0.sql delete mode 100644 gpcontrib/diskquota/control/ddl/diskquota--2.1--2.0.sql delete mode 100644 gpcontrib/diskquota/control/ddl/diskquota--2.1--2.2.sql delete mode 100644 gpcontrib/diskquota/control/ddl/diskquota--2.1.sql delete mode 100644 gpcontrib/diskquota/control/ddl/diskquota--2.2--2.1.sql rename gpcontrib/diskquota/tests/isolation2/{expected7 => expected}/setup.out (100%) delete mode 100644 gpcontrib/diskquota/tests/isolation2/expected7/test_create_extension.out delete mode 100644 gpcontrib/diskquota/tests/isolation2/expected7/test_drop_extension.out delete mode 100644 gpcontrib/diskquota/tests/isolation2/expected7/test_dropped_table.out delete mode 100644 gpcontrib/diskquota/tests/isolation2/expected7/test_ereport_from_seg.out delete mode 100644 gpcontrib/diskquota/tests/isolation2/expected7/test_fast_quota_view.out delete mode 100644 gpcontrib/diskquota/tests/isolation2/expected7/test_per_segment_config.out delete mode 100644 gpcontrib/diskquota/tests/isolation2/expected7/test_postmaster_restart.out delete mode 100644 gpcontrib/diskquota/tests/isolation2/expected7/test_rejectmap.out delete mode 100644 gpcontrib/diskquota/tests/isolation2/expected7/test_relation_cache.out delete mode 100644 gpcontrib/diskquota/tests/isolation2/expected7/test_relation_size.out delete mode 100644 gpcontrib/diskquota/tests/isolation2/expected7/test_temporary_table.out delete mode 100644 
gpcontrib/diskquota/tests/isolation2/expected7/test_truncate.out delete mode 100644 gpcontrib/diskquota/tests/isolation2/expected7/test_vacuum.out delete mode 100644 gpcontrib/diskquota/tests/regress/expected7/test_appendonly.out delete mode 100644 gpcontrib/diskquota/tests/regress/expected7/test_init_table_size_table.out delete mode 100644 gpcontrib/diskquota/tests/regress/expected7/test_rejectmap.out delete mode 100644 gpcontrib/diskquota/tests/regress/expected7/test_relation_cache.out delete mode 100644 gpcontrib/diskquota/tests/regress/expected7/test_uncommitted_table_size.out delete mode 100644 gpcontrib/diskquota/upgrade_test/expected/1.0_catalog.out delete mode 100644 gpcontrib/diskquota/upgrade_test/expected/1.0_cleanup_quota.out delete mode 100644 gpcontrib/diskquota/upgrade_test/expected/1.0_install.out delete mode 100644 gpcontrib/diskquota/upgrade_test/expected/1.0_migrate_to_version_1.0.out delete mode 100644 gpcontrib/diskquota/upgrade_test/expected/1.0_set_quota.out delete mode 100644 gpcontrib/diskquota/upgrade_test/expected/1.0_test_in_2.0_quota_create_in_1.0.out delete mode 100644 gpcontrib/diskquota/upgrade_test/expected/2.0_catalog.out delete mode 100644 gpcontrib/diskquota/upgrade_test/expected/2.0_cleanup_quota.out delete mode 100644 gpcontrib/diskquota/upgrade_test/expected/2.0_install.out delete mode 100644 gpcontrib/diskquota/upgrade_test/expected/2.0_migrate_to_version_2.0.out delete mode 100644 gpcontrib/diskquota/upgrade_test/expected/2.0_set_quota.out delete mode 100644 gpcontrib/diskquota/upgrade_test/expected/2.0_test_in_1.0_quota_create_in_2.0.out delete mode 100644 gpcontrib/diskquota/upgrade_test/expected/2.0_test_in_2.1_quota_create_in_2.0.out delete mode 100644 gpcontrib/diskquota/upgrade_test/expected/2.1_catalog.out delete mode 100644 gpcontrib/diskquota/upgrade_test/expected/2.1_cleanup_quota.out delete mode 100644 gpcontrib/diskquota/upgrade_test/expected/2.1_install.out delete mode 100644 gpcontrib/diskquota/upgrade_test/expected/2.1_migrate_to_version_2.1.out delete mode 100644 gpcontrib/diskquota/upgrade_test/expected/2.1_set_quota.out delete mode 100644 gpcontrib/diskquota/upgrade_test/expected/2.1_test_in_2.0_quota_create_in_2.1.out delete mode 100644 gpcontrib/diskquota/upgrade_test/expected/2.1_test_in_2.2_quota_create_in_2.1.out delete mode 100644 gpcontrib/diskquota/upgrade_test/expected/2.2_test_in_2.1_quota_create_in_2.2.out delete mode 100644 gpcontrib/diskquota/upgrade_test/expected/dummy.out delete mode 100644 gpcontrib/diskquota/upgrade_test/expected7/2.2_catalog.out delete mode 100644 gpcontrib/diskquota/upgrade_test/expected7/2.2_cleanup_quota.out delete mode 100644 gpcontrib/diskquota/upgrade_test/expected7/2.2_install.out delete mode 100644 gpcontrib/diskquota/upgrade_test/expected7/2.2_migrate_to_version_2.2.out delete mode 100644 gpcontrib/diskquota/upgrade_test/expected7/2.2_set_quota.out delete mode 100644 gpcontrib/diskquota/upgrade_test/expected7/2.2_test_in_2.3_quota_create_in_2.2.out delete mode 100644 gpcontrib/diskquota/upgrade_test/expected7/2.3_catalog.out delete mode 100644 gpcontrib/diskquota/upgrade_test/expected7/2.3_cleanup_quota.out delete mode 100644 gpcontrib/diskquota/upgrade_test/expected7/2.3_install.out delete mode 100644 gpcontrib/diskquota/upgrade_test/expected7/2.3_migrate_to_version_2.3.out delete mode 100644 gpcontrib/diskquota/upgrade_test/expected7/2.3_set_quota.out delete mode 100644 gpcontrib/diskquota/upgrade_test/expected7/2.3_test_in_2.2_quota_create_in_2.3.out delete mode 100644 
gpcontrib/diskquota/upgrade_test/schedule_1.0--2.0 delete mode 100644 gpcontrib/diskquota/upgrade_test/schedule_2.0--1.0 delete mode 100644 gpcontrib/diskquota/upgrade_test/schedule_2.0--2.1 delete mode 100644 gpcontrib/diskquota/upgrade_test/schedule_2.1--2.0 delete mode 100644 gpcontrib/diskquota/upgrade_test/schedule_2.1--2.2 delete mode 100644 gpcontrib/diskquota/upgrade_test/schedule_2.2--2.1 delete mode 100644 gpcontrib/diskquota/upgrade_test/sql/1.0_catalog.sql delete mode 100644 gpcontrib/diskquota/upgrade_test/sql/1.0_cleanup_quota.sql delete mode 100644 gpcontrib/diskquota/upgrade_test/sql/1.0_install.sql delete mode 100644 gpcontrib/diskquota/upgrade_test/sql/1.0_migrate_to_version_1.0.sql delete mode 100644 gpcontrib/diskquota/upgrade_test/sql/1.0_not_work_using_2.x_binary.sql delete mode 100644 gpcontrib/diskquota/upgrade_test/sql/1.0_set_quota.sql delete mode 100644 gpcontrib/diskquota/upgrade_test/sql/1.0_test_in_2.0_quota_create_in_1.0.sql delete mode 100644 gpcontrib/diskquota/upgrade_test/sql/2.0_catalog.sql delete mode 100644 gpcontrib/diskquota/upgrade_test/sql/2.0_cleanup_quota.sql delete mode 100644 gpcontrib/diskquota/upgrade_test/sql/2.0_install.sql delete mode 100644 gpcontrib/diskquota/upgrade_test/sql/2.0_migrate_to_version_2.0.sql delete mode 100644 gpcontrib/diskquota/upgrade_test/sql/2.0_not_work_using_1.x_binary.sql delete mode 100644 gpcontrib/diskquota/upgrade_test/sql/2.0_set_quota.sql delete mode 100644 gpcontrib/diskquota/upgrade_test/sql/2.0_test_in_1.0_quota_create_in_2.0.sql delete mode 100644 gpcontrib/diskquota/upgrade_test/sql/2.0_test_in_2.1_quota_create_in_2.0.sql delete mode 100644 gpcontrib/diskquota/upgrade_test/sql/2.1_catalog.sql delete mode 100644 gpcontrib/diskquota/upgrade_test/sql/2.1_cleanup_quota.sql delete mode 100644 gpcontrib/diskquota/upgrade_test/sql/2.1_install.sql delete mode 100644 gpcontrib/diskquota/upgrade_test/sql/2.1_migrate_to_version_2.1.sql delete mode 100644 gpcontrib/diskquota/upgrade_test/sql/2.1_set_quota.sql delete mode 100644 gpcontrib/diskquota/upgrade_test/sql/2.1_test_in_2.0_quota_create_in_2.1.sql delete mode 100644 gpcontrib/diskquota/upgrade_test/sql/2.1_test_in_2.2_quota_create_in_2.1.sql delete mode 100644 gpcontrib/diskquota/upgrade_test/sql/dummy.sql diff --git a/.github/workflows/build-cloudberry.yml b/.github/workflows/build-cloudberry.yml index 04d5e827b6e..f5d159a63ac 100644 --- a/.github/workflows/build-cloudberry.yml +++ b/.github/workflows/build-cloudberry.yml @@ -307,6 +307,10 @@ jobs: "gpcontrib/gp_sparse_vector:installcheck", "gpcontrib/gp_toolkit:installcheck"] }, + {"test":"ic-diskquota", + "make_configs":["gpcontrib/diskquota:installcheck"], + "shared_preload_libraries":"diskquota-2.3" + }, {"test":"ic-fixme", "make_configs":["src/test/regress:installcheck-fixme"], "enable_core_check":false @@ -1265,7 +1269,15 @@ jobs: { chmod +x "${SRC_DIR}"/devops/build/automation/cloudberry/scripts/create-cloudberry-demo-cluster.sh - if ! time su - gpadmin -c "cd ${SRC_DIR} && NUM_PRIMARY_MIRROR_PAIRS='${{ matrix.num_primary_mirror_pairs }}' SRC_DIR=${SRC_DIR} ${SRC_DIR}/devops/build/automation/cloudberry/scripts/create-cloudberry-demo-cluster.sh"; then + + # Build BLDWRAP_POSTGRES_CONF_ADDONS for shared_preload_libraries if specified + EXTRA_CONF="" + if [[ -n "${{ matrix.shared_preload_libraries }}" ]]; then + EXTRA_CONF="shared_preload_libraries='${{ matrix.shared_preload_libraries }}'" + echo "Adding shared_preload_libraries: ${{ matrix.shared_preload_libraries }}" + fi + + if ! 
time su - gpadmin -c "cd ${SRC_DIR} && NUM_PRIMARY_MIRROR_PAIRS='${{ matrix.num_primary_mirror_pairs }}' BLDWRAP_POSTGRES_CONF_ADDONS=\"${EXTRA_CONF}\" SRC_DIR=${SRC_DIR} ${SRC_DIR}/devops/build/automation/cloudberry/scripts/create-cloudberry-demo-cluster.sh"; then echo "::error::Demo cluster creation failed" exit 1 fi diff --git a/.github/workflows/build-deb-cloudberry.yml b/.github/workflows/build-deb-cloudberry.yml index 38c2391376e..6b707a556cb 100644 --- a/.github/workflows/build-deb-cloudberry.yml +++ b/.github/workflows/build-deb-cloudberry.yml @@ -1234,7 +1234,15 @@ jobs: { chmod +x "${SRC_DIR}"/devops/build/automation/cloudberry/scripts/create-cloudberry-demo-cluster.sh - if ! time su - gpadmin -c "cd ${SRC_DIR} && NUM_PRIMARY_MIRROR_PAIRS='${{ matrix.num_primary_mirror_pairs }}' SRC_DIR=${SRC_DIR} ${SRC_DIR}/devops/build/automation/cloudberry/scripts/create-cloudberry-demo-cluster.sh"; then + + # Build BLDWRAP_POSTGRES_CONF_ADDONS for shared_preload_libraries if specified + EXTRA_CONF="" + if [[ -n "${{ matrix.shared_preload_libraries }}" ]]; then + EXTRA_CONF="shared_preload_libraries='${{ matrix.shared_preload_libraries }}'" + echo "Adding shared_preload_libraries: ${{ matrix.shared_preload_libraries }}" + fi + + if ! time su - gpadmin -c "cd ${SRC_DIR} && NUM_PRIMARY_MIRROR_PAIRS='${{ matrix.num_primary_mirror_pairs }}' BLDWRAP_POSTGRES_CONF_ADDONS=\"${EXTRA_CONF}\" SRC_DIR=${SRC_DIR} ${SRC_DIR}/devops/build/automation/cloudberry/scripts/create-cloudberry-demo-cluster.sh"; then echo "::error::Demo cluster creation failed" exit 1 fi diff --git a/gpcontrib/Makefile b/gpcontrib/Makefile index 60fef1778c6..8d95a14f876 100644 --- a/gpcontrib/Makefile +++ b/gpcontrib/Makefile @@ -22,7 +22,8 @@ ifeq "$(enable_debug_extensions)" "yes" gp_legacy_string_agg \ gp_replica_check \ gp_toolkit \ - pg_hint_plan + pg_hint_plan \ + diskquota else recurse_targets = gp_sparse_vector \ gp_distribution_policy \ @@ -30,7 +31,8 @@ else gp_legacy_string_agg \ gp_exttable_fdw \ gp_toolkit \ - pg_hint_plan + pg_hint_plan \ + diskquota endif ifeq "$(with_zstd)" "yes" @@ -97,3 +99,4 @@ installcheck: $(MAKE) -C gp_sparse_vector installcheck $(MAKE) -C gp_toolkit installcheck $(MAKE) -C gp_exttable_fdw installcheck + $(MAKE) -C diskquota installcheck diff --git a/gpcontrib/diskquota/CMakeLists.txt b/gpcontrib/diskquota/CMakeLists.txt index 75be74a9f3d..fad393cb101 100644 --- a/gpcontrib/diskquota/CMakeLists.txt +++ b/gpcontrib/diskquota/CMakeLists.txt @@ -22,6 +22,10 @@ include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/Gpdb.cmake) # set include directories for all sub-projects include_directories(${PG_INCLUDE_DIR_SERVER}) include_directories(${PG_INCLUDE_DIR}) # for libpq +# For in-tree builds, libpq headers are in a separate directory +if(PG_INCLUDE_DIR_LIBPQ) + include_directories(${PG_INCLUDE_DIR_LIBPQ}) +endif() # Overwrite the default build type flags set by cmake. # We don't want the '-O3 -DNDEBUG' from cmake. Instead, those will be set by the CFLAGS from pg_config. # And, the good news is, GPDB release always have '-g'. 
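As a quick cross-check of the workflow change above: once the demo cluster is up with `BLDWRAP_POSTGRES_CONF_ADDONS` injected, the GUC can be read back with standard tools (a hedged sketch; the expected value `diskquota-2.3` comes from the matrix entry above):

```bash
# Verify the preload setting took effect on the demo cluster.
psql -d postgres -At -c "SHOW shared_preload_libraries;"   # expect: diskquota-2.3

# Or read it back across the whole cluster with gpconfig.
gpconfig -s shared_preload_libraries
```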
@@ -85,9 +89,7 @@ set_target_properties( C_STANDARD 99 LINKER_LANGUAGE "C") -if (${GP_MAJOR_VERSION} STRGREATER_EQUAL "7") - TARGET_LINK_LIBRARIES(diskquota ${PG_LIB_DIR}/libpq.so) -endif() +TARGET_LINK_LIBRARIES(diskquota ${PG_LIB_DIR}/libpq.so) # packing part, move to a separate file if this part is too large include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/Distro.cmake) diff --git a/gpcontrib/diskquota/Makefile b/gpcontrib/diskquota/Makefile new file mode 100644 index 00000000000..1ae174f0d6f --- /dev/null +++ b/gpcontrib/diskquota/Makefile @@ -0,0 +1,76 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# +# Makefile for diskquota extension +# +# This Makefile wraps the CMake build system for integration with +# the Cloudberry build process. +# +# Usage: +# make # build the extension +# make install # install the extension +# make installcheck # run regression tests +# make clean # clean build artifacts + +ifdef USE_PGXS +# Standalone build: pg_config must be in PATH +PG_CONFIG_ABS := $(shell which pg_config) +PG_PREFIX := $(shell $(PG_CONFIG_ABS) --prefix) +CMAKE_OPTS := -DPG_CONFIG=$(PG_CONFIG_ABS) +else +# In-tree build +subdir = gpcontrib/diskquota +top_builddir = ../.. +include $(top_builddir)/src/Makefile.global +# Get absolute source directory path +PG_SRC_DIR_ABS := $(shell cd $(top_builddir) && pwd) +PG_CONFIG_ABS := $(PG_SRC_DIR_ABS)/src/bin/pg_config/pg_config +PG_PREFIX := $(prefix) +# Pass PG_SRC_DIR to CMake so it doesn't try to derive it from pg_config +CMAKE_OPTS := -DPG_CONFIG=$(PG_CONFIG_ABS) -DPG_SRC_DIR=$(PG_SRC_DIR_ABS) +endif + +.PHONY: all +all: build + +.PHONY: build +build: + @echo "Building diskquota with CMake..." + @if [ ! -f build/Makefile ]; then \ + mkdir -p build && \ + cd build && \ + cmake $(CMAKE_OPTS) -DCMAKE_INSTALL_PREFIX=$(DESTDIR)$(PG_PREFIX) .. ; \ + fi + cd build && $(MAKE) + +.PHONY: install +install: build + cd build && $(MAKE) install + +.PHONY: installcheck +installcheck: + @echo "Running diskquota regression tests..." + @if [ ! -f build/Makefile ]; then \ + mkdir -p build && \ + cd build && \ + cmake $(CMAKE_OPTS) -DCMAKE_INSTALL_PREFIX=$(DESTDIR)$(PG_PREFIX) .. ; \ + fi + cd build && $(MAKE) installcheck + +.PHONY: clean +clean: + rm -rf build diff --git a/gpcontrib/diskquota/README.md b/gpcontrib/diskquota/README.md index 5be27dd5121..a6ce511273d 100644 --- a/gpcontrib/diskquota/README.md +++ b/gpcontrib/diskquota/README.md @@ -1,11 +1,18 @@ -# Overview +# Diskquota for Apache Cloudberry + +> **Note**: This project is forked from [greenplum-db/diskquota](https://github.com/greenplum-db/diskquota-archive) +> and has been adapted specifically for [Apache Cloudberry](https://cloudberry.apache.org/). +> It requires Apache Cloudberry 2.0+ (based on PostgreSQL 14). 
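For reference, the wrapper Makefile above reduces to roughly the following manual CMake sequence for an in-tree build (a sketch; `$SRC` and the install prefix are illustrative placeholders, not values the Makefile hard-codes):

```bash
# Equivalent of `make` + `make install` in gpcontrib/diskquota,
# spelled out as the underlying CMake invocation.
SRC=$(pwd)   # Cloudberry source tree root
mkdir -p gpcontrib/diskquota/build
cd gpcontrib/diskquota/build
cmake -DPG_CONFIG="$SRC/src/bin/pg_config/pg_config" \
      -DPG_SRC_DIR="$SRC" \
      -DCMAKE_INSTALL_PREFIX=/usr/local/cloudberry-db ..
make && make install
```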
+ +## Overview + Diskquota is an extension that provides disk usage enforcement for database -objects in Greenplum DB. Currently it supports to set quota limit on schema -and role in a given database and limit the amount of disk space that a schema +objects in Apache Cloudberry. Currently it supports setting quota limits on schema +and role in a given database and limiting the amount of disk space that a schema or a role can use. This project is inspired by Heikki's -[pg_quota project](https://github.com/hlinnaka/pg_quota) and enhance it in +[pg_quota project](https://github.com/hlinnaka/pg_quota) and enhances it in two aspects: 1. To support different kinds of DDL and DML which may change the disk usage @@ -13,28 +20,28 @@ of database objects. 2. To support diskquota extension on MPP architecture. -Diskquota is a soft limit of disk uages. On one hand it has some delay to +Diskquota is a soft limit of disk usage. On one hand it has some delay to detect the schemas or roles whose quota limit is exceeded. On the other hand, -'soft limit' supports two kinds of encforcement: Query loading data into +'soft limit' supports two kinds of enforcement: Query loading data into out-of-quota schema/role will be forbidden before query is running. Query loading data into schema/role with rooms will be cancelled when the quota limit is reached dynamically during the query is running. -# Design -Diskquota extension is based on background worker framework in Greenplum (bg -worker needs pg_verion >= 9.4, which is supported in Greenplum 6 and later). +## Design + +Diskquota extension is based on background worker framework in Apache Cloudberry. There are two kinds of background workers: diskquota launcher and diskquota worker. -There is only one launcher process per database master. There is no launcher +There is only one launcher process per database coordinator. There is no launcher process for segments. -Launcher process is reponsible for manage worker processes: Calling +Launcher process is responsible for managing worker processes: Calling RegisterDynamicBackgroundWorker() to create new workers and keep their handle. Calling TerminateBackgroundWorker() to terminate workers which are disabled -when DBA modify GUC diskquota.monitor_databases. +when DBA modifies GUC diskquota.monitor_databases. There are many worker processes, one for each database which is listed in diskquota.monitor_databases. Same as launcher process, worker processes -only run at master node. Since each worker process needs to call SPI to fetch +only run at coordinator node. Since each worker process needs to call SPI to fetch active table size, to limit the total cost of worker processes, we support to monitor at most 10 databases at the same time currently. Worker processes are responsible for monitoring the disk usage of schemas and roles for the target @@ -46,35 +53,38 @@ schemas or roles into the rejectmap in shared memory. Schemas or roles in rejectmap are used to do query enforcement to cancel queries which plan to load data into these schemas or roles. -From MPP perspective, diskquota launcher and worker processes are all run at -Master side. Master only design allows us to save the memory resource on -Segments, and simplifies the communication from Master to Segment by call SPI -queries periodically. Segments are used to detected the active table and -calculated the active table size. Master aggregate the table size from each -segments and maintain the disk quota model. 
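The launcher-to-worker handoff described above follows the standard dynamic background worker pattern. A minimal sketch (function and library names such as `disk_quota_worker_main` are illustrative, not the extension's exact symbols):

```c
#include "postgres.h"

#include "miscadmin.h"
#include "postmaster/bgworker.h"

/* Sketch: the launcher registers one worker per monitored database and
 * keeps the returned handle so it can later TerminateBackgroundWorker(). */
static BackgroundWorkerHandle *
start_worker_for_database(Oid dbid)
{
	BackgroundWorker        worker;
	BackgroundWorkerHandle *handle;

	memset(&worker, 0, sizeof(worker));
	worker.bgw_flags = BGWORKER_SHMEM_ACCESS |
		BGWORKER_BACKEND_DATABASE_CONNECTION;
	worker.bgw_start_time = BgWorkerStart_RecoveryFinished;
	worker.bgw_restart_time = BGW_NEVER_RESTART;
	snprintf(worker.bgw_library_name, BGW_MAXLEN, "diskquota");
	snprintf(worker.bgw_function_name, BGW_MAXLEN, "disk_quota_worker_main");
	snprintf(worker.bgw_name, BGW_MAXLEN, "diskquota worker %u", dbid);
	worker.bgw_main_arg = ObjectIdGetDatum(dbid);
	worker.bgw_notify_pid = MyProcPid;	/* notify launcher on start/stop */

	if (!RegisterDynamicBackgroundWorker(&worker, &handle))
		return NULL;			/* out of background worker slots */
	return handle;
}
```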
+From the MPP perspective, diskquota launcher and worker processes all run at +the Coordinator side. The Coordinator-only design allows us to save memory resources on +Segments, and simplifies the communication from Coordinator to Segment by calling SPI +queries periodically. Segments are used to detect the active tables and +calculate the active table size. Coordinator aggregates the table size from each +segment and maintains the disk quota model. + +### Active table -Active tables are the tables whose table size may change in the last quota check interval. Active tables are detected at Segment QE side: hooks in -smgecreate(), smgrextend() and smgrtruncate() are used to detect active tables +smgrcreate(), smgrextend() and smgrtruncate() are used to detect active tables and store them (currently relfilenode) in the shared memory. Diskquota worker process will periodically call dispatch queries to all the segments and -consume active tables in shared memories, convert relfilenode to relaton oid, -and calcualte table size by calling pg_table_size(), which will sum +consume active tables in shared memories, convert relfilenode to relation oid, +and calculate table size by calling pg_table_size(), which will sum the size of table (including: base, vm, fsm, toast) in each segment. -## Enforcement +### Enforcement + Enforcement is implemented as hooks. There are two kinds of enforcement hooks: enforcement before query is running and enforcement during query is running. The 'before query' one is implemented at ExecutorCheckPerms_hook in function -ExecCheckRTPerms() +ExecCheckRTPerms(). The 'during query' one is implemented at DispatcherCheckPerms_hook in function checkDispatchResult(). For queries loading a huge number of data, dispatcher -will poll the connnection with a poll timeout. Hook will be called at every +will poll the connection with a poll timeout. Hook will be called at every poll timeout with waitMode == DISPATCH_WAIT_NONE. Currently only async -diskpatcher supports 'during query' quota enforcement. +dispatcher supports 'during query' quota enforcement. + +### Quota setting store - -## Quota setting store Quota limit of a schema or a role is stored in table 'quota_config' in 'diskquota' schema in monitored database. So each database stores and manages its own disk quota configuration. Note that although role is a db object in @@ -82,226 +92,257 @@ cluster level, we limit the diskquota of a role to be database specific. That is to say, a role may have different quota limit on different databases and their disk usage is isolated between databases. -# Development +## Development -[cmake](https://cmake.org) (>= 3.20) needs to be installed. +### Prerequisites -1. Build & install disk quota -``` -mkdir -p /build -cd /build -``` +The following packages need to be installed: -If the `greenplum_path.sh` has been source: +- openssl-devel +- krb5-devel +- [cmake](https://cmake.org) (>= 3.20) + +On RHEL/CentOS/Rocky Linux: +```bash +sudo yum install openssl-devel krb5-devel cmake ``` + +On Ubuntu/Debian: +```bash +sudo apt-get install libssl-dev libkrb5-dev cmake ``` -Otherwise: +### Build & Install -``` -# Without source greenplum_path.sh -cmake .. --DPG_CONFIG=/bin/pg_config -# -``` +Diskquota uses CMake as its build system, wrapped by a Makefile for integration with the Cloudberry build process.
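Circling back to the 'before query' enforcement described in the Design section above, the hook has roughly this shape on the PostgreSQL 14 code base (a sketch; `relation_in_rejectmap()` is an illustrative helper, not the extension's actual symbol):

```c
#include "postgres.h"

#include "executor/executor.h"
#include "nodes/parsenodes.h"

/* Illustrative: true if the relation's schema/role is in the rejectmap. */
extern bool relation_in_rejectmap(Oid relid);

static ExecutorCheckPerms_hook_type prev_check_perms_hook = NULL;

/* Sketch: refuse INSERTs whose target is already out of quota.
 * Installed from _PG_init() via:
 *   prev_check_perms_hook = ExecutorCheckPerms_hook;
 *   ExecutorCheckPerms_hook = quota_check_RTPerms;
 */
static bool
quota_check_RTPerms(List *rangeTable, bool ereport_on_violation)
{
	ListCell   *l;

	foreach(l, rangeTable)
	{
		RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);

		/* only relations being written to are interesting */
		if (rte->rtekind != RTE_RELATION ||
			(rte->requiredPerms & ACL_INSERT) == 0)
			continue;
		if (relation_in_rejectmap(rte->relid))
			ereport(ERROR,
					(errcode(ERRCODE_DISK_FULL),
					 errmsg("schema or role disk quota exceeded")));
	}
	if (prev_check_perms_hook)
		return prev_check_perms_hook(rangeTable, ereport_on_violation);
	return true;
}
```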
-Build and install: +#### Option 1: Build with Apache Cloudberry Source Tree -``` +Diskquota is included in the Apache Cloudberry source tree: + +```bash +cd +./configure [options...] + +# Build everything +make -j$(nproc) make install -``` -2. Create database to store global information. +# Or build diskquota only +make -C gpcontrib/diskquota +make -C gpcontrib/diskquota install ``` -create database diskquota; + +#### Option 2: Standalone Build (without source tree) + +If you only have an installed Apache Cloudberry (no source tree): + +```bash +# Source the environment first +source /path/to/cloudberry-db/cloudberry-env.sh + +cd gpcontrib/diskquota +make +make install ``` -3. Enable diskquota as preload library +### Setup + +1. Create database to store global information: +```sql +CREATE DATABASE diskquota; ``` -# enable diskquota in preload library. + +2. Enable diskquota as preload library: +```bash +# Set USER environment variable if not set (required by gpconfig) +export USER=$(whoami) + +# enable diskquota in preload library gpconfig -c shared_preload_libraries -v 'diskquota-' -# restart database. +# restart database gpstop -ar ``` -4. Config GUC of diskquota. -``` -# set naptime ( second ) to refresh the disk quota stats periodically +3. Config GUC of diskquota: +```bash +# set naptime (seconds) to refresh the disk quota stats periodically gpconfig -c diskquota.naptime -v 2 ``` -5. Create diskquota extension in monitored database. -``` -create extension diskquota; +4. Create diskquota extension in monitored database: +```sql +CREATE EXTENSION diskquota; ``` -6. Initialize existing table size information is needed if `create extension` is not executed in a new created database. -``` -select diskquota.init_table_size_table(); +5. Initialize existing table size information (needed if `CREATE EXTENSION` is not executed in a newly created database): +```sql +SELECT diskquota.init_table_size_table(); ``` -## clang-format +## Usage -In order to pass the CI check for PR, the changed code needs to be formated by -[clang-format](https://clang.llvm.org/docs/ClangFormat.html) **13**. A static-linked -version can be found at https://github.com/beeender/clang-tools-static-binaries/releases/tag/master-7d0aff9a . 
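After these setup steps, a quick sanity check could look like the following (a hedged sketch; `diskquota.status()` and the `diskquota.table_size` table appear in the extension DDL elsewhere in this patch):

```sql
-- The library must be preloaded for the extension to work.
SHOW shared_preload_libraries;

-- The extension should respond once the worker is up.
SELECT * FROM diskquota.status();

-- Table size bookkeeping should be populated after init_table_size_table().
SELECT count(*) FROM diskquota.table_size;
```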
+### Set/update/delete schema quota limit -To format all the source files in the git tree: +```sql +CREATE SCHEMA s1; +SELECT diskquota.set_schema_quota('s1', '1 MB'); +SET search_path TO s1; -``` -git ls-files '*.c' '*.h' | xargs clang-format --style=file -i +CREATE TABLE a(i int) DISTRIBUTED BY (i); +-- insert small data succeeded +INSERT INTO a SELECT generate_series(1,100); +-- insert large data failed +INSERT INTO a SELECT generate_series(1,10000000); +-- insert small data failed +INSERT INTO a SELECT generate_series(1,100); + +-- delete quota configuration +SELECT diskquota.set_schema_quota('s1', '-1'); +-- insert small data succeeded +SELECT pg_sleep(5); +INSERT INTO a SELECT generate_series(1,100); +RESET search_path; ``` -If you have `git-clang-format` installed, it can be as easy as: +### Set/update/delete role quota limit -``` -git clang-format -``` +```sql +CREATE ROLE u1 NOLOGIN; +CREATE TABLE b (i int) DISTRIBUTED BY (i); +ALTER TABLE b OWNER TO u1; +SELECT diskquota.set_role_quota('u1', '1 MB'); -To skip formatting a certain piece of code: - -```c -/* clang-format off */ -#if SOME_MACRO -#define DO_NOT_FORMAT_ME \ - (1 \ - + \ - )\ -#endif -/* clang-format on */ -``` +-- insert small data succeeded +INSERT INTO b SELECT generate_series(1,100); +-- insert large data failed +INSERT INTO b SELECT generate_series(1,10000000); +-- insert small data failed +INSERT INTO b SELECT generate_series(1,100); -# Usage -1. Set/update/delete schema quota limit using diskquota.set_schema_quota -``` -create schema s1; -select diskquota.set_schema_quota('s1', '1 MB'); -set search_path to s1; - -create table a(i int) DISTRIBUTED BY (i); -# insert small data succeeded -insert into a select generate_series(1,100); -# insert large data failed -insert into a select generate_series(1,10000000); -# insert small data failed -insert into a select generate_series(1,100); - -# delete quota configuration -select diskquota.set_schema_quota('s1', '-1'); -# insert small data succeed -select pg_sleep(5); -insert into a select generate_series(1,100); -reset search_path; +-- delete quota configuration +SELECT diskquota.set_role_quota('u1', '-1'); +-- insert small data succeeded +SELECT pg_sleep(5); +INSERT INTO b SELECT generate_series(1,100); +RESET search_path; ``` -2. Set/update/delete role quota limit using diskquota.set_role_quota -``` -create role u1 nologin; -create table b (i int) DISTRIBUTED BY (i); -alter table b owner to u1; -select diskquota.set_role_quota('u1', '1 MB'); - -# insert small data succeeded -insert into b select generate_series(1,100); -# insert large data failed -insert into b select generate_series(1,10000000); -# insert small data failed -insert into b select generate_series(1,100); - -# delete quota configuration -select diskquota.set_role_quota('u1', '-1'); -# insert small data succeed -select pg_sleep(5); -insert into a select generate_series(1,100); -reset search_path; -``` +### Show schema quota limit and current usage -3. Show schema quota limit and current usage -``` -select * from diskquota.show_fast_schema_quota_view; +```sql +SELECT * FROM diskquota.show_fast_schema_quota_view; ``` +## Test -# Test Run regression tests: +Before running regression tests, make sure: + +1. The diskquota extension is installed (`make install`) on all nodes +2. The `shared_preload_libraries` is configured and the cluster is restarted +3. 
The `diskquota` database exists + +```bash +# Set USER environment variable if not set (required by gpconfig) +export USER=$(whoami) + +# Configure shared_preload_libraries (use current version) +gpconfig -c shared_preload_libraries -v 'diskquota-2.3' + +# Restart the cluster +gpstop -ar + +# Create diskquota database if not exists +createdb diskquota ``` -cd /build; + +Run regression tests: +```bash +# From source tree build: +make -C gpcontrib/diskquota installcheck + +# Or from build directory: +cd gpcontrib/diskquota/build make installcheck ``` + Show quick diff of regress results: -``` +```bash +cd gpcontrib/diskquota/build make diff__ ``` -Show all build target: -``` -make help -``` -# HA +## HA + Not implemented yet. One solution would be: start launcher process on standby -and enable it to fork worker processes when switch from standby Master to Master. +and enable it to fork worker processes when switching from standby Coordinator to Coordinator. -# Benchmark & Performence Test -## Cost of diskquota worker. +## Benchmark & Performance Test + +### Cost of diskquota worker To be added. -## Impact on OLTP queries +### Impact on OLTP queries To be added. -# Notes -1. Drop database with diskquota enabled. +## Notes + +### Drop database with diskquota enabled If DBA created diskquota extension in a database, there will be a connection -to this database from diskquota worker process. DBA need to firstly the drop diskquota -extension in this database, and then database could be dropped successfully. +to this database from diskquota worker process. DBA needs to first drop the diskquota +extension in this database, and then the database can be dropped successfully. -2. Temp table. +### Temp table -Diskquota supports to limit the disk usage of temp table as well. +Diskquota supports limiting the disk usage of temp tables as well. But schema and role are different. For role, i.e. the owner of the temp table, diskquota will treat it the same as normal tables and sum its table size to its owner's quota. While for schema, temp table is located under namespace -'pg_temp_backend_id', so temp table size will not sum to the current schema's qouta. +'pg_temp_backend_id', so temp table size will not be summed to the current schema's quota. -# Known Issue. +## Known Issues -1. Since Greenplum doesn't support READ UNCOMMITTED isolation level, -our implementation cannot detect the new created table inside an -uncommitted transaction (See below example). Hence enforcement on +### Uncommitted transactions + +Since Apache Cloudberry doesn't support READ UNCOMMITTED isolation level, +our implementation cannot detect the newly created table inside an +uncommitted transaction (see below example). Hence enforcement on that newly created table will not work. After transaction commit, -diskquota worker process could detect the newly create table -and do enfocement accordingly in later queries. -``` -# suppose quota of schema s1 is 1MB. -set search_path to s1; -create table b (i int) DISTRIBUTED BY (i); +diskquota worker process could detect the newly created table +and do enforcement accordingly in later queries. 
+ +```sql +-- suppose quota of schema s1 is 1MB +SET search_path TO s1; +CREATE TABLE b (i int) DISTRIBUTED BY (i); BEGIN; -create table a (i int) DISTRIBUTED BY (i); -# Issue: quota enforcement doesn't work on table a -insert into a select generate_series(1,200000); -# quota enforcement works on table b -insert into b select generate_series(1,200000); -# quota enforcement works on table a, -# since quota limit of schema s1 has already exceeds. -insert into a select generate_series(1,200000); +CREATE TABLE a (i int) DISTRIBUTED BY (i); +-- Issue: quota enforcement doesn't work on table a +INSERT INTO a SELECT generate_series(1,200000); +-- quota enforcement works on table b +INSERT INTO b SELECT generate_series(1,200000); +-- quota enforcement works on table a, +-- since quota limit of schema s1 has already been exceeded +INSERT INTO a SELECT generate_series(1,200000); END; ``` -'Create Table As' command has the similar problem. +'CREATE TABLE AS' command has the similar problem. + +One solution direction is that we calculate the additional 'uncommitted data size' +for schema and role in worker process. Since pg_table_size needs to hold +AccessShareLock to relation (and worker process doesn't even know this reloid exists), +we need to skip it, and call stat() directly with tolerance to file unlink. +Skipping lock is dangerous and we plan to leave it as a known issue at the current stage. -One solution direction is that we calculate the additional 'uncommited data size' -for schema and role in worker process. Since pg_table_size need to hold -AccessShareLock to relation (And worker process don't even know this reloid exists), -we need to skip it, and call stat() directly with tolerant to file unlink. -Skip lock is dangerous and we plan to leave it as known issue at current stage. +### Missing empty schema or role in views -2. Missing empty schema or role in show_fast_schema_quota_view and show_fast_role_quota_view Currently, if there is no table in a specific schema or no table's owner is a specific role, these schemas or roles will not be listed in show_fast_schema_quota_view and show_fast_role_quota_view. -3. Out of shared memory +### Out of shared memory Diskquota extension uses two kinds of shared memories. One is used to save rejectmap and another one is to save active table list. The rejectmap shared @@ -313,7 +354,6 @@ As shared memory is pre-allocated, user needs to restart DB if they updated this GUC value. If rejectmap shared memory is full, it's possible to load data into some -schemas or roles which quota limit are reached. -If active table shared memory is full, disk quota worker may failed to detect +schemas or roles whose quota limits are reached. +If active table shared memory is full, disk quota worker may fail to detect the corresponding disk usage change in time. - diff --git a/gpcontrib/diskquota/cmake/Gpdb.cmake b/gpcontrib/diskquota/cmake/Gpdb.cmake index 4758d2d70a3..bd2ba722476 100644 --- a/gpcontrib/diskquota/cmake/Gpdb.cmake +++ b/gpcontrib/diskquota/cmake/Gpdb.cmake @@ -39,18 +39,31 @@ exec_program(${PG_CONFIG} ARGS --libs OUTPUT_VARIABLE PG_LIBS) exec_program(${PG_CONFIG} ARGS --libdir OUTPUT_VARIABLE PG_LIB_DIR) exec_program(${PG_CONFIG} ARGS --pgxs OUTPUT_VARIABLE PG_PGXS) get_filename_component(PG_HOME "${PG_BIN_DIR}/.." 
ABSOLUTE) -if (NOT PG_SRC_DIR) + +# If PG_SRC_DIR is provided (in-tree build), use source tree paths +# This is necessary because pg_config returns install paths, +# which don't exist yet during in-tree builds +if(PG_SRC_DIR) + set(PG_INCLUDE_DIR "${PG_SRC_DIR}/src/include") + set(PG_INCLUDE_DIR_SERVER "${PG_SRC_DIR}/src/include") + # libpq headers and library are in src/interfaces/libpq in source tree + set(PG_INCLUDE_DIR_LIBPQ "${PG_SRC_DIR}/src/interfaces/libpq") + set(PG_LIB_DIR "${PG_SRC_DIR}/src/interfaces/libpq") + message(STATUS "In-tree build: using source include path '${PG_INCLUDE_DIR}'") +else() + # Standalone build: try to derive PG_SRC_DIR from Makefile.global (optional) get_filename_component(pgsx_SRC_DIR ${PG_PGXS} DIRECTORY) set(makefile_global ${pgsx_SRC_DIR}/../Makefile.global) - # Some magic to find out the source code root from pg's Makefile.global - execute_process( - COMMAND_ECHO STDOUT - COMMAND - grep "^abs_top_builddir" ${makefile_global} - COMMAND - sed s/.*abs_top_builddir.*=\\\(.*\\\)/\\1/ - OUTPUT_VARIABLE PG_SRC_DIR OUTPUT_STRIP_TRAILING_WHITESPACE) - string(STRIP ${PG_SRC_DIR} PG_SRC_DIR) + if(EXISTS ${makefile_global}) + execute_process( + COMMAND grep "^abs_top_builddir" ${makefile_global} + COMMAND sed s/.*abs_top_builddir.*=\(.*\)/\\1/ + OUTPUT_VARIABLE PG_SRC_DIR OUTPUT_STRIP_TRAILING_WHITESPACE + ERROR_QUIET) + if(PG_SRC_DIR) + string(STRIP ${PG_SRC_DIR} PG_SRC_DIR) + endif() + endif() endif() # Get the GP_MAJOR_VERSION from header @@ -58,14 +71,23 @@ file(READ ${PG_INCLUDE_DIR}/pg_config.h config_header) string(REGEX MATCH "#define *GP_MAJORVERSION *\"[0-9]+\"" macrodef "${config_header}") string(REGEX MATCH "[0-9]+" GP_MAJOR_VERSION "${macrodef}") if (GP_MAJOR_VERSION) - message(STATUS "Build extension for GPDB ${GP_MAJOR_VERSION}") + message(STATUS "Build extension for Cloudberry ${GP_MAJOR_VERSION}") else() message(FATAL_ERROR "Cannot read GP_MAJORVERSION from '${PG_INCLUDE_DIR}/pg_config.h'") endif() string(REGEX MATCH "#define *GP_VERSION *\"[^\"]*\"" macrodef "${config_header}") string(REGEX REPLACE ".*\"\(.*\)\".*" "\\1" GP_VERSION "${macrodef}") if (GP_VERSION) - message(STATUS "The exact GPDB version is '${GP_VERSION}'") + message(STATUS "The exact Cloudberry version is '${GP_VERSION}'") else() message(FATAL_ERROR "Cannot read GP_VERSION from '${PG_INCLUDE_DIR}/pg_config.h'") endif() + +# Check if PG_SRC_DIR is available (for source-dependent features like isolation2 tests) +if ("${PG_SRC_DIR}" STREQUAL "" OR NOT EXISTS "${PG_SRC_DIR}") + message(STATUS "PG_SRC_DIR not found or empty, source-dependent features will be disabled") + set(PG_SRC_DIR_AVAILABLE OFF CACHE BOOL "Whether PG_SRC_DIR is available") +else() + message(STATUS "PG_SRC_DIR is '${PG_SRC_DIR}'") + set(PG_SRC_DIR_AVAILABLE ON CACHE BOOL "Whether PG_SRC_DIR is available") +endif() diff --git a/gpcontrib/diskquota/cmake/Regress.cmake b/gpcontrib/diskquota/cmake/Regress.cmake index 11f23f47b12..9f823e4d998 100644 --- a/gpcontrib/diskquota/cmake/Regress.cmake +++ b/gpcontrib/diskquota/cmake/Regress.cmake @@ -130,7 +130,12 @@ function(RegressTarget_Add name) set(regress_BIN ${PG_SRC_DIR}/src/test/isolation2/pg_isolation2_regress) _PGIsolation2Target_Add(${working_DIR}) else() - set(regress_BIN ${PG_PKG_LIB_DIR}/pgxs/src/test/regress/pg_regress) + # For in-tree builds, use source tree path; for standalone builds, use installed path + if(PG_SRC_DIR AND EXISTS ${PG_SRC_DIR}/src/test/regress/pg_regress) + set(regress_BIN ${PG_SRC_DIR}/src/test/regress/pg_regress) + else() + 
set(regress_BIN ${PG_PKG_LIB_DIR}/pgxs/src/test/regress/pg_regress) + endif() if (NOT EXISTS ${regress_BIN}) message(FATAL_ERROR "Cannot find 'pg_regress' executable by path '${regress_BIN}'. Is 'pg_config' in the $PATH?") diff --git a/gpcontrib/diskquota/control/ddl/diskquota--1.0--2.0.sql b/gpcontrib/diskquota/control/ddl/diskquota--1.0--2.0.sql deleted file mode 100644 index 40a7969d75e..00000000000 --- a/gpcontrib/diskquota/control/ddl/diskquota--1.0--2.0.sql +++ /dev/null @@ -1,284 +0,0 @@ --- TODO check if worker should not refresh, current lib should be diskquota-2.0.so - --- table part -ALTER TABLE diskquota.quota_config ADD COLUMN segratio float4 DEFAULT 0; - -CREATE TABLE diskquota.target ( - rowId serial, - quotatype int, -- REFERENCES disquota.quota_config.quotatype, - primaryOid oid, - tablespaceOid oid, -- REFERENCES pg_tablespace.oid, - PRIMARY KEY (primaryOid, tablespaceOid, quotatype) -); --- TODO ALTER TABLE diskquota.target SET DEPENDS ON EXTENSION diskquota; - -ALTER TABLE diskquota.table_size ADD COLUMN segid smallint DEFAULT -1; -- segid = coordinator means table size in cluster level -ALTER TABLE diskquota.table_size DROP CONSTRAINT table_size_pkey; -ALTER TABLE diskquota.table_size ADD PRIMARY KEY (tableid, segid); -ALTER TABLE diskquota.table_size SET WITH (REORGANIZE=true) DISTRIBUTED BY (tableid, segid); - --- TODO SELECT pg_catalog.pg_extension_config_dump('diskquota.target', ''); --- TODO SELECT gp_segment_id, pg_catalog.pg_extension_config_dump('diskquota.target', '') FROM gp_dist_random('gp_id'); --- table part end - --- type define -ALTER TYPE diskquota.diskquota_active_table_type ADD ATTRIBUTE "GP_SEGMENT_ID" smallint; - -CREATE TYPE diskquota.rejectmap_entry AS ( - target_oid oid, - database_oid oid, - tablespace_oid oid, - target_type integer, - seg_exceeded boolean -); - -CREATE TYPE diskquota.rejectmap_entry_detail AS ( - target_type text, - target_oid oid, - database_oid oid, - tablespace_oid oid, - seg_exceeded boolean, - dbnode oid, - spcnode oid, - relnode oid, - segid int -); - -CREATE TYPE diskquota.relation_cache_detail AS ( - RELID oid, - PRIMARY_TABLE_OID oid, - AUXREL_NUM int, - OWNEROID oid, - NAMESPACEOID oid, - BACKENDID int, - SPCNODE oid, - DBNODE oid, - RELNODE oid, - RELSTORAGE "char", - AUXREL_OID oid[] -); --- type define end - --- UDF -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_schema_quota(text, text) RETURNS void STRICT AS '$libdir/diskquota-2.0.so' LANGUAGE C; -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_role_quota(text, text) RETURNS void STRICT AS '$libdir/diskquota-2.0.so' LANGUAGE C; -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.init_table_size_table() RETURNS void STRICT AS '$libdir/diskquota-2.0.so' LANGUAGE C; -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.diskquota_fetch_table_stat(int4, oid[]) RETURNS setof diskquota.diskquota_active_table_type AS '$libdir/diskquota-2.0.so', 'diskquota_fetch_table_stat' LANGUAGE C VOLATILE; - --- TODO solve dependency DROP FUNCTION diskquota.update_diskquota_db_list(oid, int4); - -CREATE FUNCTION diskquota.set_schema_tablespace_quota(text, text, text) RETURNS void STRICT AS '$libdir/diskquota-2.0.so' LANGUAGE C; -CREATE FUNCTION diskquota.set_role_tablespace_quota(text, text, text) RETURNS void STRICT AS '$libdir/diskquota-2.0.so' LANGUAGE C; -CREATE FUNCTION diskquota.set_per_segment_quota(text, float4) RETURNS void STRICT AS '$libdir/diskquota-2.0.so' LANGUAGE C; -CREATE FUNCTION diskquota.refresh_rejectmap(diskquota.rejectmap_entry[], oid[]) RETURNS void 
STRICT AS '$libdir/diskquota-2.0.so' LANGUAGE C; -CREATE FUNCTION diskquota.show_rejectmap() RETURNS setof diskquota.rejectmap_entry_detail AS '$libdir/diskquota-2.0.so', 'show_rejectmap' LANGUAGE C; -CREATE FUNCTION diskquota.pause() RETURNS void STRICT AS '$libdir/diskquota-2.0.so', 'diskquota_pause' LANGUAGE C; -CREATE FUNCTION diskquota.resume() RETURNS void STRICT AS '$libdir/diskquota-2.0.so', 'diskquota_resume' LANGUAGE C; -CREATE FUNCTION diskquota.show_worker_epoch() RETURNS bigint STRICT AS '$libdir/diskquota-2.0.so', 'show_worker_epoch' LANGUAGE C; -CREATE FUNCTION diskquota.wait_for_worker_new_epoch() RETURNS boolean STRICT AS '$libdir/diskquota-2.0.so', 'wait_for_worker_new_epoch' LANGUAGE C; -CREATE FUNCTION diskquota.status() RETURNS TABLE ("name" text, "status" text) STRICT AS '$libdir/diskquota-2.0.so', 'diskquota_status' LANGUAGE C; -CREATE FUNCTION diskquota.show_relation_cache() RETURNS setof diskquota.relation_cache_detail AS '$libdir/diskquota-2.0.so', 'show_relation_cache' LANGUAGE C; -CREATE FUNCTION diskquota.relation_size_local( - reltablespace oid, - relfilenode oid, - relpersistence "char", - relstorage "char") -RETURNS bigint STRICT AS '$libdir/diskquota-2.0.so', 'relation_size_local' LANGUAGE C; -CREATE FUNCTION diskquota.pull_all_table_size(OUT tableid oid, OUT size bigint, OUT segid smallint) RETURNS SETOF RECORD AS '$libdir/diskquota-2.0.so', 'pull_all_table_size' LANGUAGE C; -CREATE FUNCTION diskquota.relation_size(relation regclass) RETURNS bigint STRICT AS $$ - SELECT SUM(size)::bigint FROM ( - SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, relstorage) AS size - FROM gp_dist_random('pg_class') WHERE oid = relation - UNION ALL - SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, relstorage) AS size - FROM pg_class WHERE oid = relation - ) AS t $$ LANGUAGE SQL; - -CREATE FUNCTION diskquota.show_relation_cache_all_seg() RETURNS setof diskquota.relation_cache_detail AS $$ - WITH relation_cache AS ( - SELECT diskquota.show_relation_cache() AS a - FROM gp_dist_random('gp_id') - ) - SELECT (a).* FROM relation_cache; $$ LANGUAGE SQL; --- UDF end - --- views -CREATE VIEW diskquota.rejectmap AS SELECT * FROM diskquota.show_rejectmap() AS BM; - -/* ALTER */ CREATE OR REPLACE VIEW diskquota.show_fast_database_size_view AS -SELECT ( - (SELECT SUM(pg_relation_size(oid)) FROM pg_class WHERE oid <= 16384) - + - (SELECT SUM(size) FROM diskquota.table_size WHERE segid = -1) -) AS dbsize; - -/* ALTER */ CREATE OR REPLACE VIEW diskquota.show_fast_schema_quota_view AS -WITH - quota_usage AS ( - SELECT - relnamespace, - SUM(size) AS total_size - FROM - diskquota.table_size, - pg_class - WHERE - tableid = pg_class.oid AND - segid = -1 - GROUP BY - relnamespace - ) -SELECT - nspname AS schema_name, - targetoid AS schema_oid, - quotalimitMB AS quota_in_mb, - COALESCE(total_size, 0) AS nspsize_in_bytes -FROM - diskquota.quota_config JOIN - pg_namespace ON targetoid = pg_namespace.oid LEFT OUTER JOIN - quota_usage ON pg_namespace.oid = relnamespace -WHERE - quotaType = 0; -- NAMESPACE_QUOTA - -/* ALTER */ CREATE OR REPLACE VIEW diskquota.show_fast_role_quota_view AS -WITH - quota_usage AS ( - SELECT - relowner, - SUM(size) AS total_size - FROM - diskquota.table_size, - pg_class - WHERE - tableid = pg_class.oid AND - segid = -1 - GROUP BY - relowner - ) -SELECT - rolname AS role_name, - targetoid AS role_oid, - quotalimitMB AS quota_in_mb, - COALESCE(total_size, 0) AS rolsize_in_bytes -FROM - diskquota.quota_config 
JOIN - pg_roles ON targetoid = pg_roles.oid LEFT OUTER JOIN - quota_usage ON pg_roles.oid = relowner -WHERE - quotaType = 1; -- ROLE_QUOTA - -CREATE VIEW diskquota.show_fast_schema_tablespace_quota_view AS -WITH - default_tablespace AS ( - SELECT dattablespace FROM pg_database - WHERE datname = current_database() - ), - quota_usage AS ( - SELECT - relnamespace, - CASE - WHEN reltablespace = 0 THEN dattablespace - ELSE reltablespace - END AS reltablespace, - SUM(size) AS total_size - FROM - diskquota.table_size, - pg_class, - default_tablespace - WHERE - tableid = pg_class.oid AND - segid = -1 - GROUP BY - relnamespace, - reltablespace, - dattablespace - ), - full_quota_config AS ( - SELECT - primaryOid, - tablespaceoid, - quotalimitMB - FROM - diskquota.quota_config AS config, - diskquota.target AS target - WHERE - config.targetOid = target.rowId AND - config.quotaType = target.quotaType AND - config.quotaType = 2 -- NAMESPACE_TABLESPACE_QUOTA - ) -SELECT - nspname AS schema_name, - primaryoid AS schema_oid, - spcname AS tablespace_name, - tablespaceoid AS tablespace_oid, - quotalimitMB AS quota_in_mb, - COALESCE(total_size, 0) AS nspsize_tablespace_in_bytes -FROM - full_quota_config JOIN - pg_namespace ON primaryoid = pg_namespace.oid JOIN - pg_tablespace ON tablespaceoid = pg_tablespace.oid LEFT OUTER JOIN - quota_usage ON pg_namespace.oid = relnamespace AND pg_tablespace.oid = reltablespace; - -CREATE VIEW diskquota.show_fast_role_tablespace_quota_view AS -WITH - default_tablespace AS ( - SELECT dattablespace FROM pg_database - WHERE datname = current_database() - ), - quota_usage AS ( - SELECT - relowner, - CASE - WHEN reltablespace = 0 THEN dattablespace - ELSE reltablespace - END AS reltablespace, - SUM(size) AS total_size - FROM - diskquota.table_size, - pg_class, - default_tablespace - WHERE - tableid = pg_class.oid AND - segid = -1 - GROUP BY - relowner, - reltablespace, - dattablespace - ), - full_quota_config AS ( - SELECT - primaryOid, - tablespaceoid, - quotalimitMB - FROM - diskquota.quota_config AS config, - diskquota.target AS target - WHERE - config.targetOid = target.rowId AND - config.quotaType = target.quotaType AND - config.quotaType = 3 -- ROLE_TABLESPACE_QUOTA - ) -SELECT - rolname AS role_name, - primaryoid AS role_oid, - spcname AS tablespace_name, - tablespaceoid AS tablespace_oid, - quotalimitMB AS quota_in_mb, - COALESCE(total_size, 0) AS rolsize_tablespace_in_bytes -FROM - full_quota_config JOIN - pg_roles ON primaryoid = pg_roles.oid JOIN - pg_tablespace ON tablespaceoid = pg_tablespace.oid LEFT OUTER JOIN - quota_usage ON pg_roles.oid = relowner AND pg_tablespace.oid = reltablespace; - -CREATE VIEW diskquota.show_segment_ratio_quota_view AS -SELECT - spcname as tablespace_name, - pg_tablespace.oid as tablespace_oid, - segratio as per_seg_quota_ratio -FROM - diskquota.quota_config JOIN - pg_tablespace ON targetOid = pg_tablespace.oid - AND quotatype = 4; - --- views end - diff --git a/gpcontrib/diskquota/control/ddl/diskquota--1.0.sql b/gpcontrib/diskquota/control/ddl/diskquota--1.0.sql deleted file mode 100644 index d4dd4e79608..00000000000 --- a/gpcontrib/diskquota/control/ddl/diskquota--1.0.sql +++ /dev/null @@ -1,71 +0,0 @@ --- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION diskquota" to load this file. 
\quit - -CREATE SCHEMA diskquota; - --- Configuration table -CREATE TABLE diskquota.quota_config( - targetOid oid, - quotatype int, - quotalimitMB int8, - PRIMARY KEY(targetOid, quotatype) -); - -CREATE TABLE diskquota.table_size( - tableid oid, - size bigint, - PRIMARY KEY(tableid) -); - -CREATE TABLE diskquota.state( - state int, - PRIMARY KEY(state) -); - --- only diskquota.quota_config is dump-able, other table can be generate on fly -SELECT pg_catalog.pg_extension_config_dump('diskquota.quota_config', ''); -SELECT gp_segment_id, pg_catalog.pg_extension_config_dump('diskquota.quota_config', '') FROM gp_dist_random('gp_id'); - -CREATE TYPE diskquota.diskquota_active_table_type AS ( - "TABLE_OID" oid, - "TABLE_SIZE" int8 -); - -CREATE FUNCTION diskquota.set_schema_quota(text, text) RETURNS void STRICT AS '$libdir/diskquota.so' LANGUAGE C; -CREATE FUNCTION diskquota.set_role_quota(text, text) RETURNS void STRICT AS '$libdir/diskquota.so' LANGUAGE C; -CREATE FUNCTION diskquota.update_diskquota_db_list(oid, int4) RETURNS void STRICT AS '$libdir/diskquota.so' LANGUAGE C; -CREATE FUNCTION diskquota.init_table_size_table() RETURNS void STRICT AS '$libdir/diskquota.so' LANGUAGE C; -CREATE FUNCTION diskquota.diskquota_fetch_table_stat(int4, oid[]) RETURNS setof diskquota.diskquota_active_table_type AS '$libdir/diskquota.so', 'diskquota_fetch_table_stat' LANGUAGE C VOLATILE; - -CREATE VIEW diskquota.show_fast_schema_quota_view AS -SELECT pgns.nspname AS schema_name, pgc.relnamespace AS schema_oid, qc.quotalimitMB AS quota_in_mb, SUM(ts.size) AS nspsize_in_bytes -FROM diskquota.table_size AS ts, - pg_class AS pgc, - diskquota.quota_config AS qc, - pg_namespace AS pgns -WHERE ts.tableid = pgc.oid AND qc.targetoid = pgc.relnamespace AND pgns.oid = pgc.relnamespace -GROUP BY relnamespace, qc.quotalimitMB, pgns.nspname -ORDER BY pgns.nspname; - -CREATE VIEW diskquota.show_fast_role_quota_view AS -SELECT pgr.rolname AS role_name, pgc.relowner AS role_oid, qc.quotalimitMB AS quota_in_mb, SUM(ts.size) AS rolsize_in_bytes -FROM diskquota.table_size AS ts, - pg_class AS pgc, - diskquota.quota_config AS qc, - pg_roles AS pgr -WHERE pgc.relowner = qc.targetoid AND pgc.relowner = pgr.oid AND ts.tableid = pgc.oid -GROUP BY pgc.relowner, pgr.rolname, qc.quotalimitMB; - -CREATE VIEW diskquota.show_fast_database_size_view AS -SELECT ( - (SELECT SUM(pg_relation_size(oid)) FROM pg_class WHERE oid <= 16384) - + - (SELECT SUM(size) FROM diskquota.table_size) -) AS dbsize; - --- prepare to boot -INSERT INTO diskquota.state SELECT (count(relname) = 0)::int FROM pg_class AS c, pg_namespace AS n WHERE c.oid > 16384 AND relnamespace = n.oid AND nspname != 'diskquota'; - -CREATE FUNCTION diskquota.diskquota_start_worker() RETURNS void STRICT AS '$libdir/diskquota.so' LANGUAGE C; -SELECT diskquota.diskquota_start_worker(); -DROP FUNCTION diskquota.diskquota_start_worker(); diff --git a/gpcontrib/diskquota/control/ddl/diskquota--2.0--1.0.sql b/gpcontrib/diskquota/control/ddl/diskquota--2.0--1.0.sql deleted file mode 100644 index 96338a81ceb..00000000000 --- a/gpcontrib/diskquota/control/ddl/diskquota--2.0--1.0.sql +++ /dev/null @@ -1,87 +0,0 @@ --- TODO check if worker should not refresh, current lib should be diskquota.so - --- views -DROP VIEW diskquota.rejectmap; -DROP VIEW diskquota.show_fast_schema_tablespace_quota_view; -DROP VIEW diskquota.show_fast_role_tablespace_quota_view; -DROP VIEW diskquota.show_segment_ratio_quota_view; - -/* ALTER */ CREATE OR REPLACE VIEW diskquota.show_fast_database_size_view AS -SELECT ( - 
(SELECT SUM(pg_relation_size(oid)) FROM pg_class WHERE oid <= 16384) - + - (SELECT SUM(size) FROM diskquota.table_size) -) AS dbsize; - -/* ALTER */ CREATE OR REPLACE VIEW diskquota.show_fast_schema_quota_view AS -SELECT pgns.nspname AS schema_name, pgc.relnamespace AS schema_oid, qc.quotalimitMB AS quota_in_mb, SUM(ts.size) AS nspsize_in_bytes -FROM diskquota.table_size AS ts, - pg_class AS pgc, - diskquota.quota_config AS qc, - pg_namespace AS pgns -WHERE ts.tableid = pgc.oid AND qc.targetoid = pgc.relnamespace AND pgns.oid = pgc.relnamespace -GROUP BY relnamespace, qc.quotalimitMB, pgns.nspname -ORDER BY pgns.nspname; - -/* ALTER */ CREATE OR REPLACE VIEW diskquota.show_fast_role_quota_view AS -SELECT pgr.rolname AS role_name, pgc.relowner AS role_oid, qc.quotalimitMB AS quota_in_mb, SUM(ts.size) AS rolsize_in_bytes -FROM diskquota.table_size AS ts, - pg_class AS pgc, - diskquota.quota_config AS qc, - pg_roles AS pgr -WHERE pgc.relowner = qc.targetoid AND pgc.relowner = pgr.oid AND ts.tableid = pgc.oid -GROUP BY pgc.relowner, pgr.rolname, qc.quotalimitMB; --- views part end - --- UDF --- TODO find a way to use ALTER FUNCTION -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_schema_quota(text, text) RETURNS void STRICT AS '$libdir/diskquota.so' LANGUAGE C; -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_role_quota(text, text) RETURNS void STRICT AS '$libdir/diskquota.so' LANGUAGE C; -/* 1.0--2.0 can not drop this UDF */ CREATE OR REPLACE FUNCTION diskquota.update_diskquota_db_list(oid, int4) RETURNS void STRICT AS '$libdir/diskquota.so' LANGUAGE C; --- TODO find a way to run it in Postgresql 9.4 ALTER FUNCTION diskquota.update_diskquota_db_list(oid, int4) DEPENDS ON EXTENSION diskquota; -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.init_table_size_table() RETURNS void STRICT AS '$libdir/diskquota.so' LANGUAGE C; -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.diskquota_fetch_table_stat(int4, oid[]) RETURNS setof diskquota.diskquota_active_table_type AS '$libdir/diskquota.so', 'diskquota_fetch_table_stat' LANGUAGE C VOLATILE; - -DROP FUNCTION diskquota.set_schema_tablespace_quota(text, text, text); -DROP FUNCTION diskquota.set_role_tablespace_quota(text, text, text); -DROP FUNCTION diskquota.set_per_segment_quota(text, float4); -DROP FUNCTION diskquota.refresh_rejectmap(diskquota.rejectmap_entry[], oid[]); -DROP FUNCTION diskquota.show_rejectmap(); -DROP FUNCTION diskquota.pause(); -DROP FUNCTION diskquota.resume(); -DROP FUNCTION diskquota.show_worker_epoch(); -DROP FUNCTION diskquota.wait_for_worker_new_epoch(); -DROP FUNCTION diskquota.status(); -DROP FUNCTION diskquota.show_relation_cache(); -DROP FUNCTION diskquota.relation_size_local( - reltablespace oid, - relfilenode oid, - relpersistence "char", - relstorage "char"); -DROP FUNCTION diskquota.pull_all_table_size(OUT tableid oid, OUT size bigint, OUT segid smallint); -DROP FUNCTION diskquota.relation_size(relation regclass); -DROP FUNCTION diskquota.show_relation_cache_all_seg(); --- UDF end - --- table part --- clean up NAMESPACE_TABLESPACE_QUOTA(2), ROLE_TABLESPACE_QUOTA(3) and TABLESPACE_QUOTA(4) -DELETE FROM diskquota.quota_config WHERE quotatype in (2, 3, 4); - -DROP TABLE diskquota.target; - -ALTER TABLE diskquota.quota_config DROP COLUMN segratio; - -ALTER TABLE diskquota.table_size SET WITH (REORGANIZE=true) DISTRIBUTED BY (tableid); -ALTER TABLE diskquota.table_size DROP CONSTRAINT table_size_pkey; --- clean up pre segments size information, 1.0 do not has this feature -DELETE FROM diskquota.table_size 
WHERE segid != -1; -ALTER TABLE diskquota.table_size ADD PRIMARY KEY (tableid); -ALTER TABLE diskquota.table_size DROP COLUMN segid; --- table part end - --- type part -ALTER TYPE diskquota.diskquota_active_table_type DROP ATTRIBUTE "GP_SEGMENT_ID"; -DROP TYPE diskquota.rejectmap_entry; -DROP TYPE diskquota.rejectmap_entry_detail; -DROP TYPE diskquota.relation_cache_detail; --- type part end diff --git a/gpcontrib/diskquota/control/ddl/diskquota--2.0--2.1.sql b/gpcontrib/diskquota/control/ddl/diskquota--2.0--2.1.sql deleted file mode 100644 index a87cae7f742..00000000000 --- a/gpcontrib/diskquota/control/ddl/diskquota--2.0--2.1.sql +++ /dev/null @@ -1,210 +0,0 @@ --- TODO check if worker should not refresh, current lib should be diskquota-2.1.so - --- UDF -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_schema_quota(text, text) RETURNS void STRICT AS '$libdir/diskquota-2.1.so' LANGUAGE C; -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_role_quota(text, text) RETURNS void STRICT AS '$libdir/diskquota-2.1.so' LANGUAGE C; -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.init_table_size_table() RETURNS void STRICT AS '$libdir/diskquota-2.1.so' LANGUAGE C; -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.diskquota_fetch_table_stat(int4, oid[]) RETURNS setof diskquota.diskquota_active_table_type AS '$libdir/diskquota-2.1.so', 'diskquota_fetch_table_stat' LANGUAGE C VOLATILE; -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_schema_tablespace_quota(text, text, text) RETURNS void STRICT AS '$libdir/diskquota-2.1.so' LANGUAGE C; -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_role_tablespace_quota(text, text, text) RETURNS void STRICT AS '$libdir/diskquota-2.1.so' LANGUAGE C; -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_per_segment_quota(text, float4) RETURNS void STRICT AS '$libdir/diskquota-2.1.so' LANGUAGE C; -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.refresh_rejectmap(diskquota.rejectmap_entry[], oid[]) RETURNS void STRICT AS '$libdir/diskquota-2.1.so' LANGUAGE C; -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.show_rejectmap() RETURNS setof diskquota.rejectmap_entry_detail AS '$libdir/diskquota-2.1.so', 'show_rejectmap' LANGUAGE C; -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.pause() RETURNS void STRICT AS '$libdir/diskquota-2.1.so', 'diskquota_pause' LANGUAGE C; -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.resume() RETURNS void STRICT AS '$libdir/diskquota-2.1.so', 'diskquota_resume' LANGUAGE C; -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.show_worker_epoch() RETURNS bigint STRICT AS '$libdir/diskquota-2.1.so', 'show_worker_epoch' LANGUAGE C; -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.wait_for_worker_new_epoch() RETURNS boolean STRICT AS '$libdir/diskquota-2.1.so', 'wait_for_worker_new_epoch' LANGUAGE C; -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.status() RETURNS TABLE ("name" text, "status" text) STRICT AS '$libdir/diskquota-2.1.so', 'diskquota_status' LANGUAGE C; -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.show_relation_cache() RETURNS setof diskquota.relation_cache_detail AS '$libdir/diskquota-2.1.so', 'show_relation_cache' LANGUAGE C; -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.relation_size_local(reltablespace oid, relfilenode oid, relpersistence "char", relstorage "char") RETURNS bigint STRICT AS '$libdir/diskquota-2.1.so', 'relation_size_local' LANGUAGE C; -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.pull_all_table_size(OUT tableid oid, OUT size bigint, OUT segid smallint) RETURNS SETOF RECORD AS 
'$libdir/diskquota-2.1.so', 'pull_all_table_size' LANGUAGE C; - -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.relation_size(relation regclass) RETURNS bigint STRICT AS $$ - SELECT SUM(size)::bigint FROM ( - SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, relstorage) AS size - FROM gp_dist_random('pg_class') WHERE oid = relation - UNION ALL - SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, relstorage) AS size - FROM pg_class WHERE oid = relation - ) AS t $$ LANGUAGE SQL; - -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.show_relation_cache_all_seg() RETURNS setof diskquota.relation_cache_detail AS $$ - WITH relation_cache AS ( - SELECT diskquota.show_relation_cache() AS a - FROM gp_dist_random('gp_id') - ) - SELECT (a).* FROM relation_cache; $$ LANGUAGE SQL; --- UDF end - - --- views -CREATE VIEW diskquota.show_all_relation_view AS -WITH - relation_cache AS ( - SELECT (f).* FROM diskquota.show_relation_cache() as f - ) -SELECT DISTINCT(oid), relowner, relnamespace, reltablespace from ( - SELECT relid as oid, owneroid as relowner, namespaceoid as relnamespace, spcnode as reltablespace FROM relation_cache - UNION - SELECT oid, relowner, relnamespace, reltablespace from pg_class -) as union_relation; - -/* ALTER */ CREATE OR REPLACE VIEW diskquota.show_fast_schema_quota_view AS -WITH - quota_usage AS ( - SELECT - relnamespace, - SUM(size) AS total_size - FROM - diskquota.table_size, - diskquota.show_all_relation_view - WHERE - tableid = diskquota.show_all_relation_view.oid AND - segid = -1 - GROUP BY - relnamespace - ) -SELECT - nspname AS schema_name, - targetoid AS schema_oid, - quotalimitMB AS quota_in_mb, - COALESCE(total_size, 0) AS nspsize_in_bytes -FROM - diskquota.quota_config JOIN - pg_namespace ON targetoid = pg_namespace.oid LEFT OUTER JOIN - quota_usage ON pg_namespace.oid = relnamespace -WHERE - quotaType = 0; -- NAMESPACE_QUOTA - -/* ALTER */ CREATE OR REPLACE VIEW diskquota.show_fast_role_quota_view AS -WITH - quota_usage AS ( - SELECT - relowner, - SUM(size) AS total_size - FROM - diskquota.table_size, - diskquota.show_all_relation_view - WHERE - tableid = diskquota.show_all_relation_view.oid AND - segid = -1 - GROUP BY - relowner - ) -SELECT - rolname AS role_name, - targetoid AS role_oid, - quotalimitMB AS quota_in_mb, - COALESCE(total_size, 0) AS rolsize_in_bytes -FROM - diskquota.quota_config JOIN - pg_roles ON targetoid = pg_roles.oid LEFT OUTER JOIN - quota_usage ON pg_roles.oid = relowner -WHERE - quotaType = 1; -- ROLE_QUOTA - -/* ALTER */ CREATE OR REPLACE VIEW diskquota.show_fast_schema_tablespace_quota_view AS -WITH - default_tablespace AS ( - SELECT dattablespace FROM pg_database - WHERE datname = current_database() - ), - quota_usage AS ( - SELECT - relnamespace, - CASE - WHEN reltablespace = 0 THEN dattablespace - ELSE reltablespace - END AS reltablespace, - SUM(size) AS total_size - FROM - diskquota.table_size, - diskquota.show_all_relation_view, - default_tablespace - WHERE - tableid = diskquota.show_all_relation_view.oid AND - segid = -1 - GROUP BY - relnamespace, - reltablespace, - dattablespace - ), - full_quota_config AS ( - SELECT - primaryOid, - tablespaceoid, - quotalimitMB - FROM - diskquota.quota_config AS config, - diskquota.target AS target - WHERE - config.targetOid = target.rowId AND - config.quotaType = target.quotaType AND - config.quotaType = 2 -- NAMESPACE_TABLESPACE_QUOTA - ) -SELECT - nspname AS schema_name, - primaryoid AS schema_oid, - spcname AS tablespace_name, - 
tablespaceoid AS tablespace_oid, - quotalimitMB AS quota_in_mb, - COALESCE(total_size, 0) AS nspsize_tablespace_in_bytes -FROM - full_quota_config JOIN - pg_namespace ON primaryOid = pg_namespace.oid JOIN - pg_tablespace ON tablespaceoid = pg_tablespace.oid LEFT OUTER JOIN - quota_usage ON pg_namespace.oid = relnamespace AND pg_tablespace.oid = reltablespace; - -/* ALTER */ CREATE OR REPLACE VIEW diskquota.show_fast_role_tablespace_quota_view AS -WITH - default_tablespace AS ( - SELECT dattablespace FROM pg_database - WHERE datname = current_database() - ), - quota_usage AS ( - SELECT - relowner, - CASE - WHEN reltablespace = 0 THEN dattablespace - ELSE reltablespace - END AS reltablespace, - SUM(size) AS total_size - FROM - diskquota.table_size, - diskquota.show_all_relation_view, - default_tablespace - WHERE - tableid = diskquota.show_all_relation_view.oid AND - segid = -1 - GROUP BY - relowner, - reltablespace, - dattablespace - ), - full_quota_config AS ( - SELECT - primaryOid, - tablespaceoid, - quotalimitMB - FROM - diskquota.quota_config AS config, - diskquota.target AS target - WHERE - config.targetOid = target.rowId AND - config.quotaType = target.quotaType AND - config.quotaType = 3 -- ROLE_TABLESPACE_QUOTA - ) -SELECT - rolname AS role_name, - primaryoid AS role_oid, - spcname AS tablespace_name, - tablespaceoid AS tablespace_oid, - quotalimitMB AS quota_in_mb, - COALESCE(total_size, 0) AS rolsize_tablespace_in_bytes -FROM - full_quota_config JOIN - pg_roles ON primaryoid = pg_roles.oid JOIN - pg_tablespace ON tablespaceoid = pg_tablespace.oid LEFT OUTER JOIN - quota_usage ON pg_roles.oid = relowner AND pg_tablespace.oid = reltablespace; - --- view end \ No newline at end of file diff --git a/gpcontrib/diskquota/control/ddl/diskquota--2.0.sql b/gpcontrib/diskquota/control/ddl/diskquota--2.0.sql deleted file mode 100644 index 0587fc77744..00000000000 --- a/gpcontrib/diskquota/control/ddl/diskquota--2.0.sql +++ /dev/null @@ -1,313 +0,0 @@ --- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION diskquota" to load this file. 
\quit - -CREATE SCHEMA diskquota; - --- when (quotatype == NAMESPACE_QUOTA/ROLE_QUOTA) then targetOid = role_oid/schema_oid; --- when (quotatype == NAMESPACE_TABLESPACE_QUOTA/ROLE_TABLESPACE_QUOTA) then targetOid = diskquota.target.rowId; -CREATE TABLE diskquota.quota_config( - targetOid oid, - quotatype int, - quotalimitMB int8, - segratio float4 DEFAULT 0, - PRIMARY KEY(targetOid, quotatype) -) DISTRIBUTED BY (targetOid, quotatype); - -CREATE TABLE diskquota.target ( - rowId serial, - quotatype int, --REFERENCES disquota.quota_config.quotatype, - primaryOid oid, - tablespaceOid oid, --REFERENCES pg_tablespace.oid, - PRIMARY KEY (primaryOid, tablespaceOid, quotatype) -); - -CREATE TABLE diskquota.table_size( - tableid oid, - size bigint, - segid smallint, - PRIMARY KEY(tableid, segid) -) DISTRIBUTED BY (tableid, segid); - -CREATE TABLE diskquota.state( - state int, - PRIMARY KEY(state) -) DISTRIBUTED BY (state); - --- diskquota.quota_config AND diskquota.target is dump-able, other table can be generate on fly -SELECT pg_catalog.pg_extension_config_dump('diskquota.quota_config', ''); -SELECT gp_segment_id, pg_catalog.pg_extension_config_dump('diskquota.quota_config', '') FROM gp_dist_random('gp_id'); -SELECT pg_catalog.pg_extension_config_dump('diskquota.target', ''); -SELECT gp_segment_id, pg_catalog.pg_extension_config_dump('diskquota.target', '') FROM gp_dist_random('gp_id'); - -CREATE TYPE diskquota.diskquota_active_table_type AS ( - "TABLE_OID" oid, - "TABLE_SIZE" int8, - "GP_SEGMENT_ID" smallint -); - -CREATE TYPE diskquota.rejectmap_entry AS ( - target_oid oid, - database_oid oid, - tablespace_oid oid, - target_type integer, - seg_exceeded boolean -); - -CREATE TYPE diskquota.rejectmap_entry_detail AS ( - target_type text, - target_oid oid, - database_oid oid, - tablespace_oid oid, - seg_exceeded boolean, - dbnode oid, - spcnode oid, - relnode oid, - segid int -); - -CREATE TYPE diskquota.relation_cache_detail AS ( - RELID oid, - PRIMARY_TABLE_OID oid, - AUXREL_NUM int, - OWNEROID oid, - NAMESPACEOID oid, - BACKENDID int, - SPCNODE oid, - DBNODE oid, - RELNODE oid, - RELSTORAGE "char", - AUXREL_OID oid[] -); - -CREATE FUNCTION diskquota.set_schema_quota(text, text) RETURNS void STRICT AS '$libdir/diskquota-2.0.so' LANGUAGE C; -CREATE FUNCTION diskquota.set_role_quota(text, text) RETURNS void STRICT AS '$libdir/diskquota-2.0.so' LANGUAGE C; -CREATE FUNCTION diskquota.init_table_size_table() RETURNS void STRICT AS '$libdir/diskquota-2.0.so' LANGUAGE C; -CREATE FUNCTION diskquota.diskquota_fetch_table_stat(int4, oid[]) RETURNS setof diskquota.diskquota_active_table_type AS '$libdir/diskquota-2.0.so', 'diskquota_fetch_table_stat' LANGUAGE C VOLATILE; -CREATE FUNCTION diskquota.set_schema_tablespace_quota(text, text, text) RETURNS void STRICT AS '$libdir/diskquota-2.0.so' LANGUAGE C; -CREATE FUNCTION diskquota.set_role_tablespace_quota(text, text, text) RETURNS void STRICT AS '$libdir/diskquota-2.0.so' LANGUAGE C; -CREATE FUNCTION diskquota.set_per_segment_quota(text, float4) RETURNS void STRICT AS '$libdir/diskquota-2.0.so' LANGUAGE C; -CREATE FUNCTION diskquota.refresh_rejectmap(diskquota.rejectmap_entry[], oid[]) RETURNS void STRICT AS '$libdir/diskquota-2.0.so' LANGUAGE C; -CREATE FUNCTION diskquota.show_rejectmap() RETURNS setof diskquota.rejectmap_entry_detail AS '$libdir/diskquota-2.0.so', 'show_rejectmap' LANGUAGE C; -CREATE FUNCTION diskquota.pause() RETURNS void STRICT AS '$libdir/diskquota-2.0.so', 'diskquota_pause' LANGUAGE C; -CREATE FUNCTION diskquota.resume() RETURNS void 
STRICT AS '$libdir/diskquota-2.0.so', 'diskquota_resume' LANGUAGE C; -CREATE FUNCTION diskquota.show_worker_epoch() RETURNS bigint STRICT AS '$libdir/diskquota-2.0.so', 'show_worker_epoch' LANGUAGE C; -CREATE FUNCTION diskquota.wait_for_worker_new_epoch() RETURNS boolean STRICT AS '$libdir/diskquota-2.0.so', 'wait_for_worker_new_epoch' LANGUAGE C; -CREATE FUNCTION diskquota.status() RETURNS TABLE ("name" text, "status" text) STRICT AS '$libdir/diskquota-2.0.so', 'diskquota_status' LANGUAGE C; -CREATE FUNCTION diskquota.show_relation_cache() RETURNS setof diskquota.relation_cache_detail AS '$libdir/diskquota-2.0.so', 'show_relation_cache' LANGUAGE C; -CREATE FUNCTION diskquota.relation_size_local( - reltablespace oid, - relfilenode oid, - relpersistence "char", - relstorage "char") -RETURNS bigint STRICT AS '$libdir/diskquota-2.0.so', 'relation_size_local' LANGUAGE C; -CREATE FUNCTION diskquota.pull_all_table_size(OUT tableid oid, OUT size bigint, OUT segid smallint) RETURNS SETOF RECORD AS '$libdir/diskquota-2.0.so', 'pull_all_table_size' LANGUAGE C; - -CREATE FUNCTION diskquota.relation_size(relation regclass) RETURNS bigint STRICT AS $$ - SELECT SUM(size)::bigint FROM ( - SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, relstorage) AS size - FROM gp_dist_random('pg_class') WHERE oid = relation - UNION ALL - SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, relstorage) AS size - FROM pg_class WHERE oid = relation - ) AS t $$ LANGUAGE SQL; - - -CREATE FUNCTION diskquota.show_relation_cache_all_seg() RETURNS setof diskquota.relation_cache_detail AS $$ - WITH relation_cache AS ( - SELECT diskquota.show_relation_cache() AS a - FROM gp_dist_random('gp_id') - ) - SELECT (a).* FROM relation_cache; $$ LANGUAGE SQL; - --- view part -CREATE VIEW diskquota.show_fast_schema_quota_view AS -WITH - quota_usage AS ( - SELECT - relnamespace, - SUM(size) AS total_size - FROM - diskquota.table_size, - pg_class - WHERE - tableid = pg_class.oid AND - segid = -1 - GROUP BY - relnamespace - ) -SELECT - nspname AS schema_name, - targetoid AS schema_oid, - quotalimitMB AS quota_in_mb, - COALESCE(total_size, 0) AS nspsize_in_bytes -FROM - diskquota.quota_config JOIN - pg_namespace ON targetoid = pg_namespace.oid LEFT OUTER JOIN - quota_usage ON pg_namespace.oid = relnamespace -WHERE - quotaType = 0; -- NAMESPACE_QUOTA - -CREATE VIEW diskquota.show_fast_role_quota_view AS -WITH - quota_usage AS ( - SELECT - relowner, - SUM(size) AS total_size - FROM - diskquota.table_size, - pg_class - WHERE - tableid = pg_class.oid AND - segid = -1 - GROUP BY - relowner - ) -SELECT - rolname AS role_name, - targetoid AS role_oid, - quotalimitMB AS quota_in_mb, - COALESCE(total_size, 0) AS rolsize_in_bytes -FROM - diskquota.quota_config JOIN - pg_roles ON targetoid = pg_roles.oid LEFT OUTER JOIN - quota_usage ON pg_roles.oid = relowner -WHERE - quotaType = 1; -- ROLE_QUOTA - -CREATE VIEW diskquota.show_fast_database_size_view AS -SELECT ( - (SELECT SUM(pg_relation_size(oid)) FROM pg_class WHERE oid <= 16384) - + - (SELECT SUM(size) FROM diskquota.table_size WHERE segid = -1) -) AS dbsize; - -CREATE VIEW diskquota.rejectmap AS SELECT * FROM diskquota.show_rejectmap() AS BM; - -CREATE VIEW diskquota.show_fast_schema_tablespace_quota_view AS -WITH - default_tablespace AS ( - SELECT dattablespace FROM pg_database - WHERE datname = current_database() - ), - quota_usage AS ( - SELECT - relnamespace, - CASE - WHEN reltablespace = 0 THEN dattablespace - ELSE reltablespace - END 
AS reltablespace, - SUM(size) AS total_size - FROM - diskquota.table_size, - pg_class, - default_tablespace - WHERE - tableid = pg_class.oid AND - segid = -1 - GROUP BY - relnamespace, - reltablespace, - dattablespace - ), - full_quota_config AS ( - SELECT - primaryOid, - tablespaceoid, - quotalimitMB - FROM - diskquota.quota_config AS config, - diskquota.target AS target - WHERE - config.targetOid = target.rowId AND - config.quotaType = target.quotaType AND - config.quotaType = 2 -- NAMESPACE_TABLESPACE_QUOTA - ) -SELECT - nspname AS schema_name, - primaryoid AS schema_oid, - spcname AS tablespace_name, - tablespaceoid AS tablespace_oid, - quotalimitMB AS quota_in_mb, - COALESCE(total_size, 0) AS nspsize_tablespace_in_bytes -FROM - full_quota_config JOIN - pg_namespace ON primaryOid = pg_namespace.oid JOIN - pg_tablespace ON tablespaceoid = pg_tablespace.oid LEFT OUTER JOIN - quota_usage ON pg_namespace.oid = relnamespace AND pg_tablespace.oid = reltablespace; - -CREATE VIEW diskquota.show_fast_role_tablespace_quota_view AS -WITH - default_tablespace AS ( - SELECT dattablespace FROM pg_database - WHERE datname = current_database() - ), - quota_usage AS ( - SELECT - relowner, - CASE - WHEN reltablespace = 0 THEN dattablespace - ELSE reltablespace - END AS reltablespace, - SUM(size) AS total_size - FROM - diskquota.table_size, - pg_class, - default_tablespace - WHERE - tableid = pg_class.oid AND - segid = -1 - GROUP BY - relowner, - reltablespace, - dattablespace - ), - full_quota_config AS ( - SELECT - primaryOid, - tablespaceoid, - quotalimitMB - FROM - diskquota.quota_config AS config, - diskquota.target AS target - WHERE - config.targetOid = target.rowId AND - config.quotaType = target.quotaType AND - config.quotaType = 3 -- ROLE_TABLESPACE_QUOTA - ) -SELECT - rolname AS role_name, - primaryoid AS role_oid, - spcname AS tablespace_name, - tablespaceoid AS tablespace_oid, - quotalimitMB AS quota_in_mb, - COALESCE(total_size, 0) AS rolsize_tablespace_in_bytes -FROM - full_quota_config JOIN - pg_roles ON primaryoid = pg_roles.oid JOIN - pg_tablespace ON tablespaceoid = pg_tablespace.oid LEFT OUTER JOIN - quota_usage ON pg_roles.oid = relowner AND pg_tablespace.oid = reltablespace; - -CREATE VIEW diskquota.show_segment_ratio_quota_view AS -SELECT - spcname as tablespace_name, - pg_tablespace.oid as tablespace_oid, - segratio as per_seg_quota_ratio -FROM - diskquota.quota_config JOIN - pg_tablespace ON targetOid = pg_tablespace.oid - AND quotatype = 4; - --- view end - --- prepare to boot -INSERT INTO diskquota.state SELECT (count(relname) = 0)::int FROM pg_class AS c, pg_namespace AS n WHERE c.oid > 16384 AND relnamespace = n.oid AND nspname != 'diskquota'; - --- re-dispatch pause status to false, in case the user does pause-drop-recreate. --- refer to test case 'test_drop_after_pause' -SELECT FROM diskquota.resume(); - - --- Starting the worker has to be the last step.
-CREATE FUNCTION diskquota.diskquota_start_worker() RETURNS void STRICT AS '$libdir/diskquota-2.0.so' LANGUAGE C; -SELECT diskquota.diskquota_start_worker(); -DROP FUNCTION diskquota.diskquota_start_worker(); diff --git a/gpcontrib/diskquota/control/ddl/diskquota--2.1--2.0.sql b/gpcontrib/diskquota/control/ddl/diskquota--2.1--2.0.sql deleted file mode 100644 index 16c92e4d252..00000000000 --- a/gpcontrib/diskquota/control/ddl/diskquota--2.1--2.0.sql +++ /dev/null @@ -1,200 +0,0 @@ --- TODO check if worker should not refresh, current lib should be diskquota-2.0.so - --- UDF -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_schema_quota(text, text) RETURNS void STRICT AS '$libdir/diskquota-2.0.so' LANGUAGE C; -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_role_quota(text, text) RETURNS void STRICT AS '$libdir/diskquota-2.0.so' LANGUAGE C; -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.init_table_size_table() RETURNS void STRICT AS '$libdir/diskquota-2.0.so' LANGUAGE C; -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.diskquota_fetch_table_stat(int4, oid[]) RETURNS setof diskquota.diskquota_active_table_type AS '$libdir/diskquota-2.0.so', 'diskquota_fetch_table_stat' LANGUAGE C VOLATILE; -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_schema_tablespace_quota(text, text, text) RETURNS void STRICT AS '$libdir/diskquota-2.0.so' LANGUAGE C; -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_role_tablespace_quota(text, text, text) RETURNS void STRICT AS '$libdir/diskquota-2.0.so' LANGUAGE C; -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_per_segment_quota(text, float4) RETURNS void STRICT AS '$libdir/diskquota-2.0.so' LANGUAGE C; -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.refresh_rejectmap(diskquota.rejectmap_entry[], oid[]) RETURNS void STRICT AS '$libdir/diskquota-2.0.so' LANGUAGE C; -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.show_rejectmap() RETURNS setof diskquota.rejectmap_entry_detail AS '$libdir/diskquota-2.0.so', 'show_rejectmap' LANGUAGE C; -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.pause() RETURNS void STRICT AS '$libdir/diskquota-2.0.so', 'diskquota_pause' LANGUAGE C; -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.resume() RETURNS void STRICT AS '$libdir/diskquota-2.0.so', 'diskquota_resume' LANGUAGE C; -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.show_worker_epoch() RETURNS bigint STRICT AS '$libdir/diskquota-2.0.so', 'show_worker_epoch' LANGUAGE C; -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.wait_for_worker_new_epoch() RETURNS boolean STRICT AS '$libdir/diskquota-2.0.so', 'wait_for_worker_new_epoch' LANGUAGE C; -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.status() RETURNS TABLE ("name" text, "status" text) STRICT AS '$libdir/diskquota-2.0.so', 'diskquota_status' LANGUAGE C; -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.show_relation_cache() RETURNS setof diskquota.relation_cache_detail AS '$libdir/diskquota-2.0.so', 'show_relation_cache' LANGUAGE C; -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.relation_size_local(reltablespace oid, relfilenode oid, relpersistence "char", relstorage "char") RETURNS bigint STRICT AS '$libdir/diskquota-2.0.so', 'relation_size_local' LANGUAGE C; -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.pull_all_table_size(OUT tableid oid, OUT size bigint, OUT segid smallint) RETURNS SETOF RECORD AS '$libdir/diskquota-2.0.so', 'pull_all_table_size' LANGUAGE C; - -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.relation_size(relation regclass) RETURNS bigint STRICT AS $$ - SELECT 
SUM(size)::bigint FROM ( - SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, relstorage) AS size - FROM gp_dist_random('pg_class') WHERE oid = relation - UNION ALL - SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, relstorage) AS size - FROM pg_class WHERE oid = relation - ) AS t $$ LANGUAGE SQL; - -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.show_relation_cache_all_seg() RETURNS setof diskquota.relation_cache_detail AS $$ - WITH relation_cache AS ( - SELECT diskquota.show_relation_cache() AS a - FROM gp_dist_random('gp_id') - ) - SELECT (a).* FROM relation_cache; $$ LANGUAGE SQL; --- UDF end - - --- views -/* ALTER */ CREATE OR REPLACE VIEW diskquota.show_fast_schema_quota_view AS -WITH - quota_usage AS ( - SELECT - relnamespace, - SUM(size) AS total_size - FROM - diskquota.table_size, - pg_class - WHERE - tableid = pg_class.oid AND - segid = -1 - GROUP BY - relnamespace - ) -SELECT - nspname AS schema_name, - targetoid AS schema_oid, - quotalimitMB AS quota_in_mb, - COALESCE(total_size, 0) AS nspsize_in_bytes -FROM - diskquota.quota_config JOIN - pg_namespace ON targetoid = pg_namespace.oid LEFT OUTER JOIN - quota_usage ON pg_namespace.oid = relnamespace -WHERE - quotaType = 0; -- NAMESPACE_QUOTA - -/* ALTER */ CREATE OR REPLACE VIEW diskquota.show_fast_role_quota_view AS -WITH - quota_usage AS ( - SELECT - relowner, - SUM(size) AS total_size - FROM - diskquota.table_size, - pg_class - WHERE - tableid = pg_class.oid AND - segid = -1 - GROUP BY - relowner - ) -SELECT - rolname AS role_name, - targetoid AS role_oid, - quotalimitMB AS quota_in_mb, - COALESCE(total_size, 0) AS rolsize_in_bytes -FROM - diskquota.quota_config JOIN - pg_roles ON targetoid = pg_roles.oid LEFT OUTER JOIN - quota_usage ON pg_roles.oid = relowner -WHERE - quotaType = 1; -- ROLE_QUOTA - -/* ALTER */ CREATE OR REPLACE VIEW diskquota.show_fast_schema_tablespace_quota_view AS -WITH - default_tablespace AS ( - SELECT dattablespace FROM pg_database - WHERE datname = current_database() - ), - quota_usage AS ( - SELECT - relnamespace, - CASE - WHEN reltablespace = 0 THEN dattablespace - ELSE reltablespace - END AS reltablespace, - SUM(size) AS total_size - FROM - diskquota.table_size, - pg_class, - default_tablespace - WHERE - tableid = pg_class.oid AND - segid = -1 - GROUP BY - relnamespace, - reltablespace, - dattablespace - ), - full_quota_config AS ( - SELECT - primaryOid, - tablespaceoid, - quotalimitMB - FROM - diskquota.quota_config AS config, - diskquota.target AS target - WHERE - config.targetOid = target.rowId AND - config.quotaType = target.quotaType AND - config.quotaType = 2 -- NAMESPACE_TABLESPACE_QUOTA - ) -SELECT - nspname AS schema_name, - primaryoid AS schema_oid, - spcname AS tablespace_name, - tablespaceoid AS tablespace_oid, - quotalimitMB AS quota_in_mb, - COALESCE(total_size, 0) AS nspsize_tablespace_in_bytes -FROM - full_quota_config JOIN - pg_namespace ON primaryOid = pg_namespace.oid JOIN - pg_tablespace ON tablespaceoid = pg_tablespace.oid LEFT OUTER JOIN - quota_usage ON pg_namespace.oid = relnamespace AND pg_tablespace.oid = reltablespace; - -/* ALTER */ CREATE OR REPLACE VIEW diskquota.show_fast_role_tablespace_quota_view AS -WITH - default_tablespace AS ( - SELECT dattablespace FROM pg_database - WHERE datname = current_database() - ), - quota_usage AS ( - SELECT - relowner, - CASE - WHEN reltablespace = 0 THEN dattablespace - ELSE reltablespace - END AS reltablespace, - SUM(size) AS total_size - FROM - diskquota.table_size, 
- pg_class, - default_tablespace - WHERE - tableid = pg_class.oid AND - segid = -1 - GROUP BY - relowner, - reltablespace, - dattablespace - ), - full_quota_config AS ( - SELECT - primaryOid, - tablespaceoid, - quotalimitMB - FROM - diskquota.quota_config AS config, - diskquota.target AS target - WHERE - config.targetOid = target.rowId AND - config.quotaType = target.quotaType AND - config.quotaType = 3 -- ROLE_TABLESPACE_QUOTA - ) -SELECT - rolname AS role_name, - primaryoid AS role_oid, - spcname AS tablespace_name, - tablespaceoid AS tablespace_oid, - quotalimitMB AS quota_in_mb, - COALESCE(total_size, 0) AS rolsize_tablespace_in_bytes -FROM - full_quota_config JOIN - pg_roles ON primaryoid = pg_roles.oid JOIN - pg_tablespace ON tablespaceoid = pg_tablespace.oid LEFT OUTER JOIN - quota_usage ON pg_roles.oid = relowner AND pg_tablespace.oid = reltablespace; - -DROP VIEW diskquota.show_all_relation_view; --- view end \ No newline at end of file diff --git a/gpcontrib/diskquota/control/ddl/diskquota--2.1--2.2.sql b/gpcontrib/diskquota/control/ddl/diskquota--2.1--2.2.sql deleted file mode 100644 index 247847e1f48..00000000000 --- a/gpcontrib/diskquota/control/ddl/diskquota--2.1--2.2.sql +++ /dev/null @@ -1,63 +0,0 @@ --- TODO check if worker should not refresh, current lib should be diskquota-2.2.so - --- TYPE -ALTER TYPE diskquota.relation_cache_detail ADD ATTRIBUTE RELAM OID; --- TYPE END - --- UDF -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_schema_quota(text, text) RETURNS void STRICT AS '$libdir/diskquota-2.2.so' LANGUAGE C; -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_role_quota(text, text) RETURNS void STRICT AS '$libdir/diskquota-2.2.so' LANGUAGE C; -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.init_table_size_table() RETURNS void STRICT AS '$libdir/diskquota-2.2.so' LANGUAGE C; -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.diskquota_fetch_table_stat(int4, oid[]) RETURNS setof diskquota.diskquota_active_table_type AS '$libdir/diskquota-2.2.so', 'diskquota_fetch_table_stat' LANGUAGE C VOLATILE; -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_schema_tablespace_quota(text, text, text) RETURNS void STRICT AS '$libdir/diskquota-2.2.so' LANGUAGE C; -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_role_tablespace_quota(text, text, text) RETURNS void STRICT AS '$libdir/diskquota-2.2.so' LANGUAGE C; -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_per_segment_quota(text, float4) RETURNS void STRICT AS '$libdir/diskquota-2.2.so' LANGUAGE C; -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.refresh_rejectmap(diskquota.rejectmap_entry[], oid[]) RETURNS void STRICT AS '$libdir/diskquota-2.2.so' LANGUAGE C; -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.show_rejectmap() RETURNS setof diskquota.rejectmap_entry_detail AS '$libdir/diskquota-2.2.so', 'show_rejectmap' LANGUAGE C; -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.pause() RETURNS void STRICT AS '$libdir/diskquota-2.2.so', 'diskquota_pause' LANGUAGE C; -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.resume() RETURNS void STRICT AS '$libdir/diskquota-2.2.so', 'diskquota_resume' LANGUAGE C; -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.show_worker_epoch() RETURNS bigint STRICT AS '$libdir/diskquota-2.2.so', 'show_worker_epoch' LANGUAGE C; -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.wait_for_worker_new_epoch() RETURNS boolean STRICT AS '$libdir/diskquota-2.2.so', 'wait_for_worker_new_epoch' LANGUAGE C; -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.status() RETURNS TABLE ("name" 
text, "status" text) STRICT AS '$libdir/diskquota-2.2.so', 'diskquota_status' LANGUAGE C; -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.show_relation_cache() RETURNS setof diskquota.relation_cache_detail AS '$libdir/diskquota-2.2.so', 'show_relation_cache' LANGUAGE C; - -DROP FUNCTION IF EXISTS diskquota.relation_size(relation regclass); -DROP FUNCTION IF EXISTS diskquota.relation_size_local(reltablespace oid, relfilenode oid, relpersistence "char", relstorage "char"); -CREATE FUNCTION diskquota.relation_size_local(reltablespace oid, relfilenode oid, relpersistence "char", relstorage "char", relam oid) RETURNS bigint STRICT AS '$libdir/diskquota-2.2.so', 'relation_size_local' LANGUAGE C; -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.pull_all_table_size(OUT tableid oid, OUT size bigint, OUT segid smallint) RETURNS SETOF RECORD AS '$libdir/diskquota-2.2.so', 'pull_all_table_size' LANGUAGE C; - -CREATE FUNCTION diskquota.relation_size(relation regclass) RETURNS bigint STRICT AS $$ - SELECT SUM(size)::bigint FROM ( - SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, - CASE WHEN EXISTS - (SELECT FROM pg_catalog.pg_attribute WHERE attrelid = 'pg_class'::regclass AND attname = 'relstorage') THEN relstorage::"char" ELSE ''::"char" END, - relam) AS size - FROM gp_dist_random('pg_class') as relstorage WHERE oid = relation - UNION ALL - SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, - CASE WHEN EXISTS - (SELECT FROM pg_catalog.pg_attribute WHERE attrelid = 'pg_class'::regclass AND attname = 'relstorage') THEN relstorage::"char" ELSE ''::"char" END, - relam) AS size - FROM pg_class as relstorage WHERE oid = relation - ) AS t $$ LANGUAGE SQL; - -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.show_relation_cache_all_seg() RETURNS setof diskquota.relation_cache_detail AS $$ - WITH relation_cache AS ( - SELECT diskquota.show_relation_cache() AS a - FROM gp_dist_random('gp_id') - ) - SELECT (a).* FROM relation_cache; $$ LANGUAGE SQL; --- UDF end - --- VIEW -CREATE OR REPLACE VIEW diskquota.show_all_relation_view AS -WITH - relation_cache AS ( - SELECT (f).* FROM diskquota.show_relation_cache() as f - ) -SELECT DISTINCT(oid), relowner, relnamespace, reltablespace from ( - SELECT relid as oid, owneroid as relowner, namespaceoid as relnamespace, spcnode as reltablespace FROM relation_cache - UNION - SELECT oid, relowner, relnamespace, reltablespace from pg_class -) as union_relation; --- VIEW diff --git a/gpcontrib/diskquota/control/ddl/diskquota--2.1.sql b/gpcontrib/diskquota/control/ddl/diskquota--2.1.sql deleted file mode 100644 index eb12606d69a..00000000000 --- a/gpcontrib/diskquota/control/ddl/diskquota--2.1.sql +++ /dev/null @@ -1,318 +0,0 @@ --- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION diskquota" to load this file. 
\quit - -CREATE SCHEMA diskquota; - --- when (quotatype == NAMESPACE_QUOTA/ROLE_QUOTA) then targetOid = schema_oid/role_oid; --- when (quotatype == NAMESPACE_TABLESPACE_QUOTA/ROLE_TABLESPACE_QUOTA) then targetOid = diskquota.target.rowId; -CREATE TABLE diskquota.quota_config( - targetOid oid, - quotatype int, - quotalimitMB int8, - segratio float4 DEFAULT 0, - PRIMARY KEY(targetOid, quotatype) -) DISTRIBUTED BY (targetOid, quotatype); - -CREATE TABLE diskquota.target ( - rowId serial, - quotatype int, --REFERENCES diskquota.quota_config.quotatype, - primaryOid oid, - tablespaceOid oid, --REFERENCES pg_tablespace.oid, - PRIMARY KEY (primaryOid, tablespaceOid, quotatype) -); - -CREATE TABLE diskquota.table_size( - tableid oid, - size bigint, - segid smallint, - PRIMARY KEY(tableid, segid) -) DISTRIBUTED BY (tableid, segid); - -CREATE TABLE diskquota.state( - state int, - PRIMARY KEY(state) -) DISTRIBUTED BY (state); - --- diskquota.quota_config AND diskquota.target are dump-able; the other tables can be generated on the fly -SELECT pg_catalog.pg_extension_config_dump('diskquota.quota_config', ''); -SELECT gp_segment_id, pg_catalog.pg_extension_config_dump('diskquota.quota_config', '') FROM gp_dist_random('gp_id'); -SELECT pg_catalog.pg_extension_config_dump('diskquota.target', ''); -SELECT gp_segment_id, pg_catalog.pg_extension_config_dump('diskquota.target', '') FROM gp_dist_random('gp_id'); - -CREATE TYPE diskquota.diskquota_active_table_type AS ( - "TABLE_OID" oid, - "TABLE_SIZE" int8, - "GP_SEGMENT_ID" smallint -); - -CREATE TYPE diskquota.rejectmap_entry AS ( - target_oid oid, - database_oid oid, - tablespace_oid oid, - target_type integer, - seg_exceeded boolean -); - -CREATE TYPE diskquota.rejectmap_entry_detail AS ( - target_type text, - target_oid oid, - database_oid oid, - tablespace_oid oid, - seg_exceeded boolean, - dbnode oid, - spcnode oid, - relnode oid, - segid int -); - -CREATE TYPE diskquota.relation_cache_detail AS ( - RELID oid, - PRIMARY_TABLE_OID oid, - AUXREL_NUM int, - OWNEROID oid, - NAMESPACEOID oid, - BACKENDID int, - SPCNODE oid, - DBNODE oid, - RELNODE oid, - RELSTORAGE "char", - AUXREL_OID oid[] -); - -CREATE FUNCTION diskquota.set_schema_quota(text, text) RETURNS void STRICT AS '$libdir/diskquota-2.1.so' LANGUAGE C; -CREATE FUNCTION diskquota.set_role_quota(text, text) RETURNS void STRICT AS '$libdir/diskquota-2.1.so' LANGUAGE C; -CREATE FUNCTION diskquota.init_table_size_table() RETURNS void STRICT AS '$libdir/diskquota-2.1.so' LANGUAGE C; -CREATE FUNCTION diskquota.diskquota_fetch_table_stat(int4, oid[]) RETURNS setof diskquota.diskquota_active_table_type AS '$libdir/diskquota-2.1.so', 'diskquota_fetch_table_stat' LANGUAGE C VOLATILE; -CREATE FUNCTION diskquota.set_schema_tablespace_quota(text, text, text) RETURNS void STRICT AS '$libdir/diskquota-2.1.so' LANGUAGE C; -CREATE FUNCTION diskquota.set_role_tablespace_quota(text, text, text) RETURNS void STRICT AS '$libdir/diskquota-2.1.so' LANGUAGE C; -CREATE FUNCTION diskquota.set_per_segment_quota(text, float4) RETURNS void STRICT AS '$libdir/diskquota-2.1.so' LANGUAGE C; -CREATE FUNCTION diskquota.refresh_rejectmap(diskquota.rejectmap_entry[], oid[]) RETURNS void STRICT AS '$libdir/diskquota-2.1.so' LANGUAGE C; -CREATE FUNCTION diskquota.show_rejectmap() RETURNS setof diskquota.rejectmap_entry_detail AS '$libdir/diskquota-2.1.so', 'show_rejectmap' LANGUAGE C; -CREATE FUNCTION diskquota.pause() RETURNS void STRICT AS '$libdir/diskquota-2.1.so', 'diskquota_pause' LANGUAGE C; -CREATE FUNCTION diskquota.resume() RETURNS void
STRICT AS '$libdir/diskquota-2.1.so', 'diskquota_resume' LANGUAGE C; -CREATE FUNCTION diskquota.show_worker_epoch() RETURNS bigint STRICT AS '$libdir/diskquota-2.1.so', 'show_worker_epoch' LANGUAGE C; -CREATE FUNCTION diskquota.wait_for_worker_new_epoch() RETURNS boolean STRICT AS '$libdir/diskquota-2.1.so', 'wait_for_worker_new_epoch' LANGUAGE C; -CREATE FUNCTION diskquota.status() RETURNS TABLE ("name" text, "status" text) STRICT AS '$libdir/diskquota-2.1.so', 'diskquota_status' LANGUAGE C; -CREATE FUNCTION diskquota.show_relation_cache() RETURNS setof diskquota.relation_cache_detail AS '$libdir/diskquota-2.1.so', 'show_relation_cache' LANGUAGE C; -CREATE FUNCTION diskquota.relation_size_local(reltablespace oid, relfilenode oid, relpersistence "char", relstorage "char") RETURNS bigint STRICT AS '$libdir/diskquota-2.1.so', 'relation_size_local' LANGUAGE C; -CREATE FUNCTION diskquota.pull_all_table_size(OUT tableid oid, OUT size bigint, OUT segid smallint) RETURNS SETOF RECORD AS '$libdir/diskquota-2.1.so', 'pull_all_table_size' LANGUAGE C; - -CREATE FUNCTION diskquota.relation_size(relation regclass) RETURNS bigint STRICT AS $$ - SELECT SUM(size)::bigint FROM ( - SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, relstorage) AS size - FROM gp_dist_random('pg_class') WHERE oid = relation - UNION ALL - SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, relstorage) AS size - FROM pg_class WHERE oid = relation - ) AS t $$ LANGUAGE SQL; - -CREATE FUNCTION diskquota.show_relation_cache_all_seg() RETURNS setof diskquota.relation_cache_detail AS $$ - WITH relation_cache AS ( - SELECT diskquota.show_relation_cache() AS a - FROM gp_dist_random('gp_id') - ) - SELECT (a).* FROM relation_cache; $$ LANGUAGE SQL; - --- view part -CREATE VIEW diskquota.show_all_relation_view AS -WITH - relation_cache AS ( - SELECT (f).* FROM diskquota.show_relation_cache() as f - ) -SELECT DISTINCT(oid), relowner, relnamespace, reltablespace from ( - SELECT relid as oid, owneroid as relowner, namespaceoid as relnamespace, spcnode as reltablespace FROM relation_cache - UNION - SELECT oid, relowner, relnamespace, reltablespace from pg_class -) as union_relation; - -CREATE VIEW diskquota.show_fast_schema_quota_view AS -WITH - quota_usage AS ( - SELECT - relnamespace, - SUM(size) AS total_size - FROM - diskquota.table_size, - diskquota.show_all_relation_view - WHERE - tableid = diskquota.show_all_relation_view.oid AND - segid = -1 - GROUP BY - relnamespace - ) -SELECT - nspname AS schema_name, - targetoid AS schema_oid, - quotalimitMB AS quota_in_mb, - COALESCE(total_size, 0) AS nspsize_in_bytes -FROM - diskquota.quota_config JOIN - pg_namespace ON targetoid = pg_namespace.oid LEFT OUTER JOIN - quota_usage ON pg_namespace.oid = relnamespace -WHERE - quotaType = 0; -- NAMESPACE_QUOTA - -CREATE VIEW diskquota.show_fast_role_quota_view AS -WITH - quota_usage AS ( - SELECT - relowner, - SUM(size) AS total_size - FROM - diskquota.table_size, - diskquota.show_all_relation_view - WHERE - tableid = diskquota.show_all_relation_view.oid AND - segid = -1 - GROUP BY - relowner - ) -SELECT - rolname AS role_name, - targetoid AS role_oid, - quotalimitMB AS quota_in_mb, - COALESCE(total_size, 0) AS rolsize_in_bytes -FROM - diskquota.quota_config JOIN - pg_roles ON targetoid = pg_roles.oid LEFT OUTER JOIN - quota_usage ON pg_roles.oid = relowner -WHERE - quotaType = 1; -- ROLE_QUOTA - -CREATE VIEW diskquota.show_fast_database_size_view AS -SELECT ( - (SELECT 
SUM(pg_relation_size(oid)) FROM pg_class WHERE oid <= 16384) - + - (SELECT SUM(size) FROM diskquota.table_size WHERE segid = -1) -) AS dbsize; - -CREATE VIEW diskquota.rejectmap AS SELECT * FROM diskquota.show_rejectmap() AS BM; - -CREATE VIEW diskquota.show_fast_schema_tablespace_quota_view AS -WITH - default_tablespace AS ( - SELECT dattablespace FROM pg_database - WHERE datname = current_database() - ), - quota_usage AS ( - SELECT - relnamespace, - CASE - WHEN reltablespace = 0 THEN dattablespace - ELSE reltablespace - END AS reltablespace, - SUM(size) AS total_size - FROM - diskquota.table_size, - diskquota.show_all_relation_view, - default_tablespace - WHERE - tableid = diskquota.show_all_relation_view.oid AND - segid = -1 - GROUP BY - relnamespace, - reltablespace, - dattablespace - ), - full_quota_config AS ( - SELECT - primaryOid, - tablespaceoid, - quotalimitMB - FROM - diskquota.quota_config AS config, - diskquota.target AS target - WHERE - config.targetOid = target.rowId AND - config.quotaType = target.quotaType AND - config.quotaType = 2 -- NAMESPACE_TABLESPACE_QUOTA - ) -SELECT - nspname AS schema_name, - primaryoid AS schema_oid, - spcname AS tablespace_name, - tablespaceoid AS tablespace_oid, - quotalimitMB AS quota_in_mb, - COALESCE(total_size, 0) AS nspsize_tablespace_in_bytes -FROM - full_quota_config JOIN - pg_namespace ON primaryOid = pg_namespace.oid JOIN - pg_tablespace ON tablespaceoid = pg_tablespace.oid LEFT OUTER JOIN - quota_usage ON pg_namespace.oid = relnamespace AND pg_tablespace.oid = reltablespace; - -CREATE VIEW diskquota.show_fast_role_tablespace_quota_view AS -WITH - default_tablespace AS ( - SELECT dattablespace FROM pg_database - WHERE datname = current_database() - ), - quota_usage AS ( - SELECT - relowner, - CASE - WHEN reltablespace = 0 THEN dattablespace - ELSE reltablespace - END AS reltablespace, - SUM(size) AS total_size - FROM - diskquota.table_size, - diskquota.show_all_relation_view, - default_tablespace - WHERE - tableid = diskquota.show_all_relation_view.oid AND - segid = -1 - GROUP BY - relowner, - reltablespace, - dattablespace - ), - full_quota_config AS ( - SELECT - primaryOid, - tablespaceoid, - quotalimitMB - FROM - diskquota.quota_config AS config, - diskquota.target AS target - WHERE - config.targetOid = target.rowId AND - config.quotaType = target.quotaType AND - config.quotaType = 3 -- ROLE_TABLESPACE_QUOTA - ) -SELECT - rolname AS role_name, - primaryoid AS role_oid, - spcname AS tablespace_name, - tablespaceoid AS tablespace_oid, - quotalimitMB AS quota_in_mb, - COALESCE(total_size, 0) AS rolsize_tablespace_in_bytes -FROM - full_quota_config JOIN - pg_roles ON primaryoid = pg_roles.oid JOIN - pg_tablespace ON tablespaceoid = pg_tablespace.oid LEFT OUTER JOIN - quota_usage ON pg_roles.oid = relowner AND pg_tablespace.oid = reltablespace; - -CREATE VIEW diskquota.show_segment_ratio_quota_view AS -SELECT - spcname as tablespace_name, - pg_tablespace.oid as tablespace_oid, - segratio as per_seg_quota_ratio -FROM - diskquota.quota_config JOIN - pg_tablespace ON targetOid = pg_tablespace.oid - AND quotatype = 4; - --- view end - --- prepare to boot -INSERT INTO diskquota.state SELECT (count(relname) = 0)::int FROM pg_class AS c, pg_namespace AS n WHERE c.oid > 16384 AND relnamespace = n.oid AND nspname != 'diskquota'; - --- re-dispatch pause status to false, in case the user does pause-drop-recreate. --- refer to test case 'test_drop_after_pause' -SELECT FROM diskquota.resume(); - - ---- Starting the worker has to be the last step.
-CREATE FUNCTION diskquota.diskquota_start_worker() RETURNS void STRICT AS '$libdir/diskquota-2.1.so' LANGUAGE C; -SELECT diskquota.diskquota_start_worker(); -DROP FUNCTION diskquota.diskquota_start_worker(); diff --git a/gpcontrib/diskquota/control/ddl/diskquota--2.2--2.1.sql b/gpcontrib/diskquota/control/ddl/diskquota--2.2--2.1.sql deleted file mode 100644 index 43d384aa457..00000000000 --- a/gpcontrib/diskquota/control/ddl/diskquota--2.2--2.1.sql +++ /dev/null @@ -1,56 +0,0 @@ --- TODO check if worker should not refresh, current lib should be diskquota-2.1.so - --- TYPE -ALTER TYPE diskquota.relation_cache_detail DROP ATTRIBUTE RELAM; --- TYPE END - --- UDF -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_schema_quota(text, text) RETURNS void STRICT AS '$libdir/diskquota-2.1.so' LANGUAGE C; -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_role_quota(text, text) RETURNS void STRICT AS '$libdir/diskquota-2.1.so' LANGUAGE C; -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.init_table_size_table() RETURNS void STRICT AS '$libdir/diskquota-2.1.so' LANGUAGE C; -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.diskquota_fetch_table_stat(int4, oid[]) RETURNS setof diskquota.diskquota_active_table_type AS '$libdir/diskquota-2.1.so', 'diskquota_fetch_table_stat' LANGUAGE C VOLATILE; -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_schema_tablespace_quota(text, text, text) RETURNS void STRICT AS '$libdir/diskquota-2.1.so' LANGUAGE C; -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_role_tablespace_quota(text, text, text) RETURNS void STRICT AS '$libdir/diskquota-2.1.so' LANGUAGE C; -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_per_segment_quota(text, float4) RETURNS void STRICT AS '$libdir/diskquota-2.1.so' LANGUAGE C; -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.refresh_rejectmap(diskquota.rejectmap_entry[], oid[]) RETURNS void STRICT AS '$libdir/diskquota-2.1.so' LANGUAGE C; -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.show_rejectmap() RETURNS setof diskquota.rejectmap_entry_detail AS '$libdir/diskquota-2.1.so', 'show_rejectmap' LANGUAGE C; -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.pause() RETURNS void STRICT AS '$libdir/diskquota-2.1.so', 'diskquota_pause' LANGUAGE C; -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.resume() RETURNS void STRICT AS '$libdir/diskquota-2.1.so', 'diskquota_resume' LANGUAGE C; -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.show_worker_epoch() RETURNS bigint STRICT AS '$libdir/diskquota-2.1.so', 'show_worker_epoch' LANGUAGE C; /* ALTER */ CREATE OR REPLACE FUNCTION diskquota.wait_for_worker_new_epoch() RETURNS boolean STRICT AS '$libdir/diskquota-2.1.so', 'wait_for_worker_new_epoch' LANGUAGE C; -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.status() RETURNS TABLE ("name" text, "status" text) STRICT AS '$libdir/diskquota-2.1.so', 'diskquota_status' LANGUAGE C; -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.show_relation_cache() RETURNS setof diskquota.relation_cache_detail AS '$libdir/diskquota-2.1.so', 'show_relation_cache' LANGUAGE C; -DROP FUNCTION IF EXISTS diskquota.relation_size(relation regclass); -DROP FUNCTION IF EXISTS diskquota.relation_size_local(reltablespace oid, relfilenode oid, relpersistence "char", relstorage "char", relam oid); -CREATE FUNCTION diskquota.relation_size_local(reltablespace oid, relfilenode oid, relpersistence "char", relstorage "char") RETURNS bigint STRICT AS '$libdir/diskquota-2.1.so', 'relation_size_local' LANGUAGE C; -/* ALTER */ CREATE OR REPLACE FUNCTION 
diskquota.pull_all_table_size(OUT tableid oid, OUT size bigint, OUT segid smallint) RETURNS SETOF RECORD AS '$libdir/diskquota-2.1.so', 'pull_all_table_size' LANGUAGE C; - - -CREATE FUNCTION diskquota.relation_size(relation regclass) RETURNS bigint STRICT AS $$ - SELECT SUM(size)::bigint FROM ( - SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, relstorage) AS size - FROM gp_dist_random('pg_class') WHERE oid = relation - UNION ALL - SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, relstorage) AS size - FROM pg_class WHERE oid = relation - ) AS t $$ LANGUAGE SQL; - -/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.show_relation_cache_all_seg() RETURNS setof diskquota.relation_cache_detail AS $$ - WITH relation_cache AS ( - SELECT diskquota.show_relation_cache() AS a - FROM gp_dist_random('gp_id') - ) - SELECT (a).* FROM relation_cache; $$ LANGUAGE SQL; --- UDF end - --- VIEW -CREATE OR REPLACE VIEW diskquota.show_all_relation_view AS -WITH - relation_cache AS ( - SELECT (f).* FROM diskquota.show_relation_cache() as f - ) -SELECT DISTINCT(oid), relowner, relnamespace, reltablespace from ( - SELECT relid as oid, owneroid as relowner, namespaceoid as relnamespace, spcnode as reltablespace FROM relation_cache - UNION - SELECT oid, relowner, relnamespace, reltablespace from pg_class -) as union_relation; --- VIEW diff --git a/gpcontrib/diskquota/control/ddl/diskquota.control b/gpcontrib/diskquota/control/ddl/diskquota.control index 4e5f6e5e6cf..67dc913740d 100644 --- a/gpcontrib/diskquota/control/ddl/diskquota.control +++ b/gpcontrib/diskquota/control/ddl/diskquota.control @@ -1,5 +1,5 @@ # diskquota extension -comment = 'Disk Quota Main Program' +comment = 'disk usage enforcement for database objects' default_version = '2.3' module_pathname = 'do-not-use-this' relocatable = true diff --git a/gpcontrib/diskquota/src/diskquota.c b/gpcontrib/diskquota/src/diskquota.c index f714a4f0e47..d94dc49ee66 100644 --- a/gpcontrib/diskquota/src/diskquota.c +++ b/gpcontrib/diskquota/src/diskquota.c @@ -15,11 +15,11 @@ * * ------------------------------------------------------------------------- */ +#include "postgres.h" + #include "diskquota.h" #include "gp_activetable.h" -#include "postgres.h" - #include "funcapi.h" #include "pgstat.h" #include "access/xact.h" @@ -149,10 +149,6 @@ static void vacuum_db_entry(DiskquotaDBEntry *db); static void init_bgworker_handles(void); static BackgroundWorkerHandle *get_bgworker_handle(uint32 worker_id); static void free_bgworker_handle(uint32 worker_id); -#if GP_VERSION_NUM < 70000 -/* WaitForBackgroundWorkerShutdown is copied from gpdb7 */ -static BgwHandleStatus WaitForBackgroundWorkerShutdown(BackgroundWorkerHandle *handle); -#endif /* GP_VERSION_NUM */ static bool is_altering_extension_to_default_version(char *version); static bool check_alter_extension(void); @@ -461,16 +457,9 @@ disk_quota_worker_main(Datum main_arg) /* We're now ready to receive signals */ BackgroundWorkerUnblockSignals(); -#if GP_VERSION_NUM < 70000 - /* Connect to our database */ - BackgroundWorkerInitializeConnection(dbname, NULL); - set_config_option("application_name", DISKQUOTA_APPLICATION_NAME, PGC_USERSET, PGC_S_SESSION, GUC_ACTION_SAVE, true, - 0); -#else BackgroundWorkerInitializeConnection(dbname, NULL, 0); set_config_option("application_name", DISKQUOTA_APPLICATION_NAME, PGC_USERSET, PGC_S_SESSION, GUC_ACTION_SAVE, true, 0, true); -#endif /* GP_VERSION_NUM */ /* diskquota worker should has Gp_role as dispatcher */ Gp_role = 
GP_ROLE_DISPATCH; @@ -492,22 +481,29 @@ disk_quota_worker_main(Datum main_arg) int has_error = worker_spi_get_extension_version(&major, &minor) != 0; if (major == DISKQUOTA_MAJOR_VERSION && minor == DISKQUOTA_MINOR_VERSION) break; -#if GP_VERSION_NUM < 70000 - /* MemoryAccount has been removed on gpdb7 */ - MemoryAccounting_Reset(); -#endif /* GP_VERSION_NUM */ if (has_error) { static char _errfmt[] = "find issues in pg_class.pg_extension check server log. waited %d seconds", _errmsg[sizeof(_errfmt) + sizeof("2147483647" /* INT_MAX */) + 1] = {}; snprintf(_errmsg, sizeof(_errmsg), _errfmt, times * diskquota_naptime); - init_ps_display("bgworker:", "[diskquota]", dbname, _errmsg); + { + char _psbuf[256]; + if (_errmsg[0] != '\0') + snprintf(_psbuf, sizeof(_psbuf), "bgworker: [diskquota] %s %s", dbname, _errmsg); + else + snprintf(_psbuf, sizeof(_psbuf), "bgworker: [diskquota] %s", dbname); + set_ps_display(_psbuf); + } } else { - init_ps_display("bgworker:", "[diskquota]", dbname, - "v" DISKQUOTA_VERSION " is not matching with current SQL. stop working"); + { + char _psbuf[256]; + snprintf(_psbuf, sizeof(_psbuf), "bgworker: [diskquota] %s v" DISKQUOTA_VERSION " is not matching with current SQL. stop working", + dbname); + set_ps_display(_psbuf); + } } ereportif(!has_error && times == 0, WARNING, @@ -535,7 +531,11 @@ disk_quota_worker_main(Datum main_arg) * function `init_ps_display`, we only want the ps name looks like * 'bgworker: [diskquota] ...' */ - init_ps_display("bgworker:", "[diskquota]", dbname, ""); + { + char _psbuf[256]; + snprintf(_psbuf, sizeof(_psbuf), "bgworker: [diskquota] %s", dbname); + set_ps_display(_psbuf); + } /* suppose the database is ready, if not, then set it to false */ bool is_ready = true; @@ -560,9 +560,6 @@ disk_quota_worker_main(Datum main_arg) break; } -#if GP_VERSION_NUM < 70000 - MemoryAccounting_Reset(); -#endif /* GP_VERSION_NUM */ if (is_ready) { update_monitordb_status(MyWorkerInfo->dbEntry->dbid, DB_UNREADY); @@ -644,13 +641,6 @@ disk_quota_worker_main(Datum main_arg) } worker_increase_epoch(MyWorkerInfo->dbEntry->dbid); - // GPDB6 opend a MemoryAccount for us without asking us. - // and GPDB6 did not release the MemoryAccount after SPI finish. - // Reset the MemoryAccount although we never create it. -#if GP_VERSION_NUM < 70000 - MemoryAccounting_Reset(); -#endif /* GP_VERSION_NUM */ - sleep_time = 0; } loop_start_timestamp = GetCurrentTimestamp(); @@ -745,16 +735,9 @@ disk_quota_launcher_main(Datum main_arg) * 'diskquota' database is not existed. */ -#if GP_VERSION_NUM < 70000 - /* Connect to our database */ - BackgroundWorkerInitializeConnection(DISKQUOTA_DB, NULL); - set_config_option("application_name", DISKQUOTA_APPLICATION_NAME, PGC_USERSET, PGC_S_SESSION, GUC_ACTION_SAVE, true, - 0); -#else BackgroundWorkerInitializeConnection(DISKQUOTA_DB, NULL, 0); set_config_option("application_name", DISKQUOTA_APPLICATION_NAME, PGC_USERSET, PGC_S_SESSION, GUC_ACTION_SAVE, true, 0, true); -#endif /* GP_VERSION_NUM */ /* * use table diskquota_namespace.database_list to store diskquota enabled @@ -1068,22 +1051,12 @@ init_database_list(void) strerror(saved_errno), ret))); } tupdesc = SPI_tuptable->tupdesc; -#if GP_VERSION_NUM < 70000 - if (tupdesc->natts != 1 || tupdesc->attrs[0]->atttypid != OIDOID) - { - ereport(LOG, (errmsg("[diskquota launcher], natts/atttypid: %d.", - tupdesc->natts != 1 ? tupdesc->natts : tupdesc->attrs[0]->atttypid))); - ereport(ERROR, (errmsg("[diskquota launcher] table database_list corrupt, launcher will exit. 
natts: "))); - } -#else - if (tupdesc->natts != 1 || tupdesc->attrs[0].atttypid != OIDOID) { ereport(LOG, (errmsg("[diskquota launcher], natts/atttypid: %d.", tupdesc->natts != 1 ? tupdesc->natts : tupdesc->attrs[0].atttypid))); ereport(ERROR, (errmsg("[diskquota launcher] table database_list corrupt, launcher will exit. natts: "))); } -#endif /* GP_VERSION_NUM */ for (i = 0; i < SPI_processed; i++) { HeapTuple tup; @@ -1156,9 +1129,6 @@ process_extension_ddl_message() (errmsg("[diskquota launcher]: received create/drop extension diskquota message, extension launcher"))); do_process_extension_ddl_message(&code, local_extension_ddl_message); -#if GP_VERSION_NUM < 70000 - MemoryAccounting_Reset(); -#endif /* GP_VERSION_NUM */ /* Send createdrop extension diskquota result back to QD */ LWLockAcquire(diskquota_locks.extension_ddl_message_lock, LW_EXCLUSIVE); @@ -1651,7 +1621,8 @@ diskquota_status_schema_version() goto fail; } - StrNCpy(ret_version, version, sizeof(ret_version) - 1); + /* copy and ensure null termination */ + snprintf(ret_version, sizeof(ret_version), "%s", version); SPI_finish(); return ret_version; @@ -2011,34 +1982,3 @@ free_bgworker_handle(uint32 worker_id) *handle = NULL; } } - -#if GP_VERSION_NUM < 70000 -static BgwHandleStatus -WaitForBackgroundWorkerShutdown(BackgroundWorkerHandle *handle) -{ - BgwHandleStatus status; - int rc; - - for (;;) - { - pid_t pid; - - CHECK_FOR_INTERRUPTS(); - - status = GetBackgroundWorkerPid(handle, &pid); - if (status == BGWH_STOPPED) break; - - rc = DiskquotaWaitLatch(&MyProc->procLatch, WL_LATCH_SET | WL_POSTMASTER_DEATH, 0); - - if (rc & WL_POSTMASTER_DEATH) - { - status = BGWH_POSTMASTER_DIED; - break; - } - - ResetLatch(&MyProc->procLatch); - } - - return status; -} -#endif /* GP_VERSION_NUM */ diff --git a/gpcontrib/diskquota/src/diskquota.h b/gpcontrib/diskquota/src/diskquota.h index b3d3481c86f..f45dd852548 100644 --- a/gpcontrib/diskquota/src/diskquota.h +++ b/gpcontrib/diskquota/src/diskquota.h @@ -58,14 +58,6 @@ typedef enum #define EXTENSION_SCHEMA "diskquota" extern int diskquota_worker_timeout; -#if GP_VERSION_NUM < 70000 -#define TableIsHeap(relstorage, relam) ((bool)(relstorage == RELSTORAGE_HEAP)) -#define TableIsAoRows(relstorage, relam) ((bool)(relstorage == RELSTORAGE_AOROWS)) -#define TableIsAoCols(relstorage, relam) ((bool)(relstorage == RELSTORAGE_AOCOLS)) -#define DiskquotaCreateTemplateTupleDesc(natts) CreateTemplateTupleDesc(natts, false /*hasoid*/) -#define DiskquotaWaitLatch(latch, wakeEvents, timeout) WaitLatch(latch, wakeEvents, timeout) -#define DiskquotaGetRelstorage(classForm) (classForm->relstorage) -#else #define TableIsHeap(relstorage, relam) \ ((bool)(relam != 0 && relam != AO_ROW_TABLE_AM_OID && relam != AO_COLUMN_TABLE_AM_OID)) #define TableIsAoRows(relstorage, relam) ((bool)(relam == AO_ROW_TABLE_AM_OID)) @@ -73,7 +65,6 @@ extern int diskquota_worker_timeout; #define DiskquotaCreateTemplateTupleDesc(natts) CreateTemplateTupleDesc(natts); #define DiskquotaWaitLatch(latch, wakeEvents, timeout) WaitLatch(latch, wakeEvents, timeout, WAIT_EVENT_PG_SLEEP) #define DiskquotaGetRelstorage(classForm) (0) -#endif /* GP_VERSION_NUM */ typedef enum { diff --git a/gpcontrib/diskquota/src/diskquota_utility.c b/gpcontrib/diskquota/src/diskquota_utility.c index 28b874e8ade..6bb54de64b4 100644 --- a/gpcontrib/diskquota/src/diskquota_utility.c +++ b/gpcontrib/diskquota/src/diskquota_utility.c @@ -23,10 +23,9 @@ #include "access/aomd.h" #include "access/xact.h" #include "access/heapam.h" -#if GP_VERSION_NUM >= 70000 
#include "access/genam.h" #include "common/hashfn.h" -#endif /* GP_VERSION_NUM */ +#include "catalog/gp_indexing.h" #include "catalog/namespace.h" #include "catalog/objectaccess.h" #include "catalog/pg_authid.h" @@ -134,7 +133,7 @@ init_table_size_table(PG_FUNCTION_ARGS) /* ensure table diskquota.state exists */ rv = makeRangeVar("diskquota", "state", -1); - rel = heap_openrv_extended(rv, AccessShareLock, true); + rel = table_openrv_extended(rv, AccessShareLock, true); if (!rel) { /* configuration table is missing. */ @@ -143,7 +142,7 @@ init_table_size_table(PG_FUNCTION_ARGS) " please recreate diskquota extension", get_database_name(MyDatabaseId)); } - heap_close(rel, NoLock); + table_close(rel, NoLock); /* * Why don't use insert into diskquota.table_size select from pg_table_size here? @@ -208,11 +207,7 @@ calculate_all_table_size() { Relation classRel; HeapTuple tuple; -#if GP_VERSION_NUM < 70000 - HeapScanDesc relScan; -#else TableScanDesc relScan; -#endif /* GP_VERSION_NUM */ Oid relid; Oid prelid; Size tablesize; @@ -231,12 +226,8 @@ calculate_all_table_size() local_table_size_map = diskquota_hash_create("local_table_size_map", 1024, &hashctl, HASH_ELEM | HASH_CONTEXT, DISKQUOTA_TAG_HASH); - classRel = heap_open(RelationRelationId, AccessShareLock); -#if GP_VERSION_NUM < 70000 - relScan = heap_beginscan_catalog(classRel, 0, NULL); -#else + classRel = table_open(RelationRelationId, AccessShareLock); relScan = table_beginscan_catalog(classRel, 0, NULL); -#endif /* GP_VERSION_NUM */ while ((tuple = heap_getnext(relScan, ForwardScanDirection)) != NULL) { @@ -247,11 +238,7 @@ calculate_all_table_size() classForm->relkind != RELKIND_TOASTVALUE) continue; -#if GP_VERSION_NUM < 70000 - relid = HeapTupleGetOid(tuple); -#else relid = classForm->oid; -#endif /* GP_VERSION_NUM */ /* ignore system table */ if (relid < FirstNormalObjectId) continue; @@ -279,8 +266,8 @@ calculate_all_table_size() } entry->tablesize += tablesize; } - heap_endscan(relScan); - heap_close(classRel, AccessShareLock); + table_endscan(relScan); + table_close(classRel, AccessShareLock); return local_table_size_map; } @@ -644,8 +631,10 @@ __get_oid_auto_case_convert(Oid (*f)(const char *name, bool missing_ok), const c if (l > 2 && name[0] == '"' && name[l - 1] == '"') { // object name wrapped by '"'. eg: "foo" - b = palloc(l); - StrNCpy(b, name + 1, l - 1); // trim the '"'. unlike strncpy, StrNCpy will ensure b[l-1] = '\0' + // l - 2 is the length without quotes, +1 for null terminator + b = palloc(l - 1); + memcpy(b, name + 1, l - 2); + b[l - 2] = '\0'; } else { @@ -1487,7 +1476,7 @@ diskquota_get_index_list(Oid relid) /* Prepare to scan pg_index for entries having indrelid = this rel. 
*/ ScanKeyInit(&skey, Anum_pg_index_indrelid, BTEqualStrategyNumber, F_OIDEQ, relid); - indrel = heap_open(IndexRelationId, AccessShareLock); + indrel = table_open(IndexRelationId, AccessShareLock); indscan = systable_beginscan(indrel, IndexIndrelidIndexId, true, NULL, 1, &skey); while (HeapTupleIsValid(htup = systable_getnext(indscan))) @@ -1508,7 +1497,7 @@ diskquota_get_index_list(Oid relid) systable_endscan(indscan); - heap_close(indrel, AccessShareLock); + table_close(indrel, AccessShareLock); return result; } @@ -1528,7 +1517,7 @@ diskquota_get_appendonly_aux_oid_list(Oid reloid, Oid *segrelid, Oid *blkdirreli bool isnull; ScanKeyInit(&skey, Anum_pg_appendonly_relid, BTEqualStrategyNumber, F_OIDEQ, reloid); - aorel = heap_open(AppendOnlyRelationId, AccessShareLock); + aorel = table_open(AppendOnlyRelationId, AccessShareLock); tupDesc = RelationGetDescr(aorel); scan = systable_beginscan(aorel, AppendOnlyRelidIndexId, true /*indexOk*/, NULL /*snapshot*/, 1 /*nkeys*/, &skey); while (HeapTupleIsValid(htup = systable_getnext(scan))) @@ -1553,7 +1542,7 @@ diskquota_get_appendonly_aux_oid_list(Oid reloid, Oid *segrelid, Oid *blkdirreli } systable_endscan(scan); - heap_close(aorel, AccessShareLock); + table_close(aorel, AccessShareLock); } Oid @@ -1650,17 +1639,7 @@ check_role(Oid roleoid, char *rolname, int64 quota_limit_mb) HTAB * diskquota_hash_create(const char *tabname, long nelem, HASHCTL *info, int flags, DiskquotaHashFunction hashFunction) { -#if GP_VERSION_NUM < 70000 - if (hashFunction == DISKQUOTA_TAG_HASH) - info->hash = tag_hash; - else if (hashFunction == DISKQUOTA_OID_HASH) - info->hash = oid_hash; - else - info->hash = string_hash; - return hash_create(tabname, nelem, info, flags | HASH_FUNCTION); -#else return hash_create(tabname, nelem, info, flags | HASH_BLOBS); -#endif /* GP_VERSION_NUM */ } HTAB * @@ -1671,15 +1650,5 @@ DiskquotaShmemInitHash(const char *name, /* table string name fo int hash_flags, /* info about infoP */ DiskquotaHashFunction hashFunction) { -#if GP_VERSION_NUM < 70000 - if (hashFunction == DISKQUOTA_TAG_HASH) - infoP->hash = tag_hash; - else if (hashFunction == DISKQUOTA_OID_HASH) - infoP->hash = oid_hash; - else - infoP->hash = string_hash; - return ShmemInitHash(name, init_size, max_size, infoP, hash_flags | HASH_FUNCTION); -#else return ShmemInitHash(name, init_size, max_size, infoP, hash_flags | HASH_BLOBS); -#endif /* GP_VERSION_NUM */ } diff --git a/gpcontrib/diskquota/src/gp_activetable.c b/gpcontrib/diskquota/src/gp_activetable.c index 6e76633be7a..85309b9ce0d 100644 --- a/gpcontrib/diskquota/src/gp_activetable.c +++ b/gpcontrib/diskquota/src/gp_activetable.c @@ -17,9 +17,7 @@ #include "postgres.h" #include "access/htup_details.h" -#if GP_VERSION_NUM >= 70000 #include "access/relation.h" -#endif /* GP_VERSION_NUM */ #include "access/xact.h" #include "catalog/catalog.h" #include "catalog/objectaccess.h" @@ -37,6 +35,7 @@ #include "utils/lsyscache.h" #include "utils/syscache.h" #include "utils/inval.h" +#include "utils/array.h" #include "gp_activetable.h" #include "diskquota.h" @@ -681,11 +680,7 @@ is_relation_being_altered(Oid relid) { LOCKTAG locktag; SetLocktagRelationOid(&locktag, relid); -#if GP_VERSION_NUM < 70000 - VirtualTransactionId *vxid_list = GetLockConflicts(&locktag, AccessShareLock); -#else VirtualTransactionId *vxid_list = GetLockConflicts(&locktag, AccessShareLock, NULL); -#endif /* GP_VERSION_NUM */ bool being_altered = VirtualTransactionIdIsValid(*vxid_list); /* if vxid_list is empty */ pfree(vxid_list); return being_altered; 
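The hunk above drops the GPDB6 two-argument call and keeps only the three-argument GetLockConflicts() used to detect a relation that is concurrently being altered. For reference, a minimal self-contained sketch of the same idiom, assuming a PostgreSQL 12 / GPDB7 era server build: the helper name below is hypothetical, but it mirrors is_relation_being_altered() from the hunk with the version #if removed, and GetLockConflicts, SetLocktagRelationOid, and VirtualTransactionIdIsValid are the core lock-manager APIs.

#include "postgres.h"

#include "storage/lmgr.h"
#include "storage/lock.h"

/* Probe the lock manager: would an AccessShareLock on this relation
 * conflict with a transaction in progress? A non-empty conflict list
 * means some backend holds a stronger lock, e.g. an ALTER TABLE. */
static bool
relation_has_conflicting_lockers(Oid relid)
{
	LOCKTAG               locktag;
	VirtualTransactionId *vxid_list;
	bool                  conflicting;

	SetLocktagRelationOid(&locktag, relid);
	/* the third argument (conflict count) may be NULL when unused */
	vxid_list = GetLockConflicts(&locktag, AccessShareLock, NULL);
	/* the palloc'd result array is terminated by an invalid vxid */
	conflicting = VirtualTransactionIdIsValid(*vxid_list);
	pfree(vxid_list);
	return conflicting;
}

Checking only the first array element is enough here: the caller wants to know whether anyone is touching the relation, not who.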
@@ -959,13 +954,8 @@ load_table_size(HTAB *local_table_stats_map) } tupdesc = SPI_tuptable->tupdesc; -#if GP_VERSION_NUM < 70000 - if (tupdesc->natts != 3 || ((tupdesc)->attrs[0])->atttypid != OIDOID || - ((tupdesc)->attrs[1])->atttypid != INT8OID || ((tupdesc)->attrs[2])->atttypid != INT2OID) -#else if (tupdesc->natts != 3 || ((tupdesc)->attrs[0]).atttypid != OIDOID || ((tupdesc)->attrs[1]).atttypid != INT8OID || ((tupdesc)->attrs[2]).atttypid != INT2OID) -#endif /* GP_VERSION_NUM */ { if (tupdesc->natts != 3) { @@ -973,13 +963,8 @@ load_table_size(HTAB *local_table_stats_map) } else { -#if GP_VERSION_NUM < 70000 - ereport(WARNING, (errmsg("[diskquota] attrs: %d, %d, %d", tupdesc->attrs[0]->atttypid, - tupdesc->attrs[1]->atttypid, tupdesc->attrs[2]->atttypid))); -#else ereport(WARNING, (errmsg("[diskquota] attrs: %d, %d, %d", tupdesc->attrs[0].atttypid, tupdesc->attrs[1].atttypid, tupdesc->attrs[2].atttypid))); -#endif /* GP_VERSION_NUM */ } ereport(ERROR, (errmsg("[diskquota] table \"table_size\" is corrupted in database \"%s\"," " please recreate diskquota extension", @@ -1213,4 +1198,4 @@ pull_active_table_size_from_seg(HTAB *local_table_stats_map, char *active_oid_ar } cdbdisp_clearCdbPgResults(&cdb_pgresults); return; -} +} \ No newline at end of file diff --git a/gpcontrib/diskquota/src/monitored_db.c b/gpcontrib/diskquota/src/monitored_db.c index c2b29a1bad9..fd590a9f718 100644 --- a/gpcontrib/diskquota/src/monitored_db.c +++ b/gpcontrib/diskquota/src/monitored_db.c @@ -1,6 +1,7 @@ -#include "diskquota.h" #include "postgres.h" +#include "diskquota.h" + #include "funcapi.h" #include "pgstat.h" #include "port/atomics.h" diff --git a/gpcontrib/diskquota/src/quotamodel.c b/gpcontrib/diskquota/src/quotamodel.c index 764b638b0c7..30f383ef57a 100644 --- a/gpcontrib/diskquota/src/quotamodel.c +++ b/gpcontrib/diskquota/src/quotamodel.c @@ -14,12 +14,12 @@ * * ------------------------------------------------------------------------- */ +#include "postgres.h" + #include "diskquota.h" #include "gp_activetable.h" #include "relation_cache.h" -#include "postgres.h" - #include "access/xact.h" #include "catalog/pg_tablespace.h" #include "commands/dbcommands.h" @@ -415,11 +415,7 @@ init_disk_quota_shmem(void) */ RequestAddinShmemSpace(DiskQuotaShmemSize()); /* locks for diskquota refer to init_lwlocks() for details */ -#if GP_VERSION_NUM < 70000 - RequestAddinLWLocks(DiskQuotaLocksItemNumber); -#else RequestNamedLWLockTranche("DiskquotaLocks", DiskQuotaLocksItemNumber); -#endif /* GP_VERSION_NUM */ /* Install startup hook to initialize our shared memory. 
*/ prev_shmem_startup_hook = shmem_startup_hook; @@ -492,17 +488,6 @@ disk_quota_shmem_startup(void) static void init_lwlocks(void) { -#if GP_VERSION_NUM < 70000 - diskquota_locks.active_table_lock = LWLockAssign(); - diskquota_locks.reject_map_lock = LWLockAssign(); - diskquota_locks.extension_ddl_message_lock = LWLockAssign(); - diskquota_locks.extension_ddl_lock = LWLockAssign(); - diskquota_locks.monitored_dbid_cache_lock = LWLockAssign(); - diskquota_locks.relation_cache_lock = LWLockAssign(); - diskquota_locks.dblist_lock = LWLockAssign(); - diskquota_locks.workerlist_lock = LWLockAssign(); - diskquota_locks.altered_reloid_cache_lock = LWLockAssign(); -#else LWLockPadded *lock_base = GetNamedLWLockTranche("DiskquotaLocks"); diskquota_locks.active_table_lock = &lock_base[0].lock; diskquota_locks.reject_map_lock = &lock_base[1].lock; @@ -513,7 +498,6 @@ init_lwlocks(void) diskquota_locks.dblist_lock = &lock_base[6].lock; diskquota_locks.workerlist_lock = &lock_base[7].lock; diskquota_locks.altered_reloid_cache_lock = &lock_base[8].lock; -#endif /* GP_VERSION_NUM */ } static Size @@ -735,11 +719,7 @@ do_check_diskquota_state_is_ready(void) errmsg("[diskquota] check diskquota state SPI_execute failed: error code %d", ret))); tupdesc = SPI_tuptable->tupdesc; -#if GP_VERSION_NUM < 70000 - if (SPI_processed != 1 || tupdesc->natts != 1 || ((tupdesc)->attrs[0])->atttypid != INT4OID) -#else if (SPI_processed != 1 || tupdesc->natts != 1 || ((tupdesc)->attrs[0]).atttypid != INT4OID) -#endif /* GP_VERSION_NUM */ { ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), errmsg("[diskquota] \"diskquota.state\" is corrupted in database \"%s\"," @@ -1511,13 +1491,8 @@ do_load_quotas(void) errmsg("[diskquota] load_quotas SPI_execute failed: error code %d", ret))); tupdesc = SPI_tuptable->tupdesc; -#if GP_VERSION_NUM < 70000 - if (tupdesc->natts != NUM_QUOTA_CONFIG_ATTRS || ((tupdesc)->attrs[0])->atttypid != OIDOID || - ((tupdesc)->attrs[1])->atttypid != INT4OID || ((tupdesc)->attrs[2])->atttypid != INT8OID) -#else if (tupdesc->natts != NUM_QUOTA_CONFIG_ATTRS || ((tupdesc)->attrs[0]).atttypid != OIDOID || ((tupdesc)->attrs[1]).atttypid != INT4OID || ((tupdesc)->attrs[2]).atttypid != INT8OID) -#endif /* GP_VERSION_NUM */ { ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), errmsg("[diskquota] configuration table is corrupted in database \"%s\"," @@ -1810,11 +1785,7 @@ GetUserName(Oid relowner, bool skip_name) pg_ltoa(relowner, namestr.data); return pstrdup(namestr.data); } -#if GP_VERSION_NUM < 70000 - return GetUserNameFromId(relowner); -#else return GetUserNameFromId(relowner, false); -#endif /* GP_VERSION_NUM */ } static void @@ -2266,19 +2237,19 @@ show_rejectmap(PG_FUNCTION_ARGS) switch ((QuotaType)keyitem.targettype) { case ROLE_QUOTA: - StrNCpy(targettype_str, "ROLE_QUOTA", _TARGETTYPE_STR_SIZE); + snprintf(targettype_str, _TARGETTYPE_STR_SIZE, "%s", "ROLE_QUOTA"); break; case NAMESPACE_QUOTA: - StrNCpy(targettype_str, "NAMESPACE_QUOTA", _TARGETTYPE_STR_SIZE); + snprintf(targettype_str, _TARGETTYPE_STR_SIZE, "%s", "NAMESPACE_QUOTA"); break; case ROLE_TABLESPACE_QUOTA: - StrNCpy(targettype_str, "ROLE_TABLESPACE_QUOTA", _TARGETTYPE_STR_SIZE); + snprintf(targettype_str, _TARGETTYPE_STR_SIZE, "%s", "ROLE_TABLESPACE_QUOTA"); break; case NAMESPACE_TABLESPACE_QUOTA: - StrNCpy(targettype_str, "NAMESPACE_TABLESPACE_QUOTA", _TARGETTYPE_STR_SIZE); + snprintf(targettype_str, _TARGETTYPE_STR_SIZE, "%s", "NAMESPACE_TABLESPACE_QUOTA"); break; default: - StrNCpy(targettype_str, "UNKNOWN", _TARGETTYPE_STR_SIZE); + 
snprintf(targettype_str, _TARGETTYPE_STR_SIZE, "%s", "UNKNOWN"); break; } diff --git a/gpcontrib/diskquota/src/relation_cache.c b/gpcontrib/diskquota/src/relation_cache.c index 647779deee3..8b1e7f11d61 100644 --- a/gpcontrib/diskquota/src/relation_cache.c +++ b/gpcontrib/diskquota/src/relation_cache.c @@ -11,9 +11,7 @@ */ #include "postgres.h" -#if GP_VERSION_NUM >= 70000 #include "access/relation.h" -#endif /* GP_VERSION_NUM */ #include "catalog/indexing.h" #include "catalog/pg_class.h" #include "catalog/pg_namespace.h" diff --git a/gpcontrib/diskquota/tests/CMakeLists.txt b/gpcontrib/diskquota/tests/CMakeLists.txt index ee56ea15ac3..72c65f73cbd 100644 --- a/gpcontrib/diskquota/tests/CMakeLists.txt +++ b/gpcontrib/diskquota/tests/CMakeLists.txt @@ -2,16 +2,9 @@ include(${CMAKE_SOURCE_DIR}/cmake/Regress.cmake) list(APPEND isolation2_expected_DIR ${CMAKE_CURRENT_SOURCE_DIR}/isolation2/expected) list(APPEND regress_expected_DIR ${CMAKE_CURRENT_SOURCE_DIR}/regress/expected) -if (${GP_MAJOR_VERSION} EQUAL 7) - list(APPEND isolation2_expected_DIR ${CMAKE_CURRENT_SOURCE_DIR}/isolation2/expected7) - list(APPEND regress_expected_DIR ${CMAKE_CURRENT_SOURCE_DIR}/regress/expected7) - # PLPYTHON_LANG_STR will be replaced by Regress.cmake - set(PLPYTHON_LANG_STR "plpython3u") - set(POSTMASTER_START_CMD "pg_ctl -D $MASTER_DATA_DIRECTORY -w -o \"-c gp_role=dispatch\" start") -else() - set(PLPYTHON_LANG_STR "plpython2u") - set(POSTMASTER_START_CMD "pg_ctl -D $MASTER_DATA_DIRECTORY -w -o \"-E\" start") -endif() +# PLPYTHON_LANG_STR will be replaced by Regress.cmake +set(PLPYTHON_LANG_STR "plpython3u") +set(POSTMASTER_START_CMD "pg_ctl -D $COORDINATOR_DATA_DIRECTORY -w -o \"-c gp_role=dispatch\" start") set(exclude_fault_injector OFF) # GP7 release build doesn't support fault injector. @@ -20,6 +13,23 @@ if (CMAKE_BUILD_TYPE STREQUAL "Release") set(exclude_fault_injector ON) endif() +# Check if pg_isolation2_regress is available (either pre-built or can be built from source) +# In binary-only installations, PG_SRC_DIR may not exist or isolation2 may not be buildable +set(ENABLE_ISOLATION2_TESTS OFF) + +# First, check if pg_isolation2_regress is already installed +find_program(PG_ISOLATION2_REGRESS pg_isolation2_regress HINTS ${PG_BIN_DIR}) +if(PG_ISOLATION2_REGRESS) + message(STATUS "Found pg_isolation2_regress: ${PG_ISOLATION2_REGRESS}") + set(ENABLE_ISOLATION2_TESTS ON) +elseif(PG_SRC_DIR_AVAILABLE AND EXISTS "${PG_SRC_DIR}/src/test/isolation2/Makefile") + # Can build from source + message(STATUS "pg_isolation2_regress will be built from source: ${PG_SRC_DIR}/src/test/isolation2") + set(ENABLE_ISOLATION2_TESTS ON) +else() + message(WARNING "pg_isolation2_regress not found and cannot be built from source. 
Isolation2 tests will be disabled.") +endif() + RegressTarget_Add(regress INIT_FILE ${CMAKE_CURRENT_SOURCE_DIR}/init_file @@ -34,32 +44,39 @@ RegressTarget_Add(regress --load-extension=diskquota_test --dbname=contrib_regression) -RegressTarget_Add(isolation2 - REGRESS_TYPE - isolation2 - INIT_FILE - ${CMAKE_CURRENT_SOURCE_DIR}/init_file - SQL_DIR ${CMAKE_CURRENT_SOURCE_DIR}/isolation2/sql - EXPECTED_DIR ${isolation2_expected_DIR} - RESULTS_DIR ${CMAKE_CURRENT_SOURCE_DIR}/isolation2/results - DATA_DIR ${CMAKE_CURRENT_SOURCE_DIR}/data - SCHEDULE_FILE ${CMAKE_CURRENT_SOURCE_DIR}/isolation2/isolation2_schedule - EXCLUDE_FAULT_INJECT_TEST ${exclude_fault_injector} - REGRESS_OPTS - --load-extension=gp_inject_fault - --dbname=isolation2test) +if(ENABLE_ISOLATION2_TESTS) + RegressTarget_Add(isolation2 + REGRESS_TYPE + isolation2 + INIT_FILE + ${CMAKE_CURRENT_SOURCE_DIR}/init_file + SQL_DIR ${CMAKE_CURRENT_SOURCE_DIR}/isolation2/sql + EXPECTED_DIR ${isolation2_expected_DIR} + RESULTS_DIR ${CMAKE_CURRENT_SOURCE_DIR}/isolation2/results + DATA_DIR ${CMAKE_CURRENT_SOURCE_DIR}/data + SCHEDULE_FILE ${CMAKE_CURRENT_SOURCE_DIR}/isolation2/isolation2_schedule + EXCLUDE_FAULT_INJECT_TEST ${exclude_fault_injector} + REGRESS_OPTS + --load-extension=gp_inject_fault + --dbname=isolation2test) +endif() add_custom_target(install_test_extension COMMAND - cmake -E copy ${CMAKE_SOURCE_DIR}/control/test/diskquota_test.control ${PG_HOME}/share/postgresql/extension + cmake -E copy ${CMAKE_SOURCE_DIR}/control/test/diskquota_test.control ${CMAKE_INSTALL_PREFIX}/share/postgresql/extension COMMAND - cmake -E copy ${CMAKE_SOURCE_DIR}/control/test/diskquota_test--1.0.sql ${PG_HOME}/share/postgresql/extension + cmake -E copy ${CMAKE_SOURCE_DIR}/control/test/diskquota_test--1.0.sql ${CMAKE_INSTALL_PREFIX}/share/postgresql/extension ) add_custom_target(installcheck) -add_dependencies(isolation2 install_test_extension) add_dependencies(regress install_test_extension) -add_dependencies(installcheck isolation2 regress) + +if(ENABLE_ISOLATION2_TESTS) + add_dependencies(isolation2 install_test_extension) + add_dependencies(installcheck isolation2 regress) +else() + add_dependencies(installcheck regress) +endif() # Example to run test_truncate infinite times # RegressTarget_Add(regress_config diff --git a/gpcontrib/diskquota/tests/isolation2/expected/config.out b/gpcontrib/diskquota/tests/isolation2/expected/config.out index 8ad8cbd0d08..294d8a78c19 100644 --- a/gpcontrib/diskquota/tests/isolation2/expected/config.out +++ b/gpcontrib/diskquota/tests/isolation2/expected/config.out @@ -1,12 +1,56 @@ +--start_ignore +CREATE DATABASE diskquota; +CREATE +--end_ignore !\retcode gpconfig -c shared_preload_libraries -v $(./data/current_binary_name); +-- start_ignore +20251211:00:07:53:067251 gpconfig:cdw:gpadmin-[INFO]:-completed successfully with parameters '-c shared_preload_libraries -v diskquota-2.3.so' + +-- end_ignore (exited with code 0) !\retcode gpconfig -c diskquota.naptime -v 0 --skipvalidation; +-- start_ignore +20251211:00:07:53:067303 gpconfig:cdw:gpadmin-[INFO]:-completed successfully with parameters '-c diskquota.naptime -v 0 --skipvalidation' + +-- end_ignore (exited with code 0) !\retcode gpconfig -c max_worker_processes -v 20 --skipvalidation; +-- start_ignore +20251211:00:07:53:067350 gpconfig:cdw:gpadmin-[INFO]:-completed successfully with parameters '-c max_worker_processes -v 20 --skipvalidation' + +-- end_ignore (exited with code 0) !\retcode gpstop -raf; +-- start_ignore +20251211:00:07:53:067397 
gpstop:cdw:gpadmin-[INFO]:-Starting gpstop with args: -raf +20251211:00:07:53:067397 gpstop:cdw:gpadmin-[INFO]:-Gathering information and validating the environment... +20251211:00:07:53:067397 gpstop:cdw:gpadmin-[INFO]:-Obtaining Cloudberry Coordinator catalog information +20251211:00:07:53:067397 gpstop:cdw:gpadmin-[INFO]:-Obtaining Segment details from coordinator... +20251211:00:07:53:067397 gpstop:cdw:gpadmin-[INFO]:-Cloudberry Version: 'postgres (Apache Cloudberry) 3.0.0-devel build dev' +20251211:00:07:53:067397 gpstop:cdw:gpadmin-[INFO]:-Commencing Coordinator instance shutdown with mode='fast' +20251211:00:07:53:067397 gpstop:cdw:gpadmin-[INFO]:-Coordinator segment instance directory=/home/gpadmin/cloudberry/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 +20251211:00:07:53:067397 gpstop:cdw:gpadmin-[INFO]:-Attempting forceful termination of any leftover coordinator process +20251211:00:07:53:067397 gpstop:cdw:gpadmin-[INFO]:-Terminating processes for segment /home/gpadmin/cloudberry/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 +20251211:00:07:53:067397 gpstop:cdw:gpadmin-[INFO]:-Stopping coordinator standby host cdw mode=fast +20251211:00:07:54:067397 gpstop:cdw:gpadmin-[INFO]:-Successfully shutdown standby process on cdw +20251211:00:07:54:067397 gpstop:cdw:gpadmin-[INFO]:-Targeting dbid [2, 5, 3, 6, 4, 7] for shutdown +20251211:00:07:54:067397 gpstop:cdw:gpadmin-[INFO]:-Commencing parallel primary segment instance shutdown, please wait... +20251211:00:07:54:067397 gpstop:cdw:gpadmin-[INFO]:-0.00% of jobs completed +20251211:00:07:57:067397 gpstop:cdw:gpadmin-[INFO]:-100.00% of jobs completed +20251211:00:07:57:067397 gpstop:cdw:gpadmin-[INFO]:-Commencing parallel mirror segment instance shutdown, please wait... +20251211:00:07:57:067397 gpstop:cdw:gpadmin-[INFO]:-0.00% of jobs completed +20251211:00:07:59:067397 gpstop:cdw:gpadmin-[INFO]:-100.00% of jobs completed +20251211:00:07:59:067397 gpstop:cdw:gpadmin-[INFO]:----------------------------------------------------- +20251211:00:07:59:067397 gpstop:cdw:gpadmin-[INFO]:- Segments stopped successfully = 6 +20251211:00:07:59:067397 gpstop:cdw:gpadmin-[INFO]:- Segments with errors during stop = 0 +20251211:00:07:59:067397 gpstop:cdw:gpadmin-[INFO]:----------------------------------------------------- +20251211:00:07:59:067397 gpstop:cdw:gpadmin-[INFO]:-Successfully shutdown 6 of 6 segment instances +20251211:00:07:59:067397 gpstop:cdw:gpadmin-[INFO]:-Database successfully shutdown with no errors reported +20251211:00:07:59:067397 gpstop:cdw:gpadmin-[INFO]:-Restarting System... + +-- end_ignore (exited with code 0) -- Show the values of all GUC variables diff --git a/gpcontrib/diskquota/tests/isolation2/expected/reset_config.out b/gpcontrib/diskquota/tests/isolation2/expected/reset_config.out index 3d076b36cca..045c86d1e10 100644 --- a/gpcontrib/diskquota/tests/isolation2/expected/reset_config.out +++ b/gpcontrib/diskquota/tests/isolation2/expected/reset_config.out @@ -1,6 +1,19 @@ !\retcode gpconfig -c diskquota.naptime -v 2; +-- start_ignore +20251211:00:10:07:077993 gpconfig:cdw:gpadmin-[INFO]:-completed successfully with parameters '-c diskquota.naptime -v 2' + +-- end_ignore (exited with code 0) !\retcode gpstop -u; +-- start_ignore +20251211:00:10:07:078041 gpstop:cdw:gpadmin-[INFO]:-Starting gpstop with args: -u +20251211:00:10:07:078041 gpstop:cdw:gpadmin-[INFO]:-Gathering information and validating the environment... 
+20251211:00:10:07:078041 gpstop:cdw:gpadmin-[INFO]:-Obtaining Cloudberry Coordinator catalog information +20251211:00:10:07:078041 gpstop:cdw:gpadmin-[INFO]:-Obtaining Segment details from coordinator... +20251211:00:10:07:078041 gpstop:cdw:gpadmin-[INFO]:-Cloudberry Version: 'postgres (Apache Cloudberry) 3.0.0-devel build dev' +20251211:00:10:07:078041 gpstop:cdw:gpadmin-[INFO]:-Signalling all postmaster processes to reload + +-- end_ignore (exited with code 0) 1: SHOW diskquota.naptime; diff --git a/gpcontrib/diskquota/tests/isolation2/expected7/setup.out b/gpcontrib/diskquota/tests/isolation2/expected/setup.out similarity index 100% rename from gpcontrib/diskquota/tests/isolation2/expected7/setup.out rename to gpcontrib/diskquota/tests/isolation2/expected/setup.out diff --git a/gpcontrib/diskquota/tests/isolation2/expected/test_dropped_table.out b/gpcontrib/diskquota/tests/isolation2/expected/test_dropped_table.out index 0af4cabd337..5e889f0d8bd 100644 --- a/gpcontrib/diskquota/tests/isolation2/expected/test_dropped_table.out +++ b/gpcontrib/diskquota/tests/isolation2/expected/test_dropped_table.out @@ -2,10 +2,19 @@ !\retcode gpconfig -c diskquota.naptime -v 5 --skipvalidation; -- start_ignore +20251211:00:09:08:074039 gpconfig:cdw:gpadmin-[INFO]:-completed successfully with parameters '-c diskquota.naptime -v 5 --skipvalidation' + -- end_ignore (exited with code 0) !\retcode gpstop -u; -- start_ignore +20251211:00:09:08:074086 gpstop:cdw:gpadmin-[INFO]:-Starting gpstop with args: -u +20251211:00:09:08:074086 gpstop:cdw:gpadmin-[INFO]:-Gathering information and validating the environment... +20251211:00:09:08:074086 gpstop:cdw:gpadmin-[INFO]:-Obtaining Cloudberry Coordinator catalog information +20251211:00:09:08:074086 gpstop:cdw:gpadmin-[INFO]:-Obtaining Segment details from coordinator... +20251211:00:09:08:074086 gpstop:cdw:gpadmin-[INFO]:-Cloudberry Version: 'postgres (Apache Cloudberry) 3.0.0-devel build dev' +20251211:00:09:08:074086 gpstop:cdw:gpadmin-[INFO]:-Signalling all postmaster processes to reload + -- end_ignore (exited with code 0) @@ -40,6 +49,32 @@ DROP -- Restart cluster fastly !\retcode gpstop -afr; -- start_ignore +20251211:00:09:28:074151 gpstop:cdw:gpadmin-[INFO]:-Starting gpstop with args: -afr +20251211:00:09:28:074151 gpstop:cdw:gpadmin-[INFO]:-Gathering information and validating the environment... +20251211:00:09:28:074151 gpstop:cdw:gpadmin-[INFO]:-Obtaining Cloudberry Coordinator catalog information +20251211:00:09:28:074151 gpstop:cdw:gpadmin-[INFO]:-Obtaining Segment details from coordinator... 
+20251211:00:09:28:074151 gpstop:cdw:gpadmin-[INFO]:-Cloudberry Version: 'postgres (Apache Cloudberry) 3.0.0-devel build dev' +20251211:00:09:28:074151 gpstop:cdw:gpadmin-[INFO]:-Commencing Coordinator instance shutdown with mode='fast' +20251211:00:09:28:074151 gpstop:cdw:gpadmin-[INFO]:-Coordinator segment instance directory=/home/gpadmin/cloudberry/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 +20251211:00:09:28:074151 gpstop:cdw:gpadmin-[INFO]:-Attempting forceful termination of any leftover coordinator process +20251211:00:09:28:074151 gpstop:cdw:gpadmin-[INFO]:-Terminating processes for segment /home/gpadmin/cloudberry/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 +20251211:00:09:28:074151 gpstop:cdw:gpadmin-[INFO]:-Stopping coordinator standby host cdw mode=fast +20251211:00:09:28:074151 gpstop:cdw:gpadmin-[INFO]:-Successfully shutdown standby process on cdw +20251211:00:09:28:074151 gpstop:cdw:gpadmin-[INFO]:-Targeting dbid [2, 5, 3, 6, 4, 7] for shutdown +20251211:00:09:28:074151 gpstop:cdw:gpadmin-[INFO]:-Commencing parallel primary segment instance shutdown, please wait... +20251211:00:09:28:074151 gpstop:cdw:gpadmin-[INFO]:-0.00% of jobs completed +20251211:00:09:31:074151 gpstop:cdw:gpadmin-[INFO]:-100.00% of jobs completed +20251211:00:09:31:074151 gpstop:cdw:gpadmin-[INFO]:-Commencing parallel mirror segment instance shutdown, please wait... +20251211:00:09:31:074151 gpstop:cdw:gpadmin-[INFO]:-0.00% of jobs completed +20251211:00:09:33:074151 gpstop:cdw:gpadmin-[INFO]:-100.00% of jobs completed +20251211:00:09:33:074151 gpstop:cdw:gpadmin-[INFO]:----------------------------------------------------- +20251211:00:09:33:074151 gpstop:cdw:gpadmin-[INFO]:- Segments stopped successfully = 6 +20251211:00:09:33:074151 gpstop:cdw:gpadmin-[INFO]:- Segments with errors during stop = 0 +20251211:00:09:33:074151 gpstop:cdw:gpadmin-[INFO]:----------------------------------------------------- +20251211:00:09:33:074151 gpstop:cdw:gpadmin-[INFO]:-Successfully shutdown 6 of 6 segment instances +20251211:00:09:33:074151 gpstop:cdw:gpadmin-[INFO]:-Database successfully shutdown with no errors reported +20251211:00:09:33:074151 gpstop:cdw:gpadmin-[INFO]:-Restarting System... + -- end_ignore (exited with code 0) @@ -64,9 +99,18 @@ DROP !\retcode gpconfig -c diskquota.naptime -v 0 --skipvalidation; -- start_ignore +20251211:00:09:45:077146 gpconfig:cdw:gpadmin-[INFO]:-completed successfully with parameters '-c diskquota.naptime -v 0 --skipvalidation' + -- end_ignore (exited with code 0) !\retcode gpstop -u; -- start_ignore +20251211:00:09:45:077193 gpstop:cdw:gpadmin-[INFO]:-Starting gpstop with args: -u +20251211:00:09:45:077193 gpstop:cdw:gpadmin-[INFO]:-Gathering information and validating the environment... +20251211:00:09:45:077193 gpstop:cdw:gpadmin-[INFO]:-Obtaining Cloudberry Coordinator catalog information +20251211:00:09:45:077193 gpstop:cdw:gpadmin-[INFO]:-Obtaining Segment details from coordinator... 
+20251211:00:09:45:077193 gpstop:cdw:gpadmin-[INFO]:-Cloudberry Version: 'postgres (Apache Cloudberry) 3.0.0-devel build dev' +20251211:00:09:45:077193 gpstop:cdw:gpadmin-[INFO]:-Signalling all postmaster processes to reload + -- end_ignore (exited with code 0) diff --git a/gpcontrib/diskquota/tests/isolation2/expected/test_per_segment_config.out b/gpcontrib/diskquota/tests/isolation2/expected/test_per_segment_config.out index 0d4aa43b426..79b4a8ffcdc 100644 --- a/gpcontrib/diskquota/tests/isolation2/expected/test_per_segment_config.out +++ b/gpcontrib/diskquota/tests/isolation2/expected/test_per_segment_config.out @@ -44,12 +44,12 @@ COMMIT SELECT segratio FROM diskquota.quota_config, pg_namespace, diskquota.target WHERE targetoid = diskquota.target.rowId AND diskquota.target.primaryOid = oid AND nspname = 's101'; segratio ---------- - 1.0 + 1 (1 row) SELECT segratio from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'spc101'; segratio ---------- - 1.0 + 1 (1 row) -- cleanup truncate table diskquota.quota_config; @@ -81,12 +81,12 @@ COMMIT SELECT segratio FROM diskquota.quota_config, pg_namespace, diskquota.target WHERE targetoid = diskquota.target.rowId AND diskquota.target.primaryOid = oid AND nspname = 's101'; segratio ---------- - 1.0 + 1 (1 row) SELECT segratio from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'spc101'; segratio ---------- - 1.0 + 1 (1 row) -- cleanup truncate table diskquota.quota_config; @@ -127,12 +127,12 @@ COMMIT SELECT segratio FROM diskquota.quota_config, pg_namespace, diskquota.target WHERE targetoid = diskquota.target.rowId AND diskquota.target.primaryOid = oid AND nspname = 's101'; segratio ---------- - 1.0 + 1 (1 row) SELECT segratio from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'spc101'; segratio ---------- - 1.0 + 1 (1 row) -- cleanup truncate table diskquota.quota_config; @@ -169,12 +169,12 @@ COMMIT SELECT segratio FROM diskquota.quota_config, pg_namespace, diskquota.target WHERE targetoid = diskquota.target.rowId AND diskquota.target.primaryOid = oid AND nspname = 's101'; segratio ---------- - 1.0 + 1 (1 row) SELECT segratio from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'spc101'; segratio ---------- - 1.0 + 1 (1 row) -- cleanup truncate table diskquota.quota_config; @@ -211,7 +211,7 @@ COMMIT SELECT segratio FROM diskquota.quota_config, pg_namespace, diskquota.target WHERE targetoid = diskquota.target.rowId AND diskquota.target.primaryOid = oid AND nspname = 's101'; segratio ---------- - 0.0 + 0 (1 row) SELECT segratio from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'spc101'; segratio @@ -252,7 +252,7 @@ COMMIT SELECT segratio FROM diskquota.quota_config, pg_namespace, diskquota.target WHERE targetoid = diskquota.target.rowId AND diskquota.target.primaryOid = oid AND nspname = 's101'; segratio ---------- - 0.0 + 0 (1 row) SELECT segratio from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'spc101'; segratio diff --git a/gpcontrib/diskquota/tests/isolation2/expected/test_postmaster_restart.out b/gpcontrib/diskquota/tests/isolation2/expected/test_postmaster_restart.out index 5f01eee9379..53bf3c5526d 100644 --- a/gpcontrib/diskquota/tests/isolation2/expected/test_postmaster_restart.out +++ b/gpcontrib/diskquota/tests/isolation2/expected/test_postmaster_restart.out @@ -27,39 +27,55 @@ SET -- expect fail 1: CREATE TABLE t1 AS SELECT generate_series(1,10000000); -ERROR: schema's 
disk space quota exceeded with name: 157893 (seg0 127.0.0.1:6002 pid=1025673) +ERROR: schema's disk space quota exceeded with name: 17623 (seg1 172.17.0.2:7003 pid=77318) 1q: ... -- launcher should exist -- [p]ostgres is to filter out the pgrep itself !\retcode pgrep -f "[p]ostgres.*launcher"; -- start_ignore -2774491 +77001 +77006 +77014 +77017 +77025 +77030 +77043 +77057 +77058 +77060 -- end_ignore (exited with code 0) -- bgworker should exist !\retcode pgrep -f "[p]ostgres.*diskquota.*isolation2test"; -- start_ignore -2774659 +77092 -- end_ignore (exited with code 0) -- stop postmaster -!\retcode pg_ctl -D $MASTER_DATA_DIRECTORY -w stop; +!\retcode pg_ctl -D $COORDINATOR_DATA_DIRECTORY -w stop; -- start_ignore waiting for server to shut down.... done server stopped + -- end_ignore (exited with code 0) -- launcher should be terminated !\retcode pgrep -f "[p]ostgres.*launcher"; -- start_ignore +77001 +77006 +77014 +77017 +77025 +77030 -- end_ignore -(exited with code 1) +(exited with code 0) -- bgworker should be terminated !\retcode pgrep -f "[p]ostgres.*diskquota.*isolation2test"; -- start_ignore @@ -70,13 +86,10 @@ server stopped -- start postmaster -- -E needs to be changed to "-c gp_role=dispatch" for GPDB7 -- See https://github.com/greenplum-db/gpdb/pull/9396 -!\retcode pg_ctl -D $MASTER_DATA_DIRECTORY -w -o "-E" start; +!\retcode pg_ctl -D $COORDINATOR_DATA_DIRECTORY -w -o "-c gp_role=dispatch" start; -- start_ignore -waiting for server to start....2022-02-14 21:41:39.147869 CST,,,p1017570,th1516906368,,,,0,,,seg-1,,,,,"LOG","00000","registering background worker ""ftsprobe process""",,,,,,,,"RegisterBackgroundWorker","bgworker.c",773, -2022-02-14 21:41:39.147899 CST,,,p1017570,th1516906368,,,,0,,,seg-1,,,,,"LOG","00000","registering background worker ""dtx recovery process""",,,,,,,,"RegisterBackgroundWorker","bgworker.c",773, -2022-02-14 21:41:39.147934 CST,,,p1017570,th1516906368,,,,0,,,seg-1,,,,,"LOG","00000","registering background worker ""sweeper process""",,,,,,,,"RegisterBackgroundWorker","bgworker.c",773, -2022-02-14 21:41:39.148550 CST,,,p1017570,th1516906368,,,,0,,,seg-1,,,,,"LOG","00000","registering background worker ""[diskquota] - launcher""",,,,,,,,"RegisterBackgroundWorker","bgworker.c",773, -2022-02-14 21:41:39.272714 CST,,,p1017570,th1516906368,,,,0,,,seg-1,,,,,"LOG","00000","redirecting log output to logging collector process",,"Future log output will appear in directory ""pg_log"".",,,,,,"SysLogger_Start","syslogger.c",986, +waiting for server to start....2025-12-11 00:09:46.602285 PST,,,p77338,th1267404928,,,,0,,,seg-1,,,,,"LOG","00000","registered custom resource manager ""Pax resource manager"" with ID 199",,,,,,,,"RegisterCustomRmgr","rmgr.c",139, +2025-12-11 00:09:46.661071 PST,,,p77338,th1267404928,,,,0,,,seg-1,,,,,"LOG","00000","redirecting log output to logging collector process",,"Future log output will appear in directory ""log"".",,,,,,"SysLogger_Start","syslogger.c",735, done server started @@ -92,14 +105,23 @@ server started -- launcher should be restarted !\retcode pgrep -f "[p]ostgres.*launcher"; -- start_ignore -2771049 +77001 +77006 +77014 +77017 +77025 +77030 +77344 +77358 +77359 +77361 -- end_ignore (exited with code 0) -- bgworker should be restarted !\retcode pgrep -f "[p]ostgres.*diskquota.*isolation2test"; -- start_ignore -2771074 +77383 -- end_ignore (exited with code 0) @@ -113,7 +135,7 @@ SET (1 row) -- expect fail 1: CREATE TABLE t2 AS SELECT generate_series(1,10000000); -ERROR: schema's disk space quota exceeded with name: 158089 
(seg0 127.0.0.1:6002 pid=1027799) +ERROR: schema's disk space quota exceeded with name: 17623 (seg2 172.17.0.2:7004 pid=77412) -- enlarge the quota limits 1: SELECT diskquota.set_schema_quota('postmaster_restart_s', '100 MB'); set_schema_quota diff --git a/gpcontrib/diskquota/tests/isolation2/expected/test_rejectmap.out b/gpcontrib/diskquota/tests/isolation2/expected/test_rejectmap.out index 2ed02900e8d..bf54d2975d9 100644 --- a/gpcontrib/diskquota/tests/isolation2/expected/test_rejectmap.out +++ b/gpcontrib/diskquota/tests/isolation2/expected/test_rejectmap.out @@ -53,7 +53,7 @@ SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) -- Session 1 will return and emit an error message saying that the quota limit is exceeded on seg0. 1<: <... completed> -ERROR: schema's disk space quota exceeded with name: 2200 (seg0 127.0.0.1:6002 pid=2163) +ERROR: schema's disk space quota exceeded with name: 2200 (seg0 172.17.0.2:7002 pid=70646) -- Clean up the rejectmap on seg0. SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; @@ -98,7 +98,7 @@ SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) -- Session 1 will return and emit an error message saying that the quota limit is exceeded on seg0. 1<: <... completed> -ERROR: schema's disk space quota exceeded with name: 2200 (seg0 127.0.0.1:6002 pid=2163) +ERROR: schema's disk space quota exceeded with name: 2200 (seg0 172.17.0.2:7002 pid=70646) -- Clean up the rejectmap on seg0. SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; @@ -143,7 +143,7 @@ SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) -- Session 1 will return and emit an error message saying that the quota limit is exceeded on seg0. 1<: <... completed> -ERROR: schema's disk space quota exceeded with name: 2200 (seg0 127.0.0.1:6002 pid=2163) +ERROR: schema's disk space quota exceeded with name: 2200 (seg0 172.17.0.2:7002 pid=70646) -- Clean up the rejectmap on seg0. SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; @@ -190,7 +190,7 @@ SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) -- Session 1 will return and emit an error message saying that the quota limit is exceeded on seg0. 1<: <... completed> -ERROR: schema's disk space quota exceeded with name: 2200 (seg0 127.0.0.1:6002 pid=2163) +ERROR: schema's disk space quota exceeded with name: 2200 (seg0 172.17.0.2:7002 pid=70646) -- Clean up the rejectmap on seg0. SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; @@ -229,7 +229,7 @@ SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) Success: (1 row) 1<: <... completed> -ERROR: tablespace: 1663, schema: 2200 diskquota exceeded per segment quota (seg0 127.0.0.1:6002 pid=2163) +ERROR: tablespace: 1663, schema: 2200 diskquota exceeded per segment quota (seg0 172.17.0.2:7002 pid=70646) -- Clean up the rejectmap on seg0. 
SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; refresh_rejectmap @@ -268,7 +268,7 @@ SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) Success: (1 row) 1<: <... completed> -ERROR: tablespace: 1663, role: 10 diskquota exceeded per segment quota (seg0 127.0.0.1:6002 pid=2163) +ERROR: tablespace: 1663, role: 10 diskquota exceeded per segment quota (seg0 172.17.0.2:7002 pid=70646) -- Clean up the rejectmap on seg0. SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; refresh_rejectmap @@ -294,18 +294,18 @@ DROP -- Below are helper functions for testing adding uncommitted relations to rejectmap. -- -- start_ignore -CREATE OR REPLACE LANGUAGE plpythonu; +CREATE OR REPLACE LANGUAGE plpython3u; CREATE -- end_ignore CREATE TYPE cached_relation_entry AS ( reloid oid, relname text, relowner oid, relnamespace oid, reltablespace oid, relfilenode oid, segid int); CREATE -- This function dumps given relation_cache entries to the given file. -CREATE OR REPLACE FUNCTION dump_relation_cache_to_file(filename text) RETURNS void AS $$ rv = plpy.execute(""" SELECT (oid, relname, relowner, relnamespace, reltablespace, relfilenode, gp_segment_id)::cached_relation_entry FROM gp_dist_random('pg_class') """) with open(filename, 'wt') as f: for v in rv: row = v['row'] # The composite type results are different between GP6 & GP7 if isinstance(row, dict): r = "{0},{1},{2},{3},{4},{5},{6}".format( row['reloid'], row['relname'], row['relowner'], row['relnamespace'], row['reltablespace'], row['relfilenode'], row['segid']) else: r = row[1:-1] f.write(r + '\n') $$ LANGUAGE plpython2u; +CREATE OR REPLACE FUNCTION dump_relation_cache_to_file(filename text) RETURNS void AS $$ rv = plpy.execute(""" SELECT (oid, relname, relowner, relnamespace, reltablespace, relfilenode, gp_segment_id)::cached_relation_entry FROM gp_dist_random('pg_class') """) with open(filename, 'wt') as f: for v in rv: row = v['row'] # The composite type results are different between GP6 & GP7 if isinstance(row, dict): r = "{0},{1},{2},{3},{4},{5},{6}".format( row['reloid'], row['relname'], row['relowner'], row['relnamespace'], row['reltablespace'], row['relfilenode'], row['segid']) else: r = row[1:-1] f.write(r + '\n') $$ LANGUAGE plpython3u; CREATE -- This function reads relation_cache entries from the given file. -CREATE OR REPLACE FUNCTION read_relation_cache_from_file(filename text) RETURNS SETOF cached_relation_entry AS $$ with open(filename) as f: for l in f: r = l.split(',') yield (r[0], r[1], r[2], r[3], r[4], r[5], r[6]) $$ LANGUAGE plpythonu; +CREATE OR REPLACE FUNCTION read_relation_cache_from_file(filename text) RETURNS SETOF cached_relation_entry AS $$ with open(filename) as f: for l in f: r = l.split(',') yield (r[0], r[1], r[2], r[3], r[4], r[5], r[6]) $$ LANGUAGE plpython3u; CREATE -- This function replaces the oid appears in the auxiliary relation's name @@ -352,7 +352,7 @@ SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) Success: (1 row) 1<: <... completed> -ERROR: schema's disk space quota exceeded with name: 2200 (seg0 127.0.0.1:6002 pid=2163) +ERROR: schema's disk space quota exceeded with name: 2200 (seg0 172.17.0.2:7002 pid=70646) 1: ABORT; ABORT -- Clean up the rejectmap on seg0. 
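[Note on the hunk above: the two plpython helpers are ported essentially verbatim from plpythonu to plpython3u, so they can be smoke-tested as soon as they are created. A minimal sketch, assuming a session in the isolation2 test database and reusing the same illustrative /tmp path the test itself writes to:

-- dump the per-segment pg_class snapshot to a file, then read it back
SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv');
SELECT count(*) > 0 AS has_entries
  FROM read_relation_cache_from_file('/tmp/test_rejectmap.csv');
]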
@@ -397,7 +397,7 @@ SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) Success: (1 row) 1<: <... completed> -ERROR: role's disk space quota exceeded with name: 10 (seg0 127.0.0.1:6002 pid=2163) +ERROR: role's disk space quota exceeded with name: 10 (seg0 172.17.0.2:7002 pid=70646) 1: ABORT; ABORT -- Clean up the rejectmap on seg0. @@ -442,7 +442,7 @@ SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) Success: (1 row) 1<: <... completed> -ERROR: tablespace: 1663, schema: 2200 diskquota exceeded (seg0 127.0.0.1:6002 pid=2163) +ERROR: tablespace: 1663, schema: 2200 diskquota exceeded (seg0 172.17.0.2:7002 pid=70646) 1: ABORT; ABORT -- Clean up the rejectmap on seg0. @@ -487,7 +487,7 @@ SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) Success: (1 row) 1<: <... completed> -ERROR: tablespace: 1663, role: 10 diskquota exceeded (seg0 127.0.0.1:6002 pid=2163) +ERROR: tablespace: 1663, role: 10 diskquota exceeded (seg0 172.17.0.2:7002 pid=70646) 1: ABORT; ABORT -- Clean up the rejectmap on seg0. @@ -532,7 +532,7 @@ SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) Success: (1 row) 1<: <... completed> -ERROR: tablespace: 1663, schema: 2200 diskquota exceeded per segment quota (seg0 127.0.0.1:6002 pid=2163) +ERROR: tablespace: 1663, schema: 2200 diskquota exceeded per segment quota (seg0 172.17.0.2:7002 pid=70646) 1: ABORT; ABORT -- Clean up the rejectmap on seg0. @@ -577,7 +577,7 @@ SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) Success: (1 row) 1<: <... completed> -ERROR: tablespace: 1663, role: 10 diskquota exceeded per segment quota (seg0 127.0.0.1:6002 pid=2163) +ERROR: tablespace: 1663, role: 10 diskquota exceeded per segment quota (seg0 172.17.0.2:7002 pid=70646) 1: ABORT; ABORT -- Clean up the rejectmap on seg0. @@ -624,7 +624,7 @@ SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) Success: (1 row) 1<: <... completed> -ERROR: schema's disk space quota exceeded with name: 2200 (seg0 127.0.0.1:6002 pid=2163) +ERROR: schema's disk space quota exceeded with name: 2200 (seg0 172.17.0.2:7002 pid=70646) 1: ABORT; ABORT -- Clean up the rejectmap on seg0. 
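[Note on the hunks above: despite saying "with name:", these messages report the namespace oid. 2200 is the built-in oid of the public schema and is stable across clusters, which is why only the host/pid suffix of these lines needed updating, while the earlier test_postmaster_restart errors (user schemas with cluster-specific oids) changed wholesale. A minimal sketch for resolving such an oid, assuming a session on the same cluster:

SELECT nspname FROM pg_namespace WHERE oid = 2200;
-- expected result: public
]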
@@ -661,9 +661,9 @@ SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE'::text, 2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text) AS relname, be.target_type, be.target_oid FROM gp_dist_random('diskquota.rejectmap') AS be, read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0 ORDER BY relname DESC; segid | relnamespace | reltablespace | relowner | relname | target_type | target_oid -------+--------------+---------------+----------+-------------------------------+-----------------+------------ - 0 | 6104 | 0 | 10 | pg_aovisimap_blocked_t7_index | NAMESPACE_QUOTA | 2200 - 0 | 6104 | 0 | 10 | pg_aovisimap_blocked_t7 | NAMESPACE_QUOTA | 2200 - 0 | 6104 | 0 | 10 | pg_aoseg_blocked_t7 | NAMESPACE_QUOTA | 2200 + 0 | 7134 | 0 | 10 | pg_aovisimap_blocked_t7_index | NAMESPACE_QUOTA | 2200 + 0 | 7134 | 0 | 10 | pg_aovisimap_blocked_t7 | NAMESPACE_QUOTA | 2200 + 0 | 7134 | 0 | 10 | pg_aoseg_blocked_t7 | NAMESPACE_QUOTA | 2200 0 | 2200 | 0 | 10 | blocked_t7 | NAMESPACE_QUOTA | 2200 (4 rows) SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; @@ -672,7 +672,7 @@ SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) Success: (1 row) 1<: <... completed> -ERROR: schema's disk space quota exceeded with name: 2200 (seg0 127.0.0.1:6002 pid=2163) +ERROR: schema's disk space quota exceeded with name: 2200 (seg0 172.17.0.2:7002 pid=70646) 1: ABORT; ABORT -- Clean up the rejectmap on seg0. @@ -709,9 +709,9 @@ SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE'::text, 2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text) AS relname, be.target_type, be.target_oid FROM gp_dist_random('diskquota.rejectmap') AS be, read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0 ORDER BY relname DESC; segid | relnamespace | reltablespace | relowner | relname | target_type | target_oid -------+--------------+---------------+----------+-------------------------------+-----------------+------------ - 0 | 6104 | 0 | 10 | pg_aovisimap_blocked_t7_index | NAMESPACE_QUOTA | 2200 - 0 | 6104 | 0 | 10 | pg_aovisimap_blocked_t7 | NAMESPACE_QUOTA | 2200 - 0 | 6104 | 0 | 10 | pg_aocsseg_blocked_t7 | NAMESPACE_QUOTA | 2200 + 0 | 7134 | 0 | 10 | pg_aovisimap_blocked_t7_index | NAMESPACE_QUOTA | 2200 + 0 | 7134 | 0 | 10 | pg_aovisimap_blocked_t7 | NAMESPACE_QUOTA | 2200 + 0 | 7134 | 0 | 10 | pg_aocsseg_blocked_t7 | NAMESPACE_QUOTA | 2200 0 | 2200 | 0 | 10 | blocked_t7 | NAMESPACE_QUOTA | 2200 (4 rows) SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; @@ -720,7 +720,7 @@ SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) Success: (1 row) 1<: <... completed> -ERROR: schema's disk space quota exceeded with name: 2200 (seg0 127.0.0.1:6002 pid=2163) +ERROR: schema's disk space quota exceeded with name: 2200 (seg0 172.17.0.2:7002 pid=70646) 1: ABORT; ABORT -- Clean up the rejectmap on seg0. 
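[Note on the hunks above: the relnamespace change from 6104 to 7134 affects only the append-optimized auxiliary relations (pg_aoseg_*, pg_aocsseg_*, pg_aovisimap_*). Assuming these still live in the pg_aoseg system namespace, whose built-in oid differs between the old and new expected outputs, the value can be confirmed on a given cluster with a sketch like:

SELECT oid, nspname FROM pg_namespace WHERE nspname = 'pg_aoseg';
]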
diff --git a/gpcontrib/diskquota/tests/isolation2/expected/test_temporary_table.out b/gpcontrib/diskquota/tests/isolation2/expected/test_temporary_table.out index 8fa95c5e291..8d84ba1c5d5 100644 --- a/gpcontrib/diskquota/tests/isolation2/expected/test_temporary_table.out +++ b/gpcontrib/diskquota/tests/isolation2/expected/test_temporary_table.out @@ -2,10 +2,19 @@ !\retcode gpconfig -c diskquota.naptime -v 5 --skipvalidation; -- start_ignore +20251211:00:08:29:070839 gpconfig:cdw:gpadmin-[INFO]:-completed successfully with parameters '-c diskquota.naptime -v 5 --skipvalidation' + -- end_ignore (exited with code 0) !\retcode gpstop -u; -- start_ignore +20251211:00:08:29:070886 gpstop:cdw:gpadmin-[INFO]:-Starting gpstop with args: -u +20251211:00:08:29:070886 gpstop:cdw:gpadmin-[INFO]:-Gathering information and validating the environment... +20251211:00:08:29:070886 gpstop:cdw:gpadmin-[INFO]:-Obtaining Cloudberry Coordinator catalog information +20251211:00:08:29:070886 gpstop:cdw:gpadmin-[INFO]:-Obtaining Segment details from coordinator... +20251211:00:08:29:070886 gpstop:cdw:gpadmin-[INFO]:-Cloudberry Version: 'postgres (Apache Cloudberry) 3.0.0-devel build dev' +20251211:00:08:29:070886 gpstop:cdw:gpadmin-[INFO]:-Signalling all postmaster processes to reload + -- end_ignore (exited with code 0) @@ -38,6 +47,32 @@ INSERT 10000 -- Restart cluster fastly !\retcode gpstop -afr; -- start_ignore +20251211:00:08:50:070951 gpstop:cdw:gpadmin-[INFO]:-Starting gpstop with args: -afr +20251211:00:08:50:070951 gpstop:cdw:gpadmin-[INFO]:-Gathering information and validating the environment... +20251211:00:08:50:070951 gpstop:cdw:gpadmin-[INFO]:-Obtaining Cloudberry Coordinator catalog information +20251211:00:08:50:070951 gpstop:cdw:gpadmin-[INFO]:-Obtaining Segment details from coordinator... +20251211:00:08:50:070951 gpstop:cdw:gpadmin-[INFO]:-Cloudberry Version: 'postgres (Apache Cloudberry) 3.0.0-devel build dev' +20251211:00:08:50:070951 gpstop:cdw:gpadmin-[INFO]:-Commencing Coordinator instance shutdown with mode='fast' +20251211:00:08:50:070951 gpstop:cdw:gpadmin-[INFO]:-Coordinator segment instance directory=/home/gpadmin/cloudberry/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 +20251211:00:08:50:070951 gpstop:cdw:gpadmin-[INFO]:-Attempting forceful termination of any leftover coordinator process +20251211:00:08:50:070951 gpstop:cdw:gpadmin-[INFO]:-Terminating processes for segment /home/gpadmin/cloudberry/gpAux/gpdemo/datadirs/qddir/demoDataDir-1 +20251211:00:08:50:070951 gpstop:cdw:gpadmin-[INFO]:-Stopping coordinator standby host cdw mode=fast +20251211:00:08:50:070951 gpstop:cdw:gpadmin-[INFO]:-Successfully shutdown standby process on cdw +20251211:00:08:50:070951 gpstop:cdw:gpadmin-[INFO]:-Targeting dbid [2, 5, 3, 6, 4, 7] for shutdown +20251211:00:08:50:070951 gpstop:cdw:gpadmin-[INFO]:-Commencing parallel primary segment instance shutdown, please wait... +20251211:00:08:50:070951 gpstop:cdw:gpadmin-[INFO]:-0.00% of jobs completed +20251211:00:08:53:070951 gpstop:cdw:gpadmin-[INFO]:-100.00% of jobs completed +20251211:00:08:53:070951 gpstop:cdw:gpadmin-[INFO]:-Commencing parallel mirror segment instance shutdown, please wait... 
+20251211:00:08:53:070951 gpstop:cdw:gpadmin-[INFO]:-0.00% of jobs completed +20251211:00:08:55:070951 gpstop:cdw:gpadmin-[INFO]:-100.00% of jobs completed +20251211:00:08:55:070951 gpstop:cdw:gpadmin-[INFO]:----------------------------------------------------- +20251211:00:08:55:070951 gpstop:cdw:gpadmin-[INFO]:- Segments stopped successfully = 6 +20251211:00:08:55:070951 gpstop:cdw:gpadmin-[INFO]:- Segments with errors during stop = 0 +20251211:00:08:55:070951 gpstop:cdw:gpadmin-[INFO]:----------------------------------------------------- +20251211:00:08:55:070951 gpstop:cdw:gpadmin-[INFO]:-Successfully shutdown 6 of 6 segment instances +20251211:00:08:55:070951 gpstop:cdw:gpadmin-[INFO]:-Database successfully shutdown with no errors reported +20251211:00:08:55:070951 gpstop:cdw:gpadmin-[INFO]:-Restarting System... + -- end_ignore (exited with code 0) @@ -62,9 +97,18 @@ DROP !\retcode gpconfig -c diskquota.naptime -v 0 --skipvalidation; -- start_ignore +20251211:00:09:07:073948 gpconfig:cdw:gpadmin-[INFO]:-completed successfully with parameters '-c diskquota.naptime -v 0 --skipvalidation' + -- end_ignore (exited with code 0) !\retcode gpstop -u; -- start_ignore +20251211:00:09:07:073995 gpstop:cdw:gpadmin-[INFO]:-Starting gpstop with args: -u +20251211:00:09:07:073995 gpstop:cdw:gpadmin-[INFO]:-Gathering information and validating the environment... +20251211:00:09:07:073995 gpstop:cdw:gpadmin-[INFO]:-Obtaining Cloudberry Coordinator catalog information +20251211:00:09:07:073995 gpstop:cdw:gpadmin-[INFO]:-Obtaining Segment details from coordinator... +20251211:00:09:07:073995 gpstop:cdw:gpadmin-[INFO]:-Cloudberry Version: 'postgres (Apache Cloudberry) 3.0.0-devel build dev' +20251211:00:09:07:073995 gpstop:cdw:gpadmin-[INFO]:-Signalling all postmaster processes to reload + -- end_ignore (exited with code 0) diff --git a/gpcontrib/diskquota/tests/isolation2/expected/test_worker_timeout.out b/gpcontrib/diskquota/tests/isolation2/expected/test_worker_timeout.out index 5f855a7b80c..9f62c0d9ffc 100644 --- a/gpcontrib/diskquota/tests/isolation2/expected/test_worker_timeout.out +++ b/gpcontrib/diskquota/tests/isolation2/expected/test_worker_timeout.out @@ -1,6 +1,19 @@ !\retcode gpconfig -c diskquota.worker_timeout -v 1; +-- start_ignore +20251211:00:09:53:077489 gpconfig:cdw:gpadmin-[INFO]:-completed successfully with parameters '-c diskquota.worker_timeout -v 1' + +-- end_ignore (exited with code 0) !\retcode gpstop -u; +-- start_ignore +20251211:00:09:53:077537 gpstop:cdw:gpadmin-[INFO]:-Starting gpstop with args: -u +20251211:00:09:53:077537 gpstop:cdw:gpadmin-[INFO]:-Gathering information and validating the environment... +20251211:00:09:53:077537 gpstop:cdw:gpadmin-[INFO]:-Obtaining Cloudberry Coordinator catalog information +20251211:00:09:53:077537 gpstop:cdw:gpadmin-[INFO]:-Obtaining Segment details from coordinator... 
+20251211:00:09:53:077537 gpstop:cdw:gpadmin-[INFO]:-Cloudberry Version: 'postgres (Apache Cloudberry) 3.0.0-devel build dev' +20251211:00:09:53:077537 gpstop:cdw:gpadmin-[INFO]:-Signalling all postmaster processes to reload + +-- end_ignore (exited with code 0) SELECT gp_inject_fault_infinite('diskquota_worker_main', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=-1; @@ -33,6 +46,19 @@ SELECT gp_inject_fault_infinite('diskquota_worker_main', 'resume', dbid) FROM gp ERROR: canceling statement due to user request !\retcode gpconfig -r diskquota.worker_timeout; +-- start_ignore +20251211:00:09:56:077570 gpconfig:cdw:gpadmin-[INFO]:-completed successfully with parameters '-r diskquota.worker_timeout' + +-- end_ignore (exited with code 0) !\retcode gpstop -u; +-- start_ignore +20251211:00:09:56:077618 gpstop:cdw:gpadmin-[INFO]:-Starting gpstop with args: -u +20251211:00:09:56:077618 gpstop:cdw:gpadmin-[INFO]:-Gathering information and validating the environment... +20251211:00:09:56:077618 gpstop:cdw:gpadmin-[INFO]:-Obtaining Cloudberry Coordinator catalog information +20251211:00:09:56:077618 gpstop:cdw:gpadmin-[INFO]:-Obtaining Segment details from coordinator... +20251211:00:09:56:077618 gpstop:cdw:gpadmin-[INFO]:-Cloudberry Version: 'postgres (Apache Cloudberry) 3.0.0-devel build dev' +20251211:00:09:56:077618 gpstop:cdw:gpadmin-[INFO]:-Signalling all postmaster processes to reload + +-- end_ignore (exited with code 0) diff --git a/gpcontrib/diskquota/tests/isolation2/expected7/test_create_extension.out b/gpcontrib/diskquota/tests/isolation2/expected7/test_create_extension.out deleted file mode 100644 index f34d591a4da..00000000000 --- a/gpcontrib/diskquota/tests/isolation2/expected7/test_create_extension.out +++ /dev/null @@ -1,15 +0,0 @@ -CREATE EXTENSION diskquota; -CREATE EXTENSION - -SELECT diskquota.init_table_size_table(); - init_table_size_table ------------------------ - -(1 row) - --- Wait after init so that diskquota.state is clean -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) diff --git a/gpcontrib/diskquota/tests/isolation2/expected7/test_drop_extension.out b/gpcontrib/diskquota/tests/isolation2/expected7/test_drop_extension.out deleted file mode 100644 index 7e2997004dd..00000000000 --- a/gpcontrib/diskquota/tests/isolation2/expected7/test_drop_extension.out +++ /dev/null @@ -1,12 +0,0 @@ -SELECT diskquota.pause(); - pause -------- - -(1 row) -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) -DROP EXTENSION diskquota; -DROP EXTENSION diff --git a/gpcontrib/diskquota/tests/isolation2/expected7/test_dropped_table.out b/gpcontrib/diskquota/tests/isolation2/expected7/test_dropped_table.out deleted file mode 100644 index 443e04bd91b..00000000000 --- a/gpcontrib/diskquota/tests/isolation2/expected7/test_dropped_table.out +++ /dev/null @@ -1,72 +0,0 @@ --- Ensure diskquota does not save information about dropped table during restart cluster by invalidates it at startup - -!\retcode gpconfig -c diskquota.naptime -v 5 --skipvalidation; --- start_ignore --- end_ignore -(exited with code 0) -!\retcode gpstop -u; --- start_ignore --- end_ignore -(exited with code 0) - -1: CREATE SCHEMA dropped_schema; -CREATE SCHEMA -1: SET search_path TO dropped_schema; -SET -1: SELECT diskquota.set_schema_quota('dropped_schema', '1 MB'); - set_schema_quota ------------------- - -(1 row) -1: SELECT diskquota.wait_for_worker_new_epoch(); - 
wait_for_worker_new_epoch ---------------------------- - t -(1 row) -1: CREATE TABLE dropped_table(id int) DISTRIBUTED BY (id); -CREATE TABLE -1: INSERT INTO dropped_table SELECT generate_series(1, 10000); -INSERT 0 10000 --- Wait for the diskquota bgworker refreshing the size of 'dropped_table'. -1: SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) -1: DROP TABLE dropped_table; -DROP TABLE -1q: ... - --- Restart cluster fastly -!\retcode gpstop -afr; --- start_ignore --- end_ignore -(exited with code 0) - --- Indicates that there is no dropped table in pg_catalog.pg_class -1: SELECT oid FROM pg_catalog.pg_class WHERE relname = 'dropped_table'; - oid ------ -(0 rows) --- Indicates that there are no entries in diskquota.table_size that are not present in pg_catalog.pg_class -1: SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) -1: SELECT tableid FROM diskquota.table_size WHERE NOT EXISTS (SELECT 1 FROM pg_catalog.pg_class WHERE tableid = oid) AND segid = -1; - tableid ---------- -(0 rows) -1: DROP SCHEMA dropped_schema CASCADE; -DROP SCHEMA -1q: ... - -!\retcode gpconfig -c diskquota.naptime -v 0 --skipvalidation; --- start_ignore --- end_ignore -(exited with code 0) -!\retcode gpstop -u; --- start_ignore --- end_ignore -(exited with code 0) diff --git a/gpcontrib/diskquota/tests/isolation2/expected7/test_ereport_from_seg.out b/gpcontrib/diskquota/tests/isolation2/expected7/test_ereport_from_seg.out deleted file mode 100644 index 67ae6925df9..00000000000 --- a/gpcontrib/diskquota/tests/isolation2/expected7/test_ereport_from_seg.out +++ /dev/null @@ -1,62 +0,0 @@ -CREATE SCHEMA efs1; -CREATE SCHEMA -SELECT diskquota.set_schema_quota('efs1', '1MB'); - set_schema_quota ------------------- - -(1 row) -CREATE TABLE efs1.t(i int); -CREATE TABLE - -INSERT INTO efs1.t SELECT generate_series(1, 10000); -INSERT 0 10000 --- wait for refresh of diskquota and check the quota size -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) -SELECT schema_name, quota_in_mb, nspsize_in_bytes FROM diskquota.show_fast_schema_quota_view WHERE schema_name = 'efs1'; - schema_name | quota_in_mb | nspsize_in_bytes --------------+-------------+------------------ - efs1 | 1 | 688128 -(1 row) - --- Enable check quota by relfilenode on seg0. -SELECT gp_inject_fault_infinite('ereport_warning_from_segment', 'skip', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) -INSERT INTO efs1.t SELECT generate_series(1, 10000); -INSERT 0 10000 - --- wait for refresh of diskquota and check whether the quota size changes -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) -SELECT schema_name, quota_in_mb, nspsize_in_bytes FROM diskquota.show_fast_schema_quota_view WHERE schema_name = 'efs1'; - schema_name | quota_in_mb | nspsize_in_bytes --------------+-------------+------------------ - efs1 | 1 | 1081344 -(1 row) - -DROP TABLE efs1.t; -DROP TABLE -DROP SCHEMA efs1; -DROP SCHEMA - --- Reset fault injection points set by us at the top of this test. 
-SELECT gp_inject_fault_infinite('ereport_warning_from_segment', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) diff --git a/gpcontrib/diskquota/tests/isolation2/expected7/test_fast_quota_view.out b/gpcontrib/diskquota/tests/isolation2/expected7/test_fast_quota_view.out deleted file mode 100644 index 75ee06e7da9..00000000000 --- a/gpcontrib/diskquota/tests/isolation2/expected7/test_fast_quota_view.out +++ /dev/null @@ -1,182 +0,0 @@ -CREATE SCHEMA s1; -CREATE SCHEMA -CREATE SCHEMA s2; -CREATE SCHEMA - -CREATE ROLE r LOGIN SUPERUSER; -CREATE ROLE - -!\retcode mkdir -p /tmp/spc1; --- start_ignore - --- end_ignore -(exited with code 0) -!\retcode mkdir -p /tmp/spc2; --- start_ignore - --- end_ignore -(exited with code 0) - -DROP TABLESPACE IF EXISTS spc1; -DROP TABLESPACE -CREATE TABLESPACE spc1 LOCATION '/tmp/spc1'; -CREATE TABLESPACE -DROP TABLESPACE IF EXISTS spc2; -DROP TABLESPACE -CREATE TABLESPACE spc2 LOCATION '/tmp/spc2'; -CREATE TABLESPACE - -SELECT diskquota.set_schema_quota('s1', '100 MB'); - set_schema_quota ------------------- - -(1 row) -SELECT diskquota.set_schema_tablespace_quota('s2', 'spc1','100 MB'); - set_schema_tablespace_quota ------------------------------ - -(1 row) -SELECT diskquota.set_role_quota('r', '100 MB'); - set_role_quota ----------------- - -(1 row) -SELECT diskquota.set_role_tablespace_quota('r', 'spc2', '100 MB'); - set_role_tablespace_quota ---------------------------- - -(1 row) - --- test show_fast_schema_quota_view and show_fast_schema_tablespace_quota_view -1: BEGIN; -BEGIN -1: CREATE TABLE s1.t(i int) DISTRIBUTED BY (i); -CREATE TABLE -1: INSERT INTO s1.t SELECT generate_series(1, 100000); -INSERT 0 100000 - -1: CREATE TABLE s2.t(i int) TABLESPACE spc1 DISTRIBUTED BY (i); -CREATE TABLE -1: INSERT INTO s2.t SELECT generate_series(1, 100000); -INSERT 0 100000 - -1: SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- check schema quota view before transaction commits -2: SELECT schema_name, quota_in_mb, nspsize_in_bytes FROM diskquota.show_fast_schema_quota_view; - schema_name | quota_in_mb | nspsize_in_bytes --------------+-------------+------------------ - s1 | 100 | 3932160 -(1 row) -2: SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view; - schema_name | tablespace_name | quota_in_mb | nspsize_tablespace_in_bytes --------------+-----------------+-------------+----------------------------- - s2 | spc1 | 100 | 3932160 -(1 row) - -1: COMMIT; -COMMIT -2: SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) -2: SELECT schema_name, quota_in_mb, nspsize_in_bytes FROM diskquota.show_fast_schema_quota_view; - schema_name | quota_in_mb | nspsize_in_bytes --------------+-------------+------------------ - s1 | 100 | 3932160 -(1 row) -2: SELECT schema_name, tablespace_name, quota_in_mb, nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view; - schema_name | tablespace_name | quota_in_mb | nspsize_tablespace_in_bytes --------------+-----------------+-------------+----------------------------- - s2 | spc1 | 100 | 3932160 -(1 row) - --- login r to test role quota view -1: SET ROLE r; -SET - --- test show_fast_role_quota_view and show_fast_role_tablespace_quota_view -1: BEGIN; -BEGIN -1: CREATE TABLE t1(i int) DISTRIBUTED BY (i); 
-CREATE TABLE -1: INSERT INTO t1 SELECT generate_series(1, 100000); -INSERT 0 100000 - -1: CREATE TABLE t2(i int) TABLESPACE spc2 DISTRIBUTED BY (i); -CREATE TABLE -1: INSERT INTO t2 SELECT generate_series(1, 100000); -INSERT 0 100000 - -1: SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- check role quota view before transaction commits -2: SELECT role_name, quota_in_mb, rolsize_in_bytes FROM diskquota.show_fast_role_quota_view; - role_name | quota_in_mb | rolsize_in_bytes ------------+-------------+------------------ - r | 100 | 7864320 -(1 row) -2: SELECT role_name, tablespace_name, quota_in_mb, rolsize_tablespace_in_bytes FROM diskquota.show_fast_role_tablespace_quota_view; - role_name | tablespace_name | quota_in_mb | rolsize_tablespace_in_bytes ------------+-----------------+-------------+----------------------------- - r | spc2 | 100 | 3932160 -(1 row) - -1: COMMIT; -COMMIT -2: SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) -2: SELECT role_name, quota_in_mb, rolsize_in_bytes FROM diskquota.show_fast_role_quota_view; - role_name | quota_in_mb | rolsize_in_bytes ------------+-------------+------------------ - r | 100 | 7864320 -(1 row) -2: SELECT role_name, tablespace_name, quota_in_mb, rolsize_tablespace_in_bytes FROM diskquota.show_fast_role_tablespace_quota_view; - role_name | tablespace_name | quota_in_mb | rolsize_tablespace_in_bytes ------------+-----------------+-------------+----------------------------- - r | spc2 | 100 | 3932160 -(1 row) - -DROP TABLE IF EXISTS s1.t; -DROP TABLE -DROP TABLE IF EXISTS s2.t; -DROP TABLE -DROP TABLE IF EXISTS t1; -DROP TABLE -DROP TABLE IF EXISTS t2; -DROP TABLE - -DROP SCHEMA IF EXISTS s1; -DROP SCHEMA -DROP SCHEMA IF EXISTS s2; -DROP SCHEMA -DROP ROLE IF EXISTS r; -DROP ROLE - -DROP TABLESPACE IF EXISTS spc1; -DROP TABLESPACE -DROP TABLESPACE IF EXISTS spc2; -DROP TABLESPACE - -!\retcode rm -rf /tmp/spc1; --- start_ignore - --- end_ignore -(exited with code 0) -!\retcode rm -rf /tmp/spc2; --- start_ignore - --- end_ignore -(exited with code 0) diff --git a/gpcontrib/diskquota/tests/isolation2/expected7/test_per_segment_config.out b/gpcontrib/diskquota/tests/isolation2/expected7/test_per_segment_config.out deleted file mode 100644 index 1a6deb8baed..00000000000 --- a/gpcontrib/diskquota/tests/isolation2/expected7/test_per_segment_config.out +++ /dev/null @@ -1,269 +0,0 @@ --- Test one session read tablespace segratio, --- and at the same time, another session --- update or insert the segratio - --- start_ignore -!\retcode mkdir -p /tmp/spc101; --- start_ignore - --- end_ignore -(exited with code 0) --- end_ignore -CREATE SCHEMA s101; -CREATE SCHEMA -DROP TABLESPACE IF EXISTS spc101; -DROP TABLESPACE -CREATE TABLESPACE spc101 LOCATION '/tmp/spc101'; -CREATE TABLESPACE - --- --- There is no tablesapce per segment quota configed yet --- - --- Read commited, first set_per_segment_quota, then set_schema_tablespace_quota -1: BEGIN; -BEGIN -1: SELECT diskquota.set_per_segment_quota('spc101', 1); - set_per_segment_quota ------------------------ - -(1 row) -2: BEGIN; -BEGIN -2&: SELECT diskquota.set_schema_tablespace_quota('s101', 'spc101','1 MB'); -1: COMMIT; -COMMIT -2<: <... 
completed> - set_schema_tablespace_quota ------------------------------ - -(1 row) -2: COMMIT; -COMMIT - -SELECT segratio FROM diskquota.quota_config, pg_namespace, diskquota.target WHERE targetoid = diskquota.target.rowId AND diskquota.target.primaryOid = oid AND nspname = 's101'; - segratio ----------- - 1 -(1 row) -SELECT segratio from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'spc101'; - segratio ----------- - 1 -(1 row) --- cleanup -truncate table diskquota.quota_config; -TRUNCATE TABLE -truncate table diskquota.target; -TRUNCATE TABLE - --- Read commited, first set_schema_tablespace_quota, then set_per_segment_quota, -1: BEGIN; -BEGIN -1: SELECT diskquota.set_schema_tablespace_quota('s101', 'spc101','1 MB'); - set_schema_tablespace_quota ------------------------------ - -(1 row) -2: BEGIN; -BEGIN -2&: SELECT diskquota.set_per_segment_quota('spc101', 1); -1: COMMIT; -COMMIT -2<: <... completed> - set_per_segment_quota ------------------------ - -(1 row) -2: COMMIT; -COMMIT - -SELECT segratio FROM diskquota.quota_config, pg_namespace, diskquota.target WHERE targetoid = diskquota.target.rowId AND diskquota.target.primaryOid = oid AND nspname = 's101'; - segratio ----------- - 1 -(1 row) -SELECT segratio from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'spc101'; - segratio ----------- - 1 -(1 row) --- cleanup -truncate table diskquota.quota_config; -TRUNCATE TABLE -truncate table diskquota.target; -TRUNCATE TABLE - --- --- There is already a tablesapce per segment quota configed --- - --- Read commited, first set_per_segment_quota, then set_schema_tablespace_quota -SELECT diskquota.set_per_segment_quota('spc101', 2); - set_per_segment_quota ------------------------ - -(1 row) -1: BEGIN; -BEGIN -1: SELECT diskquota.set_per_segment_quota('spc101', 1); - set_per_segment_quota ------------------------ - -(1 row) -2: BEGIN; -BEGIN -2&: SELECT diskquota.set_schema_tablespace_quota('s101', 'spc101','1 MB'); -1: COMMIT; -COMMIT -2<: <... completed> - set_schema_tablespace_quota ------------------------------ - -(1 row) -2: COMMIT; -COMMIT - -SELECT segratio FROM diskquota.quota_config, pg_namespace, diskquota.target WHERE targetoid = diskquota.target.rowId AND diskquota.target.primaryOid = oid AND nspname = 's101'; - segratio ----------- - 1 -(1 row) -SELECT segratio from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'spc101'; - segratio ----------- - 1 -(1 row) --- cleanup -truncate table diskquota.quota_config; -TRUNCATE TABLE -truncate table diskquota.target; -TRUNCATE TABLE - --- Read commited, first set_schema_tablespace_quota, then set_per_segment_quota, -SELECT diskquota.set_per_segment_quota('spc101', 2); - set_per_segment_quota ------------------------ - -(1 row) -1: BEGIN; -BEGIN -1: SELECT diskquota.set_schema_tablespace_quota('s101', 'spc101','1 MB'); - set_schema_tablespace_quota ------------------------------ - -(1 row) -2: BEGIN; -BEGIN -2&: SELECT diskquota.set_per_segment_quota('spc101', 1); -1: COMMIT; -COMMIT -2<: <... 
completed> - set_per_segment_quota ------------------------ - -(1 row) -2: COMMIT; -COMMIT - -SELECT segratio FROM diskquota.quota_config, pg_namespace, diskquota.target WHERE targetoid = diskquota.target.rowId AND diskquota.target.primaryOid = oid AND nspname = 's101'; - segratio ----------- - 1 -(1 row) -SELECT segratio from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'spc101'; - segratio ----------- - 1 -(1 row) --- cleanup -truncate table diskquota.quota_config; -TRUNCATE TABLE -truncate table diskquota.target; -TRUNCATE TABLE - --- Read commited, first delete per_segment_quota, then set_schema_tablespace_quota -SELECT diskquota.set_per_segment_quota('spc101', 2); - set_per_segment_quota ------------------------ - -(1 row) -1: BEGIN; -BEGIN -1: SELECT diskquota.set_per_segment_quota('spc101', -1); - set_per_segment_quota ------------------------ - -(1 row) -2: BEGIN; -BEGIN -2&: SELECT diskquota.set_schema_tablespace_quota('s101', 'spc101','1 MB'); -1: COMMIT; -COMMIT -2<: <... completed> - set_schema_tablespace_quota ------------------------------ - -(1 row) -2: COMMIT; -COMMIT - -SELECT segratio FROM diskquota.quota_config, pg_namespace, diskquota.target WHERE targetoid = diskquota.target.rowId AND diskquota.target.primaryOid = oid AND nspname = 's101'; - segratio ----------- - 0 -(1 row) -SELECT segratio from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'spc101'; - segratio ----------- -(0 rows) --- cleanup -truncate table diskquota.quota_config; -TRUNCATE TABLE -truncate table diskquota.target; -TRUNCATE TABLE - --- Read commited, first set_schema_tablespace_quota, then delete tablespace per segment ratio -SELECT diskquota.set_per_segment_quota('spc101', 2); - set_per_segment_quota ------------------------ - -(1 row) -1: BEGIN; -BEGIN -1: SELECT diskquota.set_schema_tablespace_quota('s101', 'spc101','1 MB'); - set_schema_tablespace_quota ------------------------------ - -(1 row) -2: BEGIN; -BEGIN -2&: SELECT diskquota.set_per_segment_quota('spc101', -1); -1: COMMIT; -COMMIT -2<: <... 
completed> - set_per_segment_quota ------------------------ - -(1 row) -2: COMMIT; -COMMIT - -SELECT segratio FROM diskquota.quota_config, pg_namespace, diskquota.target WHERE targetoid = diskquota.target.rowId AND diskquota.target.primaryOid = oid AND nspname = 's101'; - segratio ----------- - 0 -(1 row) -SELECT segratio from diskquota.quota_config, pg_tablespace where targetoid = oid and spcname = 'spc101'; - segratio ----------- -(0 rows) --- cleanup -truncate table diskquota.quota_config; -TRUNCATE TABLE -truncate table diskquota.target; -TRUNCATE TABLE -DROP SCHEMA s101; -DROP SCHEMA -DROP TABLESPACE spc101; -DROP TABLESPACE diff --git a/gpcontrib/diskquota/tests/isolation2/expected7/test_postmaster_restart.out b/gpcontrib/diskquota/tests/isolation2/expected7/test_postmaster_restart.out deleted file mode 100644 index ccc9c53a43f..00000000000 --- a/gpcontrib/diskquota/tests/isolation2/expected7/test_postmaster_restart.out +++ /dev/null @@ -1,162 +0,0 @@ -!\retcode gpconfig -c "diskquota.hard_limit" -v "on" > /dev/null; --- start_ignore - --- end_ignore -(exited with code 0) -!\retcode gpstop -u > /dev/null; --- start_ignore - --- end_ignore -(exited with code 0) - -1: CREATE SCHEMA postmaster_restart_s; -CREATE SCHEMA -1: SET search_path TO postmaster_restart_s; -SET - -1: SELECT diskquota.set_schema_quota('postmaster_restart_s', '1 MB'); - set_schema_quota ------------------- - -(1 row) -1: SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect fail -1: CREATE TABLE t1 AS SELECT generate_series(1,10000000); -ERROR: schema's disk space quota exceeded with name: 17614 (seg0 127.0.0.1:7002 pid=854097) -1q: ... - --- launcher should exist --- [p]ostgres is to filter out the pgrep itself -!\retcode pgrep -f "[p]ostgres.*launcher"; --- start_ignore -839083 -839087 -839094 -839097 -839109 -839112 -839139 -839157 -839160 - --- end_ignore -(exited with code 0) --- bgworker should exist -!\retcode pgrep -f "[p]ostgres.*diskquota.*isolation2test"; --- start_ignore -839377 - --- end_ignore -(exited with code 0) - --- stop postmaster -!\retcode pg_ctl -D $MASTER_DATA_DIRECTORY -w stop; --- start_ignore -waiting for server to shut down.... 
done -server stopped - --- end_ignore -(exited with code 0) - --- launcher should be terminated -!\retcode pgrep -f "[p]ostgres.*launcher"; --- start_ignore -839083 -839087 -839094 -839097 -839109 -839112 - --- end_ignore -(exited with code 0) --- bgworker should be terminated -!\retcode pgrep -f "[p]ostgres.*diskquota.*isolation2test"; --- start_ignore - --- end_ignore -(exited with code 1) - --- start postmaster --- -E needs to be changed to "-c gp_role=dispatch" for GPDB7 --- See https://github.com/greenplum-db/gpdb/pull/9396 -!\retcode pg_ctl -D $MASTER_DATA_DIRECTORY -w -o "-c gp_role=dispatch" start; --- start_ignore -waiting for server to start....2023-07-31 15:59:31.043830 CST,,,p854265,th579172224,,,,0,,,seg-1,,,,,"LOG","00000","starting PostgreSQL 12.12 (Greenplum Database 7.0.0-beta.4+dev.218.g9ec0a0a842 build dev) on x86_64-pc-linux-gnu, compiled by clang version 15.0.7, 64-bit",,,,,,,,"PostmasterMain","postmaster.c",1243, -2023-07-31 15:59:31.044012 CST,,,p854265,th579172224,,,,0,,,seg-1,,,,,"LOG","00000","listening on IPv4 address ""0.0.0.0"", port 7000",,,,,,,,"StreamServerPort","pqcomm.c",631, -2023-07-31 15:59:31.044060 CST,,,p854265,th579172224,,,,0,,,seg-1,,,,,"LOG","00000","listening on IPv6 address ""::"", port 7000",,,,,,,,"StreamServerPort","pqcomm.c",631, -2023-07-31 15:59:31.044140 CST,,,p854265,th579172224,,,,0,,,seg-1,,,,,"LOG","00000","listening on Unix socket ""/tmp/.s.PGSQL.7000""",,,,,,,,"StreamServerPort","pqcomm.c",625, -2023-07-31 15:59:31.076319 CST,,,p854265,th579172224,,,,0,,,seg-1,,,,,"LOG","00000","redirecting log output to logging collector process",,"Future log output will appear in directory ""log"".",,,,,,"SysLogger_Start","syslogger.c",929, - done -server started - --- end_ignore -(exited with code 0) --- Hopefully the bgworker can be started in 5 seconds -!\retcode sleep 5; --- start_ignore - --- end_ignore -(exited with code 0) - --- launcher should be restarted -!\retcode pgrep -f "[p]ostgres.*launcher"; --- start_ignore -839083 -839087 -839094 -839097 -839109 -839112 -854271 -854289 -854293 - --- end_ignore -(exited with code 0) --- bgworker should be restarted -!\retcode pgrep -f "[p]ostgres.*diskquota.*isolation2test"; --- start_ignore -854311 - --- end_ignore -(exited with code 0) - -1: SET search_path TO postmaster_restart_s; -SET -1: SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) --- expect fail -1: CREATE TABLE t2 AS SELECT generate_series(1,10000000); -ERROR: schema's disk space quota exceeded with name: 17614 (seg0 127.0.0.1:7002 pid=858309) --- enlarge the quota limits -1: SELECT diskquota.set_schema_quota('postmaster_restart_s', '100 MB'); - set_schema_quota ------------------- - -(1 row) -1: SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) --- expect succeed -1: CREATE TABLE t3 AS SELECT generate_series(1,1000000); -SELECT 1000000 - -1: DROP SCHEMA postmaster_restart_s CASCADE; -DROP SCHEMA -1q: ... 
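The sequence above is the synchronization idiom used throughout the diskquota suite: change a quota, call diskquota.wait_for_worker_new_epoch() so the background worker completes at least one full refresh cycle, and only then assert enforcement. A minimal sketch of the idiom, using only the UDFs exercised in this test (the schema name my_schema is illustrative):

    -- Tighten the quota, then wait for the worker to apply it.
    SELECT diskquota.set_schema_quota('my_schema', '1 MB');
    SELECT diskquota.wait_for_worker_new_epoch();
    -- A large write is now rejected with "schema's disk space quota exceeded".
    CREATE TABLE my_schema.t_big AS SELECT generate_series(1, 10000000) AS i;
    -- Loosen the quota and wait again; the same write succeeds.
    SELECT diskquota.set_schema_quota('my_schema', '100 MB');
    SELECT diskquota.wait_for_worker_new_epoch();
    CREATE TABLE my_schema.t_ok AS SELECT generate_series(1, 1000000) AS i;
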
-!\retcode gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null; --- start_ignore - --- end_ignore -(exited with code 0) diff --git a/gpcontrib/diskquota/tests/isolation2/expected7/test_rejectmap.out b/gpcontrib/diskquota/tests/isolation2/expected7/test_rejectmap.out deleted file mode 100644 index 385889fae66..00000000000 --- a/gpcontrib/diskquota/tests/isolation2/expected7/test_rejectmap.out +++ /dev/null @@ -1,738 +0,0 @@ --- --- This file contains tests for dispatching rejectmap and canceling --- queries in smgrextend hook by relation's relfilenode. --- - --- Enable check quota by relfilenode on seg0. -SELECT gp_inject_fault_infinite('enable_check_quota_by_relfilenode', 'skip', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) - --- this function return valid tablespaceoid. --- For role/namespace quota, return as it is. --- For namespace_tablespace/role_tablespace quota, return non-zero tablespaceoid. -CREATE OR REPLACE FUNCTION get_real_tablespace_oid(block_type text, tablespaceoid oid) /*in func*/ RETURNS oid AS /*in func*/ $$ /*in func*/ BEGIN /*in func*/ /*in func*/ CASE /*in func*/ WHEN (block_type = 'NAMESPACE') OR (block_type = 'ROLE') THEN RETURN tablespaceoid; /*in func*/ ELSE RETURN ( /*in func*/ CASE tablespaceoid /*in func*/ WHEN 0 THEN (SELECT dattablespace FROM pg_database WHERE datname = CURRENT_DATABASE()) /*in func*/ ELSE /*in func*/ tablespaceoid /*in func*/ END /*in func*/ ); /*in func*/ END CASE; /*in func*/ END; /*in func*/ $$ LANGUAGE plpgsql; /*in func*/ -CREATE OR REPLACE FUNCTION block_relation_on_seg0(rel regclass, block_type text, segexceeded boolean) RETURNS void AS $$ /*in func*/ DECLARE /*in func*/ bt int; /*in func*/ targetoid oid; /*in func*/ BEGIN /*in func*/ CASE block_type /*in func*/ WHEN 'NAMESPACE' THEN /*in func*/ bt = 0; /*in func*/ SELECT relnamespace INTO targetoid /*in func*/ FROM pg_class WHERE relname=rel::text; /*in func*/ WHEN 'ROLE' THEN /*in func*/ bt = 1; /*in func*/ SELECT relowner INTO targetoid /*in func*/ FROM pg_class WHERE relname=rel::text; /*in func*/ WHEN 'NAMESPACE_TABLESPACE' THEN /*in func*/ bt = 2; /*in func*/ SELECT relnamespace INTO targetoid /*in func*/ FROM pg_class WHERE relname=rel::text; /*in func*/ WHEN 'ROLE_TABLESPACE' THEN /*in func*/ bt = 3; /*in func*/ SELECT relowner INTO targetoid /*in func*/ FROM pg_class WHERE relname=rel::text; /*in func*/ END CASE; /*in func*/ PERFORM diskquota.refresh_rejectmap( /*in func*/ ARRAY[ /*in func*/ ROW (targetoid, /*in func*/ (SELECT oid FROM pg_database WHERE datname = CURRENT_DATABASE()), /*in func*/ (SELECT get_real_tablespace_oid( /*in func*/ block_type, /*in func*/ (SELECT pg_class.reltablespace FROM pg_class WHERE relname = rel::TEXT) /*in func*/ )), /*in func*/ bt, /*in func*/ segexceeded) /*in func*/ ]::diskquota.rejectmap_entry[], /*in func*/ ARRAY[rel]::oid[]) /*in func*/ FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; /*in func*/ END; $$ /*in func*/ LANGUAGE 'plpgsql'; -CREATE FUNCTION - --- 1. Test canceling the extending of an ordinary table. -CREATE TABLE blocked_t1(i int) DISTRIBUTED BY (i); -CREATE TABLE -INSERT INTO blocked_t1 SELECT generate_series(1, 100); -INSERT 0 100 --- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. 
-SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) - --- Insert a small amount of data into blocked_t1. It will hang up at check_rejectmap_by_relfilenode(). -1&: INSERT INTO blocked_t1 SELECT generate_series(1, 10000); - -SELECT gp_wait_until_triggered_fault('check_rejectmap_by_relfilenode', 1, dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_wait_until_triggered_fault -------------------------------- - Success: -(1 row) - --- Dispatch rejectmap to seg0. -SELECT block_relation_on_seg0('blocked_t1'::regclass, 'NAMESPACE'::text, false); - block_relation_on_seg0 ------------------------- - -(1 row) - -SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) - --- Session 1 will return and emit an error message saying that the quota limit is exceeded on seg0. -1<: <... completed> -ERROR: schema's disk space quota exceeded with name: 2200 (seg0 127.0.0.1:7002 pid=841032) - --- Clean up the rejectmap on seg0. -SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; - refresh_rejectmap -------------------- - -(1 row) - --- 2. Test canceling the extending of a toast relation. -CREATE TABLE blocked_t2(i text) DISTRIBUTED BY (i); -CREATE TABLE -INSERT INTO blocked_t2 SELECT generate_series(1, 100); -INSERT 0 100 --- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. -SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) - --- Insert a small amount of data into blocked_t2. It will hang up at check_rejectmap_by_relfilenode(). -1&: INSERT INTO blocked_t2 SELECT generate_series(1, 10000); - -SELECT gp_wait_until_triggered_fault('check_rejectmap_by_relfilenode', 1, dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_wait_until_triggered_fault -------------------------------- - Success: -(1 row) - --- Dispatch rejectmap to seg0. -SELECT block_relation_on_seg0('blocked_t2'::regclass, 'NAMESPACE'::text, false); - block_relation_on_seg0 ------------------------- - -(1 row) - -SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) - --- Session 1 will return and emit an error message saying that the quota limit is exceeded on seg0. -1<: <... completed> -ERROR: schema's disk space quota exceeded with name: 2200 (seg0 127.0.0.1:7002 pid=841032) - --- Clean up the rejectmap on seg0. -SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; - refresh_rejectmap -------------------- - -(1 row) - --- 3. Test canceling the extending of an appendonly relation. -CREATE TABLE blocked_t3(i int) WITH (appendonly=true) DISTRIBUTED BY (i); -CREATE TABLE -INSERT INTO blocked_t3 SELECT generate_series(1, 100); -INSERT 0 100 --- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. 
-SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) - --- Insert a small amount of data into blocked_t3. It will hang up at check_rejectmap_by_relfilenode(). -1&: INSERT INTO blocked_t3 SELECT generate_series(1, 10000); - -SELECT gp_wait_until_triggered_fault('check_rejectmap_by_relfilenode', 1, dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_wait_until_triggered_fault -------------------------------- - Success: -(1 row) - --- Dispatch rejectmap to seg0. -SELECT block_relation_on_seg0('blocked_t3'::regclass, 'NAMESPACE'::text, false); - block_relation_on_seg0 ------------------------- - -(1 row) - -SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) - --- Session 1 will return and emit an error message saying that the quota limit is exceeded on seg0. -1<: <... completed> -ERROR: schema's disk space quota exceeded with name: 2200 (seg0 127.0.0.1:7002 pid=841032) - --- Clean up the rejectmap on seg0. -SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; - refresh_rejectmap -------------------- - -(1 row) - --- 4. Test canceling the extending of an index relation. -CREATE TABLE blocked_t4(i int) DISTRIBUTED BY (i); -CREATE TABLE -CREATE INDEX blocked_t4_index ON blocked_t4(i); -CREATE INDEX -INSERT INTO blocked_t4 SELECT generate_series(1, 100); -INSERT 0 100 --- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. -SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) - --- Insert a small amount of data into blocked_t4. It will hang up at check_rejectmap_by_relfilenode(). -1&: INSERT INTO blocked_t4 SELECT generate_series(1, 10000); - -SELECT gp_wait_until_triggered_fault('check_rejectmap_by_relfilenode', 1, dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_wait_until_triggered_fault -------------------------------- - Success: -(1 row) - --- Dispatch rejectmap to seg0. -SELECT block_relation_on_seg0('blocked_t4_index'::regclass, 'NAMESPACE'::text, false); - block_relation_on_seg0 ------------------------- - -(1 row) - -SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) - --- Session 1 will return and emit an error message saying that the quota limit is exceeded on seg0. -1<: <... completed> -ERROR: schema's disk space quota exceeded with name: 2200 (seg0 127.0.0.1:7002 pid=841032) - --- Clean up the rejectmap on seg0. -SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; - refresh_rejectmap -------------------- - -(1 row) - --- 5. Test error message for NAMESPACE_TABLESPACE_QUOTA when the quota limit is exceeded on segments. 
-CREATE TABLE blocked_t5(i int) DISTRIBUTED BY (i); -CREATE TABLE -INSERT INTO blocked_t5 SELECT generate_series(1, 100); -INSERT 0 100 --- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. -SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) -1&: INSERT INTO blocked_t5 SELECT generate_series(1, 10000); - -SELECT gp_wait_until_triggered_fault('check_rejectmap_by_relfilenode', 1, dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_wait_until_triggered_fault -------------------------------- - Success: -(1 row) - -SELECT block_relation_on_seg0('blocked_t5'::regclass, 'NAMESPACE_TABLESPACE'::text, true); - block_relation_on_seg0 ------------------------- - -(1 row) -SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) -1<: <... completed> -ERROR: tablespace: 1663, schema: 2200 diskquota exceeded per segment quota (seg0 127.0.0.1:7002 pid=841032) --- Clean up the rejectmap on seg0. -SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; - refresh_rejectmap -------------------- - -(1 row) - --- 6. Test error message for ROLE_TABLESPACE_QUOTA when the quota limit is exceeded on segments. -CREATE TABLE blocked_t6(i int) DISTRIBUTED BY (i); -CREATE TABLE -INSERT INTO blocked_t6 SELECT generate_series(1, 100); -INSERT 0 100 --- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. -SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) - -1&: INSERT INTO blocked_t6 SELECT generate_series(1, 10000); - -SELECT gp_wait_until_triggered_fault('check_rejectmap_by_relfilenode', 1, dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_wait_until_triggered_fault -------------------------------- - Success: -(1 row) - -SELECT block_relation_on_seg0('blocked_t6'::regclass, 'ROLE_TABLESPACE'::text, true); - block_relation_on_seg0 ------------------------- - -(1 row) -SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) -1<: <... completed> -ERROR: tablespace: 1663, role: 10 diskquota exceeded per segment quota (seg0 127.0.0.1:7002 pid=841032) --- Clean up the rejectmap on seg0. -SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; - refresh_rejectmap -------------------- - -(1 row) - --- Do some clean-ups. -DROP TABLE blocked_t1; -DROP TABLE -DROP TABLE blocked_t2; -DROP TABLE -DROP TABLE blocked_t3; -DROP TABLE -DROP TABLE blocked_t4; -DROP TABLE -DROP TABLE blocked_t5; -DROP TABLE -DROP TABLE blocked_t6; -DROP TABLE - --- --- Below are helper functions for testing adding uncommitted relations to rejectmap. 
--- --- start_ignore -CREATE OR REPLACE LANGUAGE plpython3u; -CREATE LANGUAGE --- end_ignore -CREATE TYPE cached_relation_entry AS ( reloid oid, relname text, relowner oid, relnamespace oid, reltablespace oid, relfilenode oid, segid int); -CREATE TYPE - --- This function dumps given relation_cache entries to the given file. -CREATE OR REPLACE FUNCTION dump_relation_cache_to_file(filename text) RETURNS void AS $$ rv = plpy.execute(""" SELECT (oid, relname, relowner, relnamespace, reltablespace, relfilenode, gp_segment_id)::cached_relation_entry FROM gp_dist_random('pg_class') """) with open(filename, 'wt') as f: for v in rv: row = v['row'] # The composite type results are different between GP6 & GP7 if isinstance(row, dict): r = "{0},{1},{2},{3},{4},{5},{6}".format( row['reloid'], row['relname'], row['relowner'], row['relnamespace'], row['reltablespace'], row['relfilenode'], row['segid']) else: r = row[1:-1] f.write(r + '\n') $$ LANGUAGE plpython3u; -CREATE FUNCTION - --- This function reads relation_cache entries from the given file. -CREATE OR REPLACE FUNCTION read_relation_cache_from_file(filename text) RETURNS SETOF cached_relation_entry AS $$ with open(filename) as f: for l in f: r = l.split(',') yield (r[0], r[1], r[2], r[3], r[4], r[5], r[6]) $$ LANGUAGE plpython3u; -CREATE FUNCTION - --- This function replaces the oid appears in the auxiliary relation's name --- with the corresponding relname of that oid. -CREATE OR REPLACE FUNCTION replace_oid_with_relname(given_name text, filename text) RETURNS text AS $$ /*in func*/ BEGIN /*in func*/ RETURN COALESCE( /*in func*/ REGEXP_REPLACE(given_name, /*in func*/ '^(pg_toast_|pg_aoseg_|pg_aovisimap_|pg_aoblkdir_|pg_aocsseg_)\d+', /*in func*/ '\1' || /*in func*/ (SELECT DISTINCT relname FROM read_relation_cache_from_file(filename) /*in func*/ WHERE REGEXP_REPLACE(given_name, '\D', '', 'g') <> '' /*in func*/ AND reloid=REGEXP_REPLACE(given_name, '\D', '', 'g')::oid), 'g'), given_name); /*in func*/ END; /*in func*/ $$ LANGUAGE plpgsql; -CREATE FUNCTION - --- This function helps dispatch rejectmap for the given relation to seg0. 
-CREATE OR REPLACE FUNCTION block_uncommitted_relation_on_seg0(rel text, block_type text, segexceeded boolean, filename text) RETURNS void AS $$ /*in func*/ DECLARE /*in func*/ bt int; /*in func*/ targetoid oid; /*in func*/ BEGIN /*in func*/ CASE block_type /*in func*/ WHEN 'NAMESPACE' THEN /*in func*/ bt = 0; /*in func*/ SELECT relnamespace INTO targetoid /*in func*/ FROM read_relation_cache_from_file(filename) /*in func*/ WHERE relname=rel::text AND segid=0; /*in func*/ WHEN 'ROLE' THEN /*in func*/ bt = 1; /*in func*/ SELECT relowner INTO targetoid /*in func*/ FROM read_relation_cache_from_file(filename) /*in func*/ WHERE relname=rel::text AND segid=0; /*in func*/ WHEN 'NAMESPACE_TABLESPACE' THEN /*in func*/ bt = 2; /*in func*/ SELECT relnamespace INTO targetoid /*in func*/ FROM read_relation_cache_from_file(filename) /*in func*/ WHERE relname=rel::text AND segid=0; /*in func*/ WHEN 'ROLE_TABLESPACE' THEN /*in func*/ bt = 3; /*in func*/ SELECT relowner INTO targetoid /*in func*/ FROM read_relation_cache_from_file(filename) /*in func*/ WHERE relname=rel::text AND segid=0; /*in func*/ END CASE; /*in func*/ PERFORM diskquota.refresh_rejectmap( /*in func*/ ARRAY[ /*in func*/ ROW (targetoid, /*in func*/ (SELECT oid FROM pg_database WHERE datname = CURRENT_DATABASE()), /*in func*/ (SELECT get_real_tablespace_oid( /*in func*/ block_type, /*in func*/ (SELECT reltablespace /*in func*/ FROM read_relation_cache_from_file(filename) /*in func*/ WHERE relname = rel::text /*in func*/ AND segid = 0) /*in func*/ )), /*in func*/ bt, /*in func*/ segexceeded) /*in func*/ ]::diskquota.rejectmap_entry[], /*in func*/ ARRAY[(SELECT reloid FROM read_relation_cache_from_file(filename) /*in func*/ WHERE relname=rel::text AND segid=0)::regclass]::oid[]) /*in func*/ FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; /*in func*/ END; $$ /*in func*/ LANGUAGE 'plpgsql'; -CREATE FUNCTION - --- 7. Test that we are able to block an ordinary relation on seg0 by its relnamespace. -1: BEGIN; -BEGIN -1: CREATE TABLE blocked_t7(i int) DISTRIBUTED BY (i); -CREATE TABLE -1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); - dump_relation_cache_to_file ------------------------------ - -(1 row) --- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. -SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) --- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). -1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); -SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE'::text, false, '/tmp/test_rejectmap.csv'::text); - block_uncommitted_relation_on_seg0 ------------------------------------- - -(1 row) --- Show that blocked_t7 is blocked on seg0. 
-2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text), be.target_type, be.target_oid FROM gp_dist_random('diskquota.rejectmap') AS be, read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0; - segid | relnamespace | reltablespace | relowner | replace_oid_with_relname | target_type | target_oid --------+--------------+---------------+----------+--------------------------+-----------------+------------ - 0 | 2200 | 0 | 10 | blocked_t7 | NAMESPACE_QUOTA | 2200 -(1 row) -SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) -1<: <... completed> -ERROR: schema's disk space quota exceeded with name: 2200 (seg0 127.0.0.1:7002 pid=841032) -1: ABORT; -ROLLBACK --- Clean up the rejectmap on seg0. -SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; - refresh_rejectmap -------------------- - -(1 row) - --- 8. Test that we are able to block an ordinary relation on seg0 by its relowner. -1: BEGIN; -BEGIN -1: CREATE TABLE blocked_t7(i int) DISTRIBUTED BY (i); -CREATE TABLE -1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); - dump_relation_cache_to_file ------------------------------ - -(1 row) --- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. -SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) --- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). -1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); -SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'ROLE'::text, false, '/tmp/test_rejectmap.csv'::text); - block_uncommitted_relation_on_seg0 ------------------------------------- - -(1 row) --- Show that blocked_t7 is blocked on seg0. -2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text), be.target_type, be.target_oid FROM gp_dist_random('diskquota.rejectmap') AS be, read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0; - segid | relnamespace | reltablespace | relowner | replace_oid_with_relname | target_type | target_oid --------+--------------+---------------+----------+--------------------------+-------------+------------ - 0 | 2200 | 0 | 10 | blocked_t7 | ROLE_QUOTA | 10 -(1 row) -SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) -1<: <... completed> -ERROR: role's disk space quota exceeded with name: 10 (seg0 127.0.0.1:7002 pid=841032) -1: ABORT; -ROLLBACK --- Clean up the rejectmap on seg0. -SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; - refresh_rejectmap -------------------- - -(1 row) - --- 9. 
Test that we are able to block an ordinary relation on seg0 by its relnamespace and reltablespace. -1: BEGIN; -BEGIN -1: CREATE TABLE blocked_t7(i int) DISTRIBUTED BY (i); -CREATE TABLE -1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); - dump_relation_cache_to_file ------------------------------ - -(1 row) --- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. -SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) --- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). -1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); -SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE_TABLESPACE'::text, false, '/tmp/test_rejectmap.csv'::text); - block_uncommitted_relation_on_seg0 ------------------------------------- - -(1 row) --- Show that blocked_t7 is blocked on seg0. -2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text), be.target_type, be.target_oid FROM gp_dist_random('diskquota.rejectmap') AS be, read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0; - segid | relnamespace | reltablespace | relowner | replace_oid_with_relname | target_type | target_oid --------+--------------+---------------+----------+--------------------------+----------------------------+------------ - 0 | 2200 | 0 | 10 | blocked_t7 | NAMESPACE_TABLESPACE_QUOTA | 2200 -(1 row) -SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) -1<: <... completed> -ERROR: tablespace: 1663, schema: 2200 diskquota exceeded (seg0 127.0.0.1:7002 pid=841032) -1: ABORT; -ROLLBACK --- Clean up the rejectmap on seg0. -SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; - refresh_rejectmap -------------------- - -(1 row) - --- 10. Test that we are able to block an ordinary relation on seg0 by its relowner and reltablespace. -1: BEGIN; -BEGIN -1: CREATE TABLE blocked_t7(i int) DISTRIBUTED BY (i); -CREATE TABLE -1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); - dump_relation_cache_to_file ------------------------------ - -(1 row) --- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. -SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) --- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). -1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); -SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'ROLE_TABLESPACE'::text, false, '/tmp/test_rejectmap.csv'::text); - block_uncommitted_relation_on_seg0 ------------------------------------- - -(1 row) --- Show that blocked_t7 is blocked on seg0. 
-2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text), be.target_type, be.target_oid FROM gp_dist_random('diskquota.rejectmap') AS be, read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0; - segid | relnamespace | reltablespace | relowner | replace_oid_with_relname | target_type | target_oid --------+--------------+---------------+----------+--------------------------+-----------------------+------------ - 0 | 2200 | 0 | 10 | blocked_t7 | ROLE_TABLESPACE_QUOTA | 10 -(1 row) -SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) -1<: <... completed> -ERROR: tablespace: 1663, role: 10 diskquota exceeded (seg0 127.0.0.1:7002 pid=841032) -1: ABORT; -ROLLBACK --- Clean up the rejectmap on seg0. -SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; - refresh_rejectmap -------------------- - -(1 row) - --- 11. Test that we are able to block an ordinary relation on seg0 by its relnamespace and reltablespace (segexceeded=true). -1: BEGIN; -BEGIN -1: CREATE TABLE blocked_t7(i int) DISTRIBUTED BY (i); -CREATE TABLE -1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); - dump_relation_cache_to_file ------------------------------ - -(1 row) --- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. -SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) --- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). -1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); -SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE_TABLESPACE'::text, true, '/tmp/test_rejectmap.csv'::text); - block_uncommitted_relation_on_seg0 ------------------------------------- - -(1 row) --- Show that blocked_t7 is blocked on seg0. -2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text), be.target_type, be.target_oid FROM gp_dist_random('diskquota.rejectmap') AS be, read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0; - segid | relnamespace | reltablespace | relowner | replace_oid_with_relname | target_type | target_oid --------+--------------+---------------+----------+--------------------------+----------------------------+------------ - 0 | 2200 | 0 | 10 | blocked_t7 | NAMESPACE_TABLESPACE_QUOTA | 2200 -(1 row) -SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) -1<: <... completed> -ERROR: tablespace: 1663, schema: 2200 diskquota exceeded per segment quota (seg0 127.0.0.1:7002 pid=841032) -1: ABORT; -ROLLBACK --- Clean up the rejectmap on seg0. 
-SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; - refresh_rejectmap -------------------- - -(1 row) - --- 12. Test that we are able to block an ordinary relation on seg0 by its relowner and reltablespace (segexceeded=true). -1: BEGIN; -BEGIN -1: CREATE TABLE blocked_t7(i int) DISTRIBUTED BY (i); -CREATE TABLE -1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); - dump_relation_cache_to_file ------------------------------ - -(1 row) --- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. -SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) --- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). -1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); -SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'ROLE_TABLESPACE'::text, true, '/tmp/test_rejectmap.csv'::text); - block_uncommitted_relation_on_seg0 ------------------------------------- - -(1 row) --- Show that blocked_t7 is blocked on seg0. -2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text), be.target_type, be.target_oid FROM gp_dist_random('diskquota.rejectmap') AS be, read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0; - segid | relnamespace | reltablespace | relowner | replace_oid_with_relname | target_type | target_oid --------+--------------+---------------+----------+--------------------------+-----------------------+------------ - 0 | 2200 | 0 | 10 | blocked_t7 | ROLE_TABLESPACE_QUOTA | 10 -(1 row) -SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) -1<: <... completed> -ERROR: tablespace: 1663, role: 10 diskquota exceeded per segment quota (seg0 127.0.0.1:7002 pid=841032) -1: ABORT; -ROLLBACK --- Clean up the rejectmap on seg0. -SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; - refresh_rejectmap -------------------- - -(1 row) - --- 13. Test that we are able to block a toast relation on seg0 by its namespace. -1: BEGIN; -BEGIN -1: CREATE TABLE blocked_t7(i text) DISTRIBUTED BY (i); -CREATE TABLE -1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); - dump_relation_cache_to_file ------------------------------ - -(1 row) --- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. -SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) --- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). -1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); -SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE'::text, true, '/tmp/test_rejectmap.csv'::text); - block_uncommitted_relation_on_seg0 ------------------------------------- - -(1 row) --- Show that blocked_t7 is blocked on seg0. 
-2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text) AS relname, be.target_type, be.target_oid FROM gp_dist_random('diskquota.rejectmap') AS be, read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0 ORDER BY relname DESC; - segid | relnamespace | reltablespace | relowner | relname | target_type | target_oid --------+--------------+---------------+----------+---------------------------+-----------------+------------ - 0 | 99 | 0 | 10 | pg_toast_blocked_t7_index | NAMESPACE_QUOTA | 2200 - 0 | 99 | 0 | 10 | pg_toast_blocked_t7 | NAMESPACE_QUOTA | 2200 - 0 | 2200 | 0 | 10 | blocked_t7 | NAMESPACE_QUOTA | 2200 -(3 rows) -SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) -1<: <... completed> -ERROR: schema's disk space quota exceeded with name: 2200 (seg0 127.0.0.1:7002 pid=841032) -1: ABORT; -ROLLBACK --- Clean up the rejectmap on seg0. -SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; - refresh_rejectmap -------------------- - -(1 row) - --- 14. Test that we are able to block an appendonly relation on seg0 by its namespace. -1: BEGIN; -BEGIN -1: CREATE TABLE blocked_t7(i int) WITH (appendonly=true) DISTRIBUTED BY (i); -CREATE TABLE -1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); - dump_relation_cache_to_file ------------------------------ - -(1 row) --- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. -SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) --- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). -1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); -SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE'::text, true, '/tmp/test_rejectmap.csv'::text); - block_uncommitted_relation_on_seg0 ------------------------------------- - -(1 row) --- Show that blocked_t7 is blocked on seg0. -2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text) AS relname, be.target_type, be.target_oid FROM gp_dist_random('diskquota.rejectmap') AS be, read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0 ORDER BY relname DESC; - segid | relnamespace | reltablespace | relowner | relname | target_type | target_oid --------+--------------+---------------+----------+-------------------------------+-----------------+------------ - 0 | 6104 | 0 | 10 | pg_aovisimap_blocked_t7_index | NAMESPACE_QUOTA | 2200 - 0 | 6104 | 0 | 10 | pg_aovisimap_blocked_t7 | NAMESPACE_QUOTA | 2200 - 0 | 6104 | 0 | 10 | pg_aoseg_blocked_t7 | NAMESPACE_QUOTA | 2200 - 0 | 2200 | 0 | 10 | blocked_t7 | NAMESPACE_QUOTA | 2200 -(4 rows) -SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) -1<: <... 
completed> -ERROR: schema's disk space quota exceeded with name: 2200 (seg0 127.0.0.1:7002 pid=841032) -1: ABORT; -ROLLBACK --- Clean up the rejectmap on seg0. -SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; - refresh_rejectmap -------------------- - -(1 row) - --- 15. Test that we are able to block an appendonly (column oriented) relation on seg0 by its namespace. -1: BEGIN; -BEGIN -1: CREATE TABLE blocked_t7(i int) WITH (appendonly=true, orientation=column) DISTRIBUTED BY (i); -CREATE TABLE -1: SELECT dump_relation_cache_to_file('/tmp/test_rejectmap.csv'); - dump_relation_cache_to_file ------------------------------ - -(1 row) --- Inject 'suspension' to check_rejectmap_by_relfilenode on seg0. -SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) --- Insert a small amount of data into blocked_t7. It will hang up at check_rejectmap_by_relfilenode(). -1&: INSERT INTO blocked_t7 SELECT generate_series(1, 10000); -SELECT block_uncommitted_relation_on_seg0('blocked_t7'::text, 'NAMESPACE'::text, true, '/tmp/test_rejectmap.csv'::text); - block_uncommitted_relation_on_seg0 ------------------------------------- - -(1 row) --- Show that blocked_t7 is blocked on seg0. -2: SELECT rel.segid, rel.relnamespace, rel.reltablespace, rel.relowner, replace_oid_with_relname(rel.relname, '/tmp/test_rejectmap.csv'::text) AS relname, be.target_type, be.target_oid FROM gp_dist_random('diskquota.rejectmap') AS be, read_relation_cache_from_file('/tmp/test_rejectmap.csv') AS rel WHERE be.segid=rel.segid AND be.relnode=rel.relfilenode AND rel.relfilenode<>0 ORDER BY relname DESC; - segid | relnamespace | reltablespace | relowner | relname | target_type | target_oid --------+--------------+---------------+----------+-------------------------------+-----------------+------------ - 0 | 6104 | 0 | 10 | pg_aovisimap_blocked_t7_index | NAMESPACE_QUOTA | 2200 - 0 | 6104 | 0 | 10 | pg_aovisimap_blocked_t7 | NAMESPACE_QUOTA | 2200 - 0 | 6104 | 0 | 10 | pg_aocsseg_blocked_t7 | NAMESPACE_QUOTA | 2200 - 0 | 2200 | 0 | 10 | blocked_t7 | NAMESPACE_QUOTA | 2200 -(4 rows) -SELECT gp_inject_fault_infinite('check_rejectmap_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) -1<: <... completed> -ERROR: schema's disk space quota exceeded with name: 2200 (seg0 127.0.0.1:7002 pid=841032) -1: ABORT; -ROLLBACK --- Clean up the rejectmap on seg0. -SELECT diskquota.refresh_rejectmap( ARRAY[]::diskquota.rejectmap_entry[], ARRAY[]::oid[]) FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; - refresh_rejectmap -------------------- - -(1 row) - --- Reset fault injection points set by us at the top of this test. 
-SELECT gp_inject_fault_infinite('enable_check_quota_by_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content=0; - gp_inject_fault_infinite --------------------------- - Success: -(1 row) diff --git a/gpcontrib/diskquota/tests/isolation2/expected7/test_relation_cache.out b/gpcontrib/diskquota/tests/isolation2/expected7/test_relation_cache.out deleted file mode 100644 index 14ad39661aa..00000000000 --- a/gpcontrib/diskquota/tests/isolation2/expected7/test_relation_cache.out +++ /dev/null @@ -1,70 +0,0 @@ -CREATE DATABASE tempdb1; -CREATE DATABASE -CREATE DATABASE tempdb2; -CREATE DATABASE - --- perpare extension -1:@db_name tempdb1: CREATE EXTENSION diskquota; -CREATE EXTENSION -1:@db_name tempdb1: SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) -2:@db_name tempdb2: CREATE EXTENSION diskquota; -CREATE EXTENSION -2:@db_name tempdb2: SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- create a table in tempdb1 -1:@db_name tempdb1: BEGIN; -BEGIN -1:@db_name tempdb1: CREATE TABLE t(i int); -CREATE TABLE -1:@db_name tempdb1: INSERT INTO t select generate_series(1, 10000); -INSERT 0 10000 - --- query relation_cache in tempdb2 -2:@db_name tempdb2: SELECT count(*) from diskquota.show_relation_cache(); - count -------- - 0 -(1 row) - -1:@db_name tempdb1: ABORT; -ROLLBACK - -1:@db_name tempdb1: SELECT diskquota.pause(); - pause -------- - -(1 row) -1:@db_name tempdb1: SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) -1:@db_name tempdb1: DROP EXTENSION diskquota; -DROP EXTENSION -2:@db_name tempdb2: SELECT diskquota.pause(); - pause -------- - -(1 row) -2:@db_name tempdb2: SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) -2:@db_name tempdb2: DROP EXTENSION diskquota; -DROP EXTENSION -1q: ... -2q: ... - -DROP DATABASE tempdb1; -DROP DATABASE -DROP DATABASE tempdb2; -DROP DATABASE diff --git a/gpcontrib/diskquota/tests/isolation2/expected7/test_relation_size.out b/gpcontrib/diskquota/tests/isolation2/expected7/test_relation_size.out deleted file mode 100644 index ee2e4241e82..00000000000 --- a/gpcontrib/diskquota/tests/isolation2/expected7/test_relation_size.out +++ /dev/null @@ -1,104 +0,0 @@ -SELECT diskquota.pause(); - pause -------- - -(1 row) -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- --- 1. Test that when a relation is dropped before diskquota.relation_size() --- applying stat(2) on the physical file, diskquota.relation_size() consumes --- the error and returns 0. --- - -CREATE TABLE t_dropped(i int) DISTRIBUTED BY (i); -CREATE TABLE --- Insert a small amount of data to 't_dropped'. -INSERT INTO t_dropped SELECT generate_series(1, 100); -INSERT 0 100 --- Shows that the size of relfilenode is not zero. -SELECT diskquota.relation_size('t_dropped'); - relation_size ---------------- - 98304 -(1 row) - --- Inject 'suspension' to servers. -SELECT gp_inject_fault_infinite('diskquota_before_stat_relfilenode', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content>=0; - gp_inject_fault_infinite --------------------------- - Success: - Success: - Success: -(3 rows) - --- Session 1 will hang before applying stat(2) to the physical file. 
-1&: SELECT diskquota.relation_size('t_dropped'); --- Wait until the fault is triggered to avoid the following race condition: --- The 't_dropped' table is dropped before evaluating "SELECT diskquota.relation_size('t_dropped')" --- and the query will fail with 'ERROR: relation "t_dropped" does not exist' -SELECT gp_wait_until_triggered_fault('diskquota_before_stat_relfilenode', 1, dbid) FROM gp_segment_configuration WHERE role='p' AND content>=0; - gp_wait_until_triggered_fault -------------------------------- - Success: - Success: - Success: -(3 rows) --- Drop the table. -DROP TABLE t_dropped; -DROP TABLE --- Remove the injected 'suspension'. -SELECT gp_inject_fault_infinite('diskquota_before_stat_relfilenode', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content>=0; - gp_inject_fault_infinite --------------------------- - Success: - Success: - Success: -(3 rows) --- Session 1 will continue and returns 0. -1<: <... completed> - relation_size ---------------- - 0 -(1 row) - --- 2. Test whether relation size is correct under concurrent writes for AO tables. --- Since no row is deleted, diskquota.relation_size() should be equal to --- pg_relation_size(). - -CREATE TABLE t_ao(i int) WITH (appendonly=true) DISTRIBUTED BY (i); -CREATE TABLE -1: BEGIN; -BEGIN -1: INSERT INTO t_ao SELECT generate_series(1, 10000); -INSERT 0 10000 -2: BEGIN; -BEGIN -2: INSERT INTO t_ao SELECT generate_series(1, 10000); -INSERT 0 10000 -1: COMMIT; -COMMIT -2: COMMIT; -COMMIT -SELECT diskquota.relation_size('t_ao'); - relation_size ---------------- - 200400 -(1 row) -SELECT pg_relation_size('t_ao'); - pg_relation_size ------------------- - 200400 -(1 row) -DROP TABLE t_ao; -DROP TABLE - -SELECT diskquota.resume(); - resume --------- - -(1 row) diff --git a/gpcontrib/diskquota/tests/isolation2/expected7/test_temporary_table.out b/gpcontrib/diskquota/tests/isolation2/expected7/test_temporary_table.out deleted file mode 100644 index cc666691c36..00000000000 --- a/gpcontrib/diskquota/tests/isolation2/expected7/test_temporary_table.out +++ /dev/null @@ -1,70 +0,0 @@ --- Ensure diskquota does not save information about temporary table during restart cluster by invalidates it at startup - -!\retcode gpconfig -c diskquota.naptime -v 5 --skipvalidation; --- start_ignore --- end_ignore -(exited with code 0) -!\retcode gpstop -u; --- start_ignore --- end_ignore -(exited with code 0) - -1: CREATE SCHEMA temporary_schema; -CREATE SCHEMA -1: SET search_path TO temporary_schema; -SET -1: SELECT diskquota.set_schema_quota('temporary_schema', '1 MB'); - set_schema_quota ------------------- - -(1 row) -1: SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) -1: CREATE TEMPORARY TABLE temporary_table(id int) DISTRIBUTED BY (id); -CREATE TABLE -1: INSERT INTO temporary_table SELECT generate_series(1, 10000); -INSERT 0 10000 --- Wait for the diskquota bgworker refreshing the size of 'temporary_table'. -1: SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) -1q: ... 
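The restart below is the crux of this test: diskquota.table_size is a persistent heap, while temporary relations vanish with the cluster, so any size entry recorded for temporary_table must be invalidated at startup rather than linger forever. The assertion the test makes after the restart can be generalized into a small health-check query, sketched here with illustrative aliases and without the segid = -1 filter so that entries for all segments are scanned:

    -- Any row returned is a stale size entry with no live relation behind it.
    SELECT ts.tableid, ts.segid, ts.size
    FROM diskquota.table_size AS ts
    WHERE NOT EXISTS (
        SELECT 1 FROM pg_catalog.pg_class c WHERE c.oid = ts.tableid
    );
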
- --- Restart cluster fastly -!\retcode gpstop -afr; --- start_ignore --- end_ignore -(exited with code 0) - --- Indicates that there is no temporary table in pg_catalog.pg_class -1: SELECT oid FROM pg_catalog.pg_class WHERE relname = 'temporary_table'; - oid ------ -(0 rows) --- Indicates that there are no entries in diskquota.table_size that are not present in pg_catalog.pg_class -1: SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) -1: SELECT tableid FROM diskquota.table_size WHERE NOT EXISTS (SELECT 1 FROM pg_catalog.pg_class WHERE tableid = oid) AND segid = -1; - tableid ---------- -(0 rows) -1: DROP SCHEMA temporary_schema CASCADE; -DROP SCHEMA -1q: ... - -!\retcode gpconfig -c diskquota.naptime -v 0 --skipvalidation; --- start_ignore --- end_ignore -(exited with code 0) -!\retcode gpstop -u; --- start_ignore --- end_ignore -(exited with code 0) diff --git a/gpcontrib/diskquota/tests/isolation2/expected7/test_truncate.out b/gpcontrib/diskquota/tests/isolation2/expected7/test_truncate.out deleted file mode 100644 index 64b0fef803c..00000000000 --- a/gpcontrib/diskquota/tests/isolation2/expected7/test_truncate.out +++ /dev/null @@ -1,86 +0,0 @@ --- Test various race conditions for TRUNCATE. - --- Case 1: Pulling active table before swapping relfilenode -CREATE TABLE dummy_t1(i int) DISTRIBUTED BY (i); -CREATE TABLE -INSERT INTO dummy_t1 SELECT generate_series(1, 1000); -INSERT 0 1000 --- Wait for the diskquota bgworker refreshing the size of 'dummy_t1'. -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) --- Shows that the result of pg_table_size() and diskquota.table_size are identical. -SELECT pg_table_size('dummy_t1'); - pg_table_size ---------------- - 98304 -(1 row) -SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid='dummy_t1'::regclass ORDER BY segid; - tableid | size | segid -----------+-------+------- - dummy_t1 | 98304 | -1 - dummy_t1 | 32768 | 0 - dummy_t1 | 32768 | 1 - dummy_t1 | 32768 | 2 -(4 rows) - -SELECT gp_inject_fault_infinite('diskquota_after_smgrcreate', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content<>-1; - gp_inject_fault_infinite --------------------------- - Success: - Success: - Success: -(3 rows) -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) -1&: TRUNCATE dummy_t1; -SELECT gp_wait_until_triggered_fault('diskquota_after_smgrcreate', 1, dbid) FROM gp_segment_configuration WHERE role='p' AND content<>-1; - gp_wait_until_triggered_fault -------------------------------- - Success: - Success: - Success: -(3 rows) --- Wait for the diskquota bgworker 'consumes' the newly created relfilenode from shmem. -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) -SELECT gp_inject_fault_infinite('diskquota_after_smgrcreate', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content<>-1; - gp_inject_fault_infinite --------------------------- - Success: - Success: - Success: -(3 rows) -1<: <... completed> -TRUNCATE TABLE - --- Wait for the diskquota bgworker refreshing the size of 'dummy_t1'. -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) --- Shows that the result of pg_table_size() and diskquota.table_size are identical. 
-SELECT pg_table_size('dummy_t1'); - pg_table_size ---------------- - 0 -(1 row) -SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid='dummy_t1'::regclass ORDER BY segid; - tableid | size | segid -----------+------+------- - dummy_t1 | 0 | -1 - dummy_t1 | 0 | 0 - dummy_t1 | 0 | 1 - dummy_t1 | 0 | 2 -(4 rows) -DROP TABLE dummy_t1; -DROP TABLE diff --git a/gpcontrib/diskquota/tests/isolation2/expected7/test_vacuum.out b/gpcontrib/diskquota/tests/isolation2/expected7/test_vacuum.out deleted file mode 100644 index f40397f3ca8..00000000000 --- a/gpcontrib/diskquota/tests/isolation2/expected7/test_vacuum.out +++ /dev/null @@ -1,99 +0,0 @@ --- This file tests various race conditions when performing 'VACUUM FULL'. - --- 1. When the gpdb is performing 'VACUUM FULL' on some relation, it can be summarized --- as the following 3 steps: --- s1) create a new temporary relation (smgrcreate hook will be triggered, newly --- created relfilenode will be put into shmem). --- s2) insert data into the newly created relation from the old relation (smgrextend --- hook will be triggered, newly created relfilenode will be put into shmem). --- s3) change the old relation's relfilenode to the newly created one. --- Consider the following situation: --- If the diskquota bgworker pulls active oids before the 'VACUUM FULL' operation finishing, --- the newly created relfilenode is translated to the newly created temporary relation's oid, --- the old relation's size cannot be updated. We resolve it by making altered relations' oids --- constantly active so that the diskquota bgworker keeps updating the altered relation size --- during 'VACUUM FULL'. -CREATE TABLE dummy_t1(i int) DISTRIBUTED BY (i); -CREATE TABLE -INSERT INTO dummy_t1 SELECT generate_series(1, 1000); -INSERT 0 1000 -DELETE FROM dummy_t1; -DELETE 1000 --- Wait for the diskquota bgworker refreshing the size of 'dummy_t1'. -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) --- Shows that the result of pg_table_size() and diskquota.table_size are identical. -SELECT pg_table_size('dummy_t1'); - pg_table_size ---------------- - 98304 -(1 row) -SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid='dummy_t1'::regclass ORDER BY segid; - tableid | size | segid -----------+-------+------- - dummy_t1 | 98304 | -1 - dummy_t1 | 32768 | 0 - dummy_t1 | 32768 | 1 - dummy_t1 | 32768 | 2 -(4 rows) -SELECT gp_inject_fault_infinite('object_access_post_alter', 'suspend', dbid) FROM gp_segment_configuration WHERE role='p' AND content<>-1; - gp_inject_fault_infinite --------------------------- - Success: - Success: - Success: -(3 rows) -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) -1&: VACUUM FULL dummy_t1; -SELECT gp_wait_until_triggered_fault('object_access_post_alter', 1, dbid) FROM gp_segment_configuration WHERE role='p' AND content<>-1; - gp_wait_until_triggered_fault -------------------------------- - Success: - Success: - Success: -(3 rows) --- Wait for the diskquota bgworker 'consumes' the newly created relfilenode from shmem. -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) -SELECT gp_inject_fault_infinite('object_access_post_alter', 'reset', dbid) FROM gp_segment_configuration WHERE role='p' AND content<>-1; - gp_inject_fault_infinite --------------------------- - Success: - Success: - Success: -(3 rows) -1<: <... 
completed> -VACUUM - --- Wait for the diskquota bgworker refreshing the size of 'dummy_t1'. -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) --- Shows that the result of pg_table_size() and diskquota.table_size are identical. -SELECT pg_table_size('dummy_t1'); - pg_table_size ---------------- - 0 -(1 row) -SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid='dummy_t1'::regclass ORDER BY segid; - tableid | size | segid -----------+------+------- - dummy_t1 | 0 | -1 - dummy_t1 | 0 | 0 - dummy_t1 | 0 | 1 - dummy_t1 | 0 | 2 -(4 rows) -DROP TABLE dummy_t1; -DROP TABLE diff --git a/gpcontrib/diskquota/tests/isolation2/sql/test_postmaster_restart.in.sql b/gpcontrib/diskquota/tests/isolation2/sql/test_postmaster_restart.in.sql index bd4def38916..4c5f65a20c6 100644 --- a/gpcontrib/diskquota/tests/isolation2/sql/test_postmaster_restart.in.sql +++ b/gpcontrib/diskquota/tests/isolation2/sql/test_postmaster_restart.in.sql @@ -18,7 +18,7 @@ !\retcode pgrep -f "[p]ostgres.*diskquota.*isolation2test"; -- stop postmaster -!\retcode pg_ctl -D $MASTER_DATA_DIRECTORY -w stop; +!\retcode pg_ctl -D $COORDINATOR_DATA_DIRECTORY -w stop; -- launcher should be terminated !\retcode pgrep -f "[p]ostgres.*launcher"; diff --git a/gpcontrib/diskquota/tests/regress/expected/test_appendonly.out b/gpcontrib/diskquota/tests/regress/expected/test_appendonly.out index 010aff751bd..cfa19a46114 100644 --- a/gpcontrib/diskquota/tests/regress/expected/test_appendonly.out +++ b/gpcontrib/diskquota/tests/regress/expected/test_appendonly.out @@ -22,13 +22,13 @@ SELECT tableid::regclass, size WHERE tableid=(SELECT oid FROM pg_class WHERE relname='t_ao') and segid=-1; tableid | size ---------+-------- - t_ao | 590936 + t_ao | 558168 (1 row) SELECT pg_table_size('t_ao'); pg_table_size --------------- - 590936 + 558168 (1 row) -- Query the size of t_aoco. @@ -37,13 +37,13 @@ SELECT tableid::regclass, size WHERE tableid=(SELECT oid FROM pg_class WHERE relname='t_aoco') and segid=-1; tableid | size ---------+-------- - t_aoco | 590352 + t_aoco | 557584 (1 row) SELECT pg_table_size('t_aoco'); pg_table_size --------------- - 590352 + 557584 (1 row) -- 2. Test that we are able to perform quota limit on appendonly tables. diff --git a/gpcontrib/diskquota/tests/regress/expected/test_column.out b/gpcontrib/diskquota/tests/regress/expected/test_column.out index 185d63b8a05..61f79ca5458 100644 --- a/gpcontrib/diskquota/tests/regress/expected/test_column.out +++ b/gpcontrib/diskquota/tests/regress/expected/test_column.out @@ -14,7 +14,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); (1 row) CREATE TABLE a2(i INT) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Apache Cloudberry data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
-- expect fail INSERT INTO a2 SELECT generate_series(1,100000); diff --git a/gpcontrib/diskquota/tests/regress/expected/test_copy.out b/gpcontrib/diskquota/tests/regress/expected/test_copy.out index 880f73801fd..86799232a99 100644 --- a/gpcontrib/diskquota/tests/regress/expected/test_copy.out +++ b/gpcontrib/diskquota/tests/regress/expected/test_copy.out @@ -9,7 +9,7 @@ SELECT diskquota.set_schema_quota('s3', '1 MB'); SET search_path TO s3; \! seq 100 > /tmp/csmall.txt CREATE TABLE c (i int) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Apache Cloudberry data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. COPY c FROM '/tmp/csmall.txt'; -- expect failed diff --git a/gpcontrib/diskquota/tests/regress/expected/test_ctas_pause.out b/gpcontrib/diskquota/tests/regress/expected/test_ctas_pause.out index fd3971328ce..e4d6319be48 100644 --- a/gpcontrib/diskquota/tests/regress/expected/test_ctas_pause.out +++ b/gpcontrib/diskquota/tests/regress/expected/test_ctas_pause.out @@ -16,7 +16,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- heap table CREATE TABLE t1 (i) AS SELECT generate_series(1,10000000) DISTRIBUTED BY (i); -- expect fail -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Greenplum Database data distribution key for this table. +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Apache Cloudberry data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. ERROR: schema's disk space quota exceeded with name: 110528 (seg1 127.0.0.1:6003 pid=73892) SELECT diskquota.pause(); @@ -26,7 +26,7 @@ SELECT diskquota.pause(); (1 row) CREATE TABLE t1 (i) AS SELECT generate_series(1,10000000) DISTRIBUTED BY (i); -- expect succeed -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Greenplum Database data distribution key for this table. +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Apache Cloudberry data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. -- disable hardlimit and do some clean-ups. \! gpconfig -c "diskquota.hard_limit" -v "off" > /dev/null diff --git a/gpcontrib/diskquota/tests/regress/expected/test_ctas_role.out b/gpcontrib/diskquota/tests/regress/expected/test_ctas_role.out index d6452140003..3ce86c0e952 100644 --- a/gpcontrib/diskquota/tests/regress/expected/test_ctas_role.out +++ b/gpcontrib/diskquota/tests/regress/expected/test_ctas_role.out @@ -14,7 +14,7 @@ GRANT USAGE ON SCHEMA diskquota TO hardlimit_r; SET ROLE hardlimit_r; -- heap table CREATE TABLE t1 (i) AS SELECT generate_series(1, 100000000) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Greenplum Database data distribution key for this table. 
+NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Apache Cloudberry data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. [hardlimit] role's disk space quota exceeded SELECT diskquota.wait_for_worker_new_epoch(); @@ -25,7 +25,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- temp table CREATE TEMP TABLE t2 (i) AS SELECT generate_series(1, 100000000); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Greenplum Database data distribution key for this table. +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Apache Cloudberry data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. [hardlimit] role's disk space quota exceeded SELECT diskquota.wait_for_worker_new_epoch(); @@ -36,7 +36,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- toast table CREATE TABLE toast_table (i) AS SELECT ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'array' as the Greenplum Database data distribution key for this table. +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'array' as the Apache Cloudberry data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. [hardlimit] role's disk space quota exceeded SELECT diskquota.wait_for_worker_new_epoch(); @@ -47,7 +47,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- ao table CREATE TABLE ao_table (i) WITH (appendonly=true) AS SELECT generate_series(1, 100000000) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Greenplum Database data distribution key for this table. +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Apache Cloudberry data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. [hardlimit] role's disk space quota exceeded SELECT diskquota.wait_for_worker_new_epoch(); @@ -59,7 +59,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- aocs table CREATE TABLE aocs_table WITH (appendonly=true, orientation=column) AS SELECT i, ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000) AS i; -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'i' as the Greenplum Database data distribution key for this table. +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'i' as the Apache Cloudberry data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
[hardlimit] role's disk space quota exceeded SELECT diskquota.wait_for_worker_new_epoch(); diff --git a/gpcontrib/diskquota/tests/regress/expected/test_ctas_schema.out b/gpcontrib/diskquota/tests/regress/expected/test_ctas_schema.out index 157ec6181ac..173fcb723c8 100644 --- a/gpcontrib/diskquota/tests/regress/expected/test_ctas_schema.out +++ b/gpcontrib/diskquota/tests/regress/expected/test_ctas_schema.out @@ -11,7 +11,7 @@ SELECT diskquota.set_schema_quota('hardlimit_s', '1 MB'); SET search_path TO hardlimit_s; -- heap table CREATE TABLE t1 (i) AS SELECT generate_series(1, 100000000) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Greenplum Database data distribution key for this table. +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Apache Cloudberry data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. [hardlimit] schema's disk space quota exceeded SELECT diskquota.wait_for_worker_new_epoch(); @@ -23,7 +23,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- toast table CREATE TABLE toast_table (i) AS SELECT ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'array' as the Greenplum Database data distribution key for this table. +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'array' as the Apache Cloudberry data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. [hardlimit] schema's disk space quota exceeded SELECT diskquota.wait_for_worker_new_epoch(); @@ -34,7 +34,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- ao table CREATE TABLE ao_table (i) WITH (appendonly=true) AS SELECT generate_series(1, 100000000) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Greenplum Database data distribution key for this table. +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Apache Cloudberry data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. [hardlimit] schema's disk space quota exceeded SELECT diskquota.wait_for_worker_new_epoch(); @@ -46,7 +46,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- aocs table CREATE TABLE aocs_table WITH (appendonly=true, orientation=column) AS SELECT i, ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000) AS i; -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'i' as the Greenplum Database data distribution key for this table. +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'i' as the Apache Cloudberry data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
[hardlimit] schema's disk space quota exceeded SELECT diskquota.wait_for_worker_new_epoch(); diff --git a/gpcontrib/diskquota/tests/regress/expected/test_ctas_tablespace_role.out b/gpcontrib/diskquota/tests/regress/expected/test_ctas_tablespace_role.out index adc0d95d584..ba2b4fdb2ca 100644 --- a/gpcontrib/diskquota/tests/regress/expected/test_ctas_tablespace_role.out +++ b/gpcontrib/diskquota/tests/regress/expected/test_ctas_tablespace_role.out @@ -21,7 +21,7 @@ SET default_tablespace = ctas_rolespc; SET ROLE hardlimit_r; -- heap table CREATE TABLE t1 (i) AS SELECT generate_series(1, 100000000) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Greenplum Database data distribution key for this table. +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Apache Cloudberry data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. [hardlimit] tablespace-role's disk space quota exceeded SELECT diskquota.wait_for_worker_new_epoch(); @@ -33,7 +33,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- toast table CREATE TABLE toast_table (i) AS SELECT ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'array' as the Greenplum Database data distribution key for this table. +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'array' as the Apache Cloudberry data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. [hardlimit] tablespace-role's disk space quota exceeded SELECT diskquota.wait_for_worker_new_epoch(); @@ -44,7 +44,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- ao table CREATE TABLE ao_table (i) WITH (appendonly=true) AS SELECT generate_series(1, 100000000) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Greenplum Database data distribution key for this table. +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Apache Cloudberry data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. [hardlimit] tablespace-role's disk space quota exceeded SELECT diskquota.wait_for_worker_new_epoch(); @@ -56,7 +56,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- aocs table CREATE TABLE aocs_table WITH (appendonly=true, orientation=column) AS SELECT i, ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000) AS i DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'i' as the Greenplum Database data distribution key for this table. +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'i' as the Apache Cloudberry data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
[hardlimit] tablespace-role's disk space quota exceeded SELECT diskquota.wait_for_worker_new_epoch(); diff --git a/gpcontrib/diskquota/tests/regress/expected/test_ctas_tablespace_schema.out b/gpcontrib/diskquota/tests/regress/expected/test_ctas_tablespace_schema.out index 58fdaac36bb..50ac7071935 100644 --- a/gpcontrib/diskquota/tests/regress/expected/test_ctas_tablespace_schema.out +++ b/gpcontrib/diskquota/tests/regress/expected/test_ctas_tablespace_schema.out @@ -19,7 +19,7 @@ SET search_path TO hardlimit_s; SET default_tablespace = ctas_schemaspc; -- heap table CREATE TABLE t1 (i) AS SELECT generate_series(1, 100000000) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Greenplum Database data distribution key for this table. +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Apache Cloudberry data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. [hardlimit] tablespace-schema's disk space quota exceeded SELECT diskquota.wait_for_worker_new_epoch(); @@ -31,7 +31,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- toast table CREATE TABLE toast_table (i) AS SELECT ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'array' as the Greenplum Database data distribution key for this table. +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'array' as the Apache Cloudberry data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. [hardlimit] tablespace-schema's disk space quota exceeded SELECT diskquota.wait_for_worker_new_epoch(); @@ -42,7 +42,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- ao table CREATE TABLE ao_table (i) WITH (appendonly=true) AS SELECT generate_series(1, 100000000) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Greenplum Database data distribution key for this table. +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'generate_series' as the Apache Cloudberry data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. [hardlimit] tablespace-schema's disk space quota exceeded SELECT diskquota.wait_for_worker_new_epoch(); @@ -54,7 +54,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- aocs table CREATE TABLE aocs_table WITH (appendonly=true, orientation=column) AS SELECT i, ARRAY(SELECT generate_series(1,10000)) FROM generate_series(1, 100000) AS i DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'i' as the Greenplum Database data distribution key for this table. +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'i' as the Apache Cloudberry data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
[hardlimit] tablespace-schema's disk space quota exceeded SELECT diskquota.wait_for_worker_new_epoch(); diff --git a/gpcontrib/diskquota/tests/regress/expected/test_delete_quota.out b/gpcontrib/diskquota/tests/regress/expected/test_delete_quota.out index e8f9f1b952e..c0ee3de0649 100644 --- a/gpcontrib/diskquota/tests/regress/expected/test_delete_quota.out +++ b/gpcontrib/diskquota/tests/regress/expected/test_delete_quota.out @@ -8,7 +8,7 @@ SELECT diskquota.set_schema_quota('deleteschema', '1 MB'); SET search_path TO deleteschema; CREATE TABLE c (i INT) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Apache Cloudberry data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. -- expect failed INSERT INTO c SELECT generate_series(1,100000); diff --git a/gpcontrib/diskquota/tests/regress/expected/test_drop_after_pause.out b/gpcontrib/diskquota/tests/regress/expected/test_drop_after_pause.out index 1dc7e8ad928..961d56fce20 100644 --- a/gpcontrib/diskquota/tests/regress/expected/test_drop_after_pause.out +++ b/gpcontrib/diskquota/tests/regress/expected/test_drop_after_pause.out @@ -31,7 +31,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); \! gpstop -u > /dev/null CREATE SCHEMA SX; CREATE TABLE SX.a(i int) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Apache Cloudberry data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. SELECT diskquota.set_schema_quota('SX', '1MB'); set_schema_quota diff --git a/gpcontrib/diskquota/tests/regress/expected/test_drop_any_extension.out b/gpcontrib/diskquota/tests/regress/expected/test_drop_any_extension.out index 1c8fbc66222..c2fadb86c97 100644 --- a/gpcontrib/diskquota/tests/regress/expected/test_drop_any_extension.out +++ b/gpcontrib/diskquota/tests/regress/expected/test_drop_any_extension.out @@ -15,7 +15,7 @@ SELECT diskquota.set_schema_quota(current_schema, '1MB'); (1 row) CREATE TABLE t(i int); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Apache Cloudberry data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
DROP EXTENSION gp_inject_fault; -- expect success diff --git a/gpcontrib/diskquota/tests/regress/expected/test_drop_table.out b/gpcontrib/diskquota/tests/regress/expected/test_drop_table.out index 507d49bac3f..8827d2dff4a 100644 --- a/gpcontrib/diskquota/tests/regress/expected/test_drop_table.out +++ b/gpcontrib/diskquota/tests/regress/expected/test_drop_table.out @@ -8,10 +8,10 @@ SELECT diskquota.set_schema_quota('sdrtbl', '1 MB'); SET search_path TO sdrtbl; CREATE TABLE a(i INT) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Apache Cloudberry data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. CREATE TABLE a2(i INT) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Apache Cloudberry data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT INTO a SELECT generate_series(1,100); -- expect insert fail diff --git a/gpcontrib/diskquota/tests/regress/expected/test_fast_disk_check.out b/gpcontrib/diskquota/tests/regress/expected/test_fast_disk_check.out index d309df39467..b38b931b07b 100644 --- a/gpcontrib/diskquota/tests/regress/expected/test_fast_disk_check.out +++ b/gpcontrib/diskquota/tests/regress/expected/test_fast_disk_check.out @@ -2,8 +2,6 @@ CREATE SCHEMA s1; SET search_path to s1; CREATE TABLE a(i int) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT INTO a SELECT generate_series(1,200000); SELECT diskquota.wait_for_worker_new_epoch(); wait_for_worker_new_epoch @@ -14,7 +12,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); SELECT (pg_database_size(oid)-dbsize)/dbsize < 0.1 FROM pg_database, diskquota.show_fast_database_size_view WHERE datname='contrib_regression'; ?column? ---------- - f + t (1 row) RESET search_path; diff --git a/gpcontrib/diskquota/tests/regress/expected/test_fetch_table_stat.out b/gpcontrib/diskquota/tests/regress/expected/test_fetch_table_stat.out index 47d6bf313a3..3fbde382f86 100644 --- a/gpcontrib/diskquota/tests/regress/expected/test_fetch_table_stat.out +++ b/gpcontrib/diskquota/tests/regress/expected/test_fetch_table_stat.out @@ -3,7 +3,7 @@ -- the error message is preserved for us to debug. -- CREATE TABLE t_error_handling (i int) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Apache Cloudberry data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
-- Inject an error to a segment server, since this UDF is only called on segments. SELECT gp_inject_fault_infinite('diskquota_fetch_table_stat', 'error', dbid) diff --git a/gpcontrib/diskquota/tests/regress/expected/test_index.out b/gpcontrib/diskquota/tests/regress/expected/test_index.out index a35ec4f95cd..1c317f3ba62 100644 --- a/gpcontrib/diskquota/tests/regress/expected/test_index.out +++ b/gpcontrib/diskquota/tests/regress/expected/test_index.out @@ -76,10 +76,10 @@ WHERE tableid = 'a_index'::regclass ORDER BY segid; tableid | size | segid ---------+---------+------- - a_index | 1212416 | -1 - a_index | 393216 | 0 - a_index | 393216 | 1 - a_index | 393216 | 2 + a_index | 1015808 | -1 + a_index | 327680 | 0 + a_index | 327680 | 1 + a_index | 327680 | 2 (4 rows) -- add index to tablespace indexspc @@ -93,14 +93,14 @@ SELECT diskquota.wait_for_worker_new_epoch(); SELECT schema_name,tablespace_name,quota_in_mb,nspsize_tablespace_in_bytes FROM diskquota.show_fast_schema_tablespace_quota_view WHERE schema_name ='indexschema1' and tablespace_name='indexspc'; schema_name | tablespace_name | quota_in_mb | nspsize_tablespace_in_bytes --------------+-----------------+-------------+----------------------------- - indexschema1 | indexspc | 2 | 2654208 + indexschema1 | indexspc | 2 | 2457600 (1 row) SELECT size, segid FROM diskquota.table_size , pg_class where tableid=oid and (relname='test_index_a' or relname='a_index') and segid=-1; size | segid ---------+------- - 1212416 | -1 1441792 | -1 + 1015808 | -1 (2 rows) -- expect insert fail diff --git a/gpcontrib/diskquota/tests/regress/expected/test_init_table_size_table.out b/gpcontrib/diskquota/tests/regress/expected/test_init_table_size_table.out index 30744e0f6f5..fe0347070ec 100644 --- a/gpcontrib/diskquota/tests/regress/expected/test_init_table_size_table.out +++ b/gpcontrib/diskquota/tests/regress/expected/test_init_table_size_table.out @@ -34,10 +34,10 @@ ORDER BY tableid; t | 3932160 | -1 idx | 2490368 | -1 toast | 393216 | -1 - toast_idx | 327680 | -1 - ao | 1591464 | -1 + toast_idx | 163840 | -1 + ao | 1558696 | -1 ao_idx | 2490368 | -1 - aocs | 10813592 | -1 + aocs | 10649752 | -1 aocs_idx | 524288 | -1 (8 rows) @@ -58,10 +58,10 @@ ORDER BY tableid; t | 3932160 | -1 idx | 2490368 | -1 toast | 393216 | -1 - toast_idx | 327680 | -1 - ao | 1591464 | -1 + toast_idx | 163840 | -1 + ao | 1558696 | -1 ao_idx | 2490368 | -1 - aocs | 10813592 | -1 + aocs | 10649752 | -1 aocs_idx | 524288 | -1 (8 rows) diff --git a/gpcontrib/diskquota/tests/regress/expected/test_insert_after_drop.out b/gpcontrib/diskquota/tests/regress/expected/test_insert_after_drop.out index 4d80cfeaf82..06410d063f0 100644 --- a/gpcontrib/diskquota/tests/regress/expected/test_insert_after_drop.out +++ b/gpcontrib/diskquota/tests/regress/expected/test_insert_after_drop.out @@ -11,7 +11,7 @@ SELECT diskquota.set_schema_quota('sdrtbl', '1 MB'); SET search_path TO sdrtbl; CREATE TABLE a(i int) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Apache Cloudberry data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
INSERT INTO a SELECT generate_series(1,100); -- expect insert fail diff --git a/gpcontrib/diskquota/tests/regress/expected/test_partition.out b/gpcontrib/diskquota/tests/regress/expected/test_partition.out index e103bbc9ee5..a531e2db302 100644 --- a/gpcontrib/diskquota/tests/regress/expected/test_partition.out +++ b/gpcontrib/diskquota/tests/regress/expected/test_partition.out @@ -18,7 +18,7 @@ CREATE TABLE measurement ( PARTITION Mar06 START (date '2006-03-01') INCLUSIVE END (date '2016-04-01') EXCLUSIVE ); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'city_id' as the Greenplum Database data distribution key for this table. +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'city_id' as the Apache Cloudberry data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. NOTICE: CREATE TABLE will create partition "measurement_1_prt_feb06" for table "measurement" NOTICE: CREATE TABLE will create partition "measurement_1_prt_mar06" for table "measurement" diff --git a/gpcontrib/diskquota/tests/regress/expected/test_pause_and_resume.out b/gpcontrib/diskquota/tests/regress/expected/test_pause_and_resume.out index e5b7820484d..9b3d264ac8f 100644 --- a/gpcontrib/diskquota/tests/regress/expected/test_pause_and_resume.out +++ b/gpcontrib/diskquota/tests/regress/expected/test_pause_and_resume.out @@ -2,7 +2,7 @@ CREATE SCHEMA s1; SET search_path TO s1; CREATE TABLE a(i int) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Apache Cloudberry data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. -- expect insert succeed INSERT INTO a SELECT generate_series(1,100000); diff --git a/gpcontrib/diskquota/tests/regress/expected/test_pause_and_resume_multiple_db.out b/gpcontrib/diskquota/tests/regress/expected/test_pause_and_resume_multiple_db.out index d3320f17aac..34419ee58dc 100644 --- a/gpcontrib/diskquota/tests/regress/expected/test_pause_and_resume_multiple_db.out +++ b/gpcontrib/diskquota/tests/regress/expected/test_pause_and_resume_multiple_db.out @@ -15,12 +15,12 @@ SELECT diskquota.wait_for_worker_new_epoch(); \c contrib_regression CREATE TABLE s1.a(i int) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Apache Cloudberry data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT INTO s1.a SELECT generate_series(1,100000); -- expect insert succeed \c test_pause_and_resume CREATE TABLE s1.a(i int) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Apache Cloudberry data distribution key for this table. 
HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT INTO s1.a SELECT generate_series(1,100000); -- expect insert succeed \c contrib_regression @@ -111,7 +111,7 @@ SELECT diskquota.wait_for_worker_new_epoch(); -- new database should be active a (1 row) CREATE TABLE s1.a(i int) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Apache Cloudberry data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT INTO s1.a SELECT generate_series(1,100000); -- expect insert succeed SELECT diskquota.set_schema_quota('s1', '1 MB'); diff --git a/gpcontrib/diskquota/tests/regress/expected/test_rejectmap.out b/gpcontrib/diskquota/tests/regress/expected/test_rejectmap.out index b8c8930a613..f7dbccbe783 100644 --- a/gpcontrib/diskquota/tests/regress/expected/test_rejectmap.out +++ b/gpcontrib/diskquota/tests/regress/expected/test_rejectmap.out @@ -270,15 +270,13 @@ SELECT replace_oid_with_relname(rel.relname), ORDER BY rel.relname DESC; replace_oid_with_relname | relkind | target_type | namespace_matched -------------------------------+---------+-----------------+------------------- - pg_toast_blocked_t5_index | i | NAMESPACE_QUOTA | f - pg_toast_blocked_t5 | t | NAMESPACE_QUOTA | f pg_aovisimap_blocked_t5_index | i | NAMESPACE_QUOTA | f pg_aovisimap_blocked_t5 | M | NAMESPACE_QUOTA | f pg_aocsseg_blocked_t5 | o | NAMESPACE_QUOTA | f pg_aoblkdir_blocked_t5_index | i | NAMESPACE_QUOTA | f pg_aoblkdir_blocked_t5 | b | NAMESPACE_QUOTA | f blocked_t5 | r | NAMESPACE_QUOTA | t -(8 rows) +(6 rows) -- Do some clean-ups. DROP FUNCTION replace_oid_with_relname(text); diff --git a/gpcontrib/diskquota/tests/regress/expected/test_relation_cache.out b/gpcontrib/diskquota/tests/regress/expected/test_relation_cache.out index 38101c0f202..5f0c3124066 100644 --- a/gpcontrib/diskquota/tests/regress/expected/test_relation_cache.out +++ b/gpcontrib/diskquota/tests/regress/expected/test_relation_cache.out @@ -101,7 +101,7 @@ insert into t select generate_series(1,1000) as a, repeat('a', 1000) as b; select count(*) from diskquota.show_relation_cache_all_seg(); count ------- - 18 + 12 (1 row) select diskquota.check_relation_cache(); diff --git a/gpcontrib/diskquota/tests/regress/expected/test_relation_size.out b/gpcontrib/diskquota/tests/regress/expected/test_relation_size.out index 69aa64a79f7..9931beeba12 100644 --- a/gpcontrib/diskquota/tests/regress/expected/test_relation_size.out +++ b/gpcontrib/diskquota/tests/regress/expected/test_relation_size.out @@ -1,5 +1,5 @@ CREATE TEMP TABLE t1(i int); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Apache Cloudberry data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
INSERT INTO t1 SELECT generate_series(1, 10000); SELECT diskquota.relation_size('t1'); @@ -15,7 +15,7 @@ SELECT pg_table_size('t1'); (1 row) CREATE TABLE t2(i int) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Apache Cloudberry data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT INTO t2 SELECT generate_series(1, 10000); SELECT diskquota.relation_size('t2'); @@ -67,7 +67,7 @@ SELECT pg_table_size('t2'); DROP TABLE t1, t2; DROP TABLESPACE test_spc; CREATE TABLE ao (i int) WITH (appendonly=true) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Apache Cloudberry data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT INTO ao SELECT generate_series(1, 10000); SELECT diskquota.relation_size('ao'); @@ -84,7 +84,7 @@ SELECT pg_relation_size('ao'); DROP TABLE ao; CREATE TABLE aocs (i int, t text) WITH (appendonly=true, orientation=column) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Apache Cloudberry data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT INTO aocs SELECT i, repeat('a', 1000) FROM generate_series(1, 10000) AS i; SELECT diskquota.relation_size('aocs'); diff --git a/gpcontrib/diskquota/tests/regress/expected/test_relkind.out b/gpcontrib/diskquota/tests/regress/expected/test_relkind.out index 54a1c76b632..30cf8646171 100644 --- a/gpcontrib/diskquota/tests/regress/expected/test_relkind.out +++ b/gpcontrib/diskquota/tests/regress/expected/test_relkind.out @@ -7,7 +7,7 @@ CREATE TYPE test_type AS ( CREATE VIEW v AS select * from pg_class; CREATE EXTENSION diskquota; CREATE table test(a int); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Apache Cloudberry data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
SELECT diskquota.init_table_size_table(); init_table_size_table diff --git a/gpcontrib/diskquota/tests/regress/expected/test_rename.out b/gpcontrib/diskquota/tests/regress/expected/test_rename.out index 57573b425e8..ae96a1e797f 100644 --- a/gpcontrib/diskquota/tests/regress/expected/test_rename.out +++ b/gpcontrib/diskquota/tests/regress/expected/test_rename.out @@ -8,7 +8,7 @@ SELECT diskquota.set_schema_quota('srs1', '1 MB'); set search_path to srs1; CREATE TABLE a(i int) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Apache Cloudberry data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. -- expect insert fail INSERT INTO a SELECT generate_series(1,100000); @@ -45,7 +45,7 @@ SELECT diskquota.set_role_quota('srerole', '1MB'); SET search_path TO srr1; CREATE TABLE a(i int) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Apache Cloudberry data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. ALTER TABLE a OWNER TO srerole; -- expect insert fail diff --git a/gpcontrib/diskquota/tests/regress/expected/test_reschema.out b/gpcontrib/diskquota/tests/regress/expected/test_reschema.out index 1f0e4582828..5ede5fed2d9 100644 --- a/gpcontrib/diskquota/tests/regress/expected/test_reschema.out +++ b/gpcontrib/diskquota/tests/regress/expected/test_reschema.out @@ -8,7 +8,7 @@ SELECT diskquota.set_schema_quota('srE', '1 MB'); SET search_path TO srE; CREATE TABLE a(i int) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Apache Cloudberry data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. -- expect insert fail INSERT INTO a SELECT generate_series(1,100000); diff --git a/gpcontrib/diskquota/tests/regress/expected/test_table_size.out b/gpcontrib/diskquota/tests/regress/expected/test_table_size.out index 27b076725bd..aa2e6442641 100644 --- a/gpcontrib/diskquota/tests/regress/expected/test_table_size.out +++ b/gpcontrib/diskquota/tests/regress/expected/test_table_size.out @@ -1,6 +1,6 @@ -- Test tablesize table create table a(i text) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Apache Cloudberry data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
insert into a select * from generate_series(1,10000); SELECT diskquota.wait_for_worker_new_epoch(); diff --git a/gpcontrib/diskquota/tests/regress/expected/test_temp_role.out b/gpcontrib/diskquota/tests/regress/expected/test_temp_role.out index 7896ec17f3d..c29d67aa314 100644 --- a/gpcontrib/diskquota/tests/regress/expected/test_temp_role.out +++ b/gpcontrib/diskquota/tests/regress/expected/test_temp_role.out @@ -9,11 +9,11 @@ SELECT diskquota.set_role_quota('u3temp', '1MB'); (1 row) CREATE TABLE a(i int) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Apache Cloudberry data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. ALTER TABLE a OWNER TO u3temp; CREATE TEMP TABLE ta(i int); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Apache Cloudberry data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. ALTER TABLE ta OWNER TO u3temp; -- expected failed: fill temp table diff --git a/gpcontrib/diskquota/tests/regress/expected/test_toast.out b/gpcontrib/diskquota/tests/regress/expected/test_toast.out index 92068c9785d..273f64b8582 100644 --- a/gpcontrib/diskquota/tests/regress/expected/test_toast.out +++ b/gpcontrib/diskquota/tests/regress/expected/test_toast.out @@ -8,7 +8,7 @@ SELECT diskquota.set_schema_quota('s5', '1 MB'); SET search_path TO s5; CREATE TABLE a5 (t text) DISTRIBUTED BY (t); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'message' as the Greenplum Database data distribution key for this table. +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'message' as the Apache Cloudberry data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT INTO a5 SELECT (SELECT diff --git a/gpcontrib/diskquota/tests/regress/expected/test_truncate.out b/gpcontrib/diskquota/tests/regress/expected/test_truncate.out index c380b4c47ba..b19df93214d 100644 --- a/gpcontrib/diskquota/tests/regress/expected/test_truncate.out +++ b/gpcontrib/diskquota/tests/regress/expected/test_truncate.out @@ -8,10 +8,10 @@ SELECT diskquota.set_schema_quota('s7', '1 MB'); SET search_path TO s7; CREATE TABLE a (i int) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Apache Cloudberry data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. CREATE TABLE b (i int) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. 
+NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Apache Cloudberry data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT INTO a SELECT generate_series(1,100000); SELECT diskquota.wait_for_worker_new_epoch(); diff --git a/gpcontrib/diskquota/tests/regress/expected/test_uncommitted_table_size.out b/gpcontrib/diskquota/tests/regress/expected/test_uncommitted_table_size.out index aa144bd14a0..5fe2b7e4da7 100644 --- a/gpcontrib/diskquota/tests/regress/expected/test_uncommitted_table_size.out +++ b/gpcontrib/diskquota/tests/regress/expected/test_uncommitted_table_size.out @@ -1,7 +1,7 @@ -- temp table begin; CREATE TEMP TABLE t1(i int); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Apache Cloudberry data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT INTO t1 SELECT generate_series(1, 100000); SELECT diskquota.wait_for_worker_new_epoch(); @@ -136,13 +136,13 @@ SELECT pg_table_size('ao_idx'); SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'ao'::regclass and segid = -1; tableid | size | segid ---------+---------+------- - ao | 1591464 | -1 + ao | 1558696 | -1 (1 row) SELECT pg_table_size('ao'); pg_table_size --------------- - 1591464 + 1558696 (1 row) commit; @@ -178,13 +178,13 @@ SELECT diskquota.wait_for_worker_new_epoch(); SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'aocs'::regclass and segid = -1; tableid | size | segid ---------+----------+------- - aocs | 10485912 | -1 + aocs | 10322072 | -1 (1 row) SELECT pg_table_size('aocs'); pg_table_size --------------- - 10485912 + 10322072 (1 row) commit; @@ -223,13 +223,13 @@ SELECT diskquota.wait_for_worker_new_epoch(); SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'aocs'::regclass and segid = -1; tableid | size | segid ---------+--------+------- - aocs | 763936 | -1 + aocs | 632864 | -1 (1 row) SELECT pg_table_size('aocs'); pg_table_size --------------- - 763936 + 632864 (1 row) commit; diff --git a/gpcontrib/diskquota/tests/regress/expected/test_update.out b/gpcontrib/diskquota/tests/regress/expected/test_update.out index 2c135cc671e..e4ac6e3bad7 100644 --- a/gpcontrib/diskquota/tests/regress/expected/test_update.out +++ b/gpcontrib/diskquota/tests/regress/expected/test_update.out @@ -8,7 +8,7 @@ SELECT diskquota.set_schema_quota('s4', '1 MB'); SET search_path TO s4; CREATE TABLE a(i int) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Apache Cloudberry data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
INSERT INTO a SELECT generate_series(1,100000); SELECT diskquota.wait_for_worker_new_epoch(); diff --git a/gpcontrib/diskquota/tests/regress/expected/test_vacuum.out b/gpcontrib/diskquota/tests/regress/expected/test_vacuum.out index b35e8519d7c..af6680e02b7 100644 --- a/gpcontrib/diskquota/tests/regress/expected/test_vacuum.out +++ b/gpcontrib/diskquota/tests/regress/expected/test_vacuum.out @@ -8,10 +8,10 @@ SELECT diskquota.set_schema_quota('s6', '1 MB'); SET search_path TO s6; CREATE TABLE a (i int) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Apache Cloudberry data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. CREATE TABLE b (i int) DISTRIBUTED BY (i); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Apache Cloudberry data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT INTO a SELECT generate_series(1,100000); SELECT diskquota.wait_for_worker_new_epoch(); diff --git a/gpcontrib/diskquota/tests/regress/expected/test_worker_schedule.out b/gpcontrib/diskquota/tests/regress/expected/test_worker_schedule.out index 7c6fc7b89ce..89fe78bd02a 100644 --- a/gpcontrib/diskquota/tests/regress/expected/test_worker_schedule.out +++ b/gpcontrib/diskquota/tests/regress/expected/test_worker_schedule.out @@ -40,7 +40,7 @@ CREATE DATABASE t12; \c t1 CREATE EXTENSION diskquota; CREATE TABLE f1(a int); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Apache Cloudberry data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT into f1 SELECT generate_series(0,1000); SELECT diskquota.wait_for_worker_new_epoch(); @@ -100,7 +100,7 @@ SHOW diskquota.max_workers; \c t2 CREATE EXTENSION diskquota; CREATE TABLE f2(a int); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Apache Cloudberry data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT into f2 SELECT generate_series(0,1000); SELECT diskquota.wait_for_worker_new_epoch(); @@ -118,7 +118,7 @@ SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = \c t3 CREATE EXTENSION diskquota; CREATE TABLE f3(a int); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. 
+NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Apache Cloudberry data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT into f3 SELECT generate_series(0,1000); SELECT diskquota.wait_for_worker_new_epoch(); @@ -178,7 +178,7 @@ SHOW diskquota.max_workers; \c t4 CREATE EXTENSION diskquota; CREATE TABLE f4(a int); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Apache Cloudberry data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT into f4 SELECT generate_series(0,1000); SELECT diskquota.wait_for_worker_new_epoch(); @@ -196,7 +196,7 @@ SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = \c t5 CREATE EXTENSION diskquota; CREATE TABLE f5(a int); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Apache Cloudberry data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT into f5 SELECT generate_series(0,1000); SELECT diskquota.wait_for_worker_new_epoch(); @@ -214,7 +214,7 @@ SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = \c t6 CREATE EXTENSION diskquota; CREATE TABLE f6(a int); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Apache Cloudberry data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT into f6 SELECT generate_series(0,1000); SELECT diskquota.wait_for_worker_new_epoch(); @@ -232,7 +232,7 @@ SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = \c t7 CREATE EXTENSION diskquota; CREATE TABLE f7(a int); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Apache Cloudberry data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT into f7 SELECT generate_series(0,1000); SELECT diskquota.wait_for_worker_new_epoch(); @@ -250,7 +250,7 @@ SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = \c t8 CREATE EXTENSION diskquota; CREATE TABLE f8(a int); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Apache Cloudberry data distribution key for this table. 
HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT into f8 SELECT generate_series(0,1000); SELECT diskquota.wait_for_worker_new_epoch(); @@ -268,7 +268,7 @@ SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = \c t9 CREATE EXTENSION diskquota; CREATE TABLE f9(a int); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Apache Cloudberry data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT into f9 SELECT generate_series(0,1000); SELECT diskquota.wait_for_worker_new_epoch(); @@ -286,7 +286,7 @@ SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = \c t10 CREATE EXTENSION diskquota; CREATE TABLE f10(a int); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Apache Cloudberry data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT into f10 SELECT generate_series(0,1000); SELECT diskquota.wait_for_worker_new_epoch(); @@ -304,7 +304,7 @@ SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = \c t11 CREATE EXTENSION diskquota; CREATE TABLE f11(a int); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Apache Cloudberry data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT into f11 SELECT generate_series(0,1000); SELECT diskquota.wait_for_worker_new_epoch(); @@ -364,7 +364,7 @@ DROP EXTENSION diskquota; DROP TABLE f1; CREATE EXTENSION diskquota; CREATE TABLE f1(a int); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Apache Cloudberry data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT into f1 SELECT generate_series(0,1000); SELECT diskquota.wait_for_worker_new_epoch(); @@ -396,7 +396,7 @@ DROP EXTENSION diskquota; DROP TABLE f2; CREATE EXTENSION diskquota; CREATE TABLE f2(a int); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Apache Cloudberry data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
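-- Aside on the idiom repeated throughout these expected files: diskquota updates
-- diskquota.table_size asynchronously from a background worker, so the tests call
-- wait_for_worker_new_epoch() to block until a full refresh cycle has finished
-- before reading sizes. A minimal sketch of the pattern (the table name f_demo is
-- hypothetical, not part of this patch):
CREATE TABLE f_demo(a int) DISTRIBUTED BY (a);
INSERT INTO f_demo SELECT generate_series(0, 1000);
SELECT diskquota.wait_for_worker_new_epoch();   -- returns t once the worker completes a new epoch
SELECT tableid::regclass, size, segid
  FROM diskquota.table_size
 WHERE tableid = 'f_demo'::regclass AND segid = -1;  -- segid = -1 holds the cluster-wide total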
INSERT into f2 SELECT generate_series(0,1000); SELECT diskquota.wait_for_worker_new_epoch(); @@ -540,7 +540,7 @@ DROP EXTENSION diskquota; \c t12 CREATE EXTENSION diskquota; CREATE TABLE f12(a int); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Greenplum Database data distribution key for this table. +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Apache Cloudberry data distribution key for this table. HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. INSERT into f12 SELECT generate_series(0,1000); SELECT diskquota.wait_for_worker_new_epoch(); diff --git a/gpcontrib/diskquota/tests/regress/expected7/test_appendonly.out b/gpcontrib/diskquota/tests/regress/expected7/test_appendonly.out deleted file mode 100644 index cfa19a46114..00000000000 --- a/gpcontrib/diskquota/tests/regress/expected7/test_appendonly.out +++ /dev/null @@ -1,78 +0,0 @@ --- Create new schema for running tests. -CREATE SCHEMA s_appendonly; -SET search_path TO s_appendonly; -CREATE TABLE t_ao(i int) WITH (appendonly=true) DISTRIBUTED BY (i); -CREATE TABLE t_aoco(i int) WITH (appendonly=true, orientation=column) DISTRIBUTED BY (i); --- Create an index on t_ao so that there will be pg_aoblkdir_XXX relations. -CREATE INDEX index_t ON t_ao(i); -CREATE INDEX index_t2 ON t_aoco(i); --- 1. Show that the relation's size in diskquota.table_size --- is identical to the result of pg_table_size(). -INSERT INTO t_ao SELECT generate_series(1, 100); -INSERT INTO t_aoco SELECT generate_series(1, 100); -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- Query the size of t_ao. -SELECT tableid::regclass, size - FROM diskquota.table_size - WHERE tableid=(SELECT oid FROM pg_class WHERE relname='t_ao') and segid=-1; - tableid | size ----------+-------- - t_ao | 558168 -(1 row) - -SELECT pg_table_size('t_ao'); - pg_table_size ---------------- - 558168 -(1 row) - --- Query the size of t_aoco. -SELECT tableid::regclass, size - FROM diskquota.table_size - WHERE tableid=(SELECT oid FROM pg_class WHERE relname='t_aoco') and segid=-1; - tableid | size ----------+-------- - t_aoco | 557584 -(1 row) - -SELECT pg_table_size('t_aoco'); - pg_table_size ---------------- - 557584 -(1 row) - --- 2. Test that we are able to perform quota limit on appendonly tables. -SELECT diskquota.set_schema_quota('s_appendonly', '2 MB'); - set_schema_quota ------------------- - -(1 row) - -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect success. -INSERT INTO t_ao SELECT generate_series(1, 100000); -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- expect fail. 
-INSERT INTO t_ao SELECT generate_series(1, 10); -ERROR: schema's disk space quota exceeded with name: s_appendonly -INSERT INTO t_aoco SELECT generate_series(1, 10); -ERROR: schema's disk space quota exceeded with name: s_appendonly -DROP TABLE t_ao; -DROP TABLE t_aoco; -SET search_path TO DEFAULT; -DROP SCHEMA s_appendonly; diff --git a/gpcontrib/diskquota/tests/regress/expected7/test_init_table_size_table.out b/gpcontrib/diskquota/tests/regress/expected7/test_init_table_size_table.out deleted file mode 100644 index 38a45374795..00000000000 --- a/gpcontrib/diskquota/tests/regress/expected7/test_init_table_size_table.out +++ /dev/null @@ -1,71 +0,0 @@ --- heap table -CREATE TABLE t(i int) DISTRIBUTED BY (i); -INSERT INTO t SELECT generate_series(1, 100000); --- heap table index -CREATE INDEX idx on t(i); --- toast table -CREATE TABLE toast(t text) DISTRIBUTED BY (t); -INSERT INTO toast SELECT repeat('a', 10000) FROM generate_series(1, 1000); --- toast table index -CREATE INDEX toast_idx on toast(t); --- AO table -CREATE TABLE ao (i int) WITH (appendonly=true) DISTRIBUTED BY (i); -INSERT INTO ao SELECT generate_series(1, 100000); --- AO table index -CREATE INDEX ao_idx on ao(i); --- AOCS table -CREATE TABLE aocs (i int, t text) WITH (appendonly=true, orientation=column) DISTRIBUTED BY (i); -INSERT INTO aocs SELECT i, repeat('a', 1000) FROM generate_series(1, 10000) AS i; --- AOCS table index -CREATE INDEX aocs_idx on aocs(i); -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- Tables here are fetched by diskquota_fetch_table_stat() -SELECT tableid::regclass, size, segid -FROM diskquota.table_size -WHERE segid = -1 AND tableid::regclass::name NOT LIKE '%.%' -ORDER BY tableid; - tableid | size | segid ------------+----------+------- - t | 3932160 | -1 - idx | 2490368 | -1 - toast | 393216 | -1 - toast_idx | 327680 | -1 - ao | 1558696 | -1 - ao_idx | 2490368 | -1 - aocs | 10649752 | -1 - aocs_idx | 524288 | -1 -(8 rows) - --- init diskquota.table_size -SELECT diskquota.init_table_size_table(); - init_table_size_table ------------------------ - -(1 row) - --- diskquota.table_size should not change after init_table_size_table() -SELECT tableid::regclass, size, segid -FROM diskquota.table_size -WHERE segid = -1 AND tableid::regclass::name NOT LIKE '%.%' -ORDER BY tableid; - tableid | size | segid ------------+----------+------- - t | 3932160 | -1 - idx | 2490368 | -1 - toast | 393216 | -1 - toast_idx | 327680 | -1 - ao | 1558696 | -1 - ao_idx | 2490368 | -1 - aocs | 10649752 | -1 - aocs_idx | 524288 | -1 -(8 rows) - -DROP TABLE t; -DROP TABLE toast; -DROP TABLE ao; -DROP TABLE aocs; diff --git a/gpcontrib/diskquota/tests/regress/expected7/test_rejectmap.out b/gpcontrib/diskquota/tests/regress/expected7/test_rejectmap.out deleted file mode 100644 index f7dbccbe783..00000000000 --- a/gpcontrib/diskquota/tests/regress/expected7/test_rejectmap.out +++ /dev/null @@ -1,292 +0,0 @@ --- --- This file contains tests for dispatching and quering rejectmap. --- -CREATE SCHEMA s_rejectmap; -SET search_path TO s_rejectmap; --- This function replaces the oid appears in the auxiliary relation's name --- with the corresponding relname of that oid. 
-CREATE OR REPLACE FUNCTION replace_oid_with_relname(given_name text) - RETURNS text AS $$ - BEGIN - RETURN COALESCE( - REGEXP_REPLACE(given_name, - '^(pg_toast_|pg_aoseg_|pg_aovisimap_|pg_aoblkdir_|pg_aocsseg_)\d+', - '\1' || - (SELECT relname FROM pg_class - WHERE oid=REGEXP_REPLACE(given_name, '\D', '', 'g')::oid), 'g'), given_name); - END; -$$ LANGUAGE plpgsql; --- this function return valid tablespaceoid. --- For role/namespace quota, return as it is. --- For namespace_tablespace/role_tablespace quota, return non-zero tablespaceoid. -CREATE OR REPLACE FUNCTION get_real_tablespace_oid(block_type text, tablespaceoid oid) - RETURNS oid AS -$$ -BEGIN - CASE - WHEN (block_type = 'NAMESPACE') OR (block_type = 'ROLE') THEN RETURN tablespaceoid; - ELSE RETURN ( - CASE tablespaceoid - WHEN 0 THEN (SELECT dattablespace FROM pg_database WHERE datname = CURRENT_DATABASE()) - ELSE - tablespaceoid - END - ); - END CASE; -END; -$$ LANGUAGE plpgsql; -CREATE OR REPLACE FUNCTION block_relation_on_seg0(rel regclass, block_type text) - RETURNS void AS $$ - DECLARE - bt int; - targetoid oid; - tablespaceoid oid; - BEGIN - SELECT reltablespace INTO tablespaceoid FROM pg_class WHERE relname=rel::text; - CASE block_type - WHEN 'NAMESPACE' THEN - bt = 0; - SELECT relnamespace INTO targetoid - FROM pg_class WHERE relname=rel::text; - WHEN 'ROLE' THEN - bt = 1; - SELECT relowner INTO targetoid - FROM pg_class WHERE relname=rel::text; - WHEN 'NAMESPACE_TABLESPACE' THEN - bt = 2; - SELECT relnamespace INTO targetoid - FROM pg_class WHERE relname=rel::text; - WHEN 'ROLE_TABLESPACE' THEN - bt = 3; - SELECT relowner INTO targetoid - FROM pg_class WHERE relname=rel::text; - END CASE; - PERFORM diskquota.refresh_rejectmap( - ARRAY[ - ROW(targetoid, - (SELECT oid FROM pg_database WHERE datname=current_database()), - (SELECT get_real_tablespace_oid(block_type, tablespaceoid)), - bt, - false) - ]::diskquota.rejectmap_entry[], - ARRAY[rel]::oid[]) - FROM gp_dist_random('gp_id') WHERE gp_segment_id=0; - END; $$ -LANGUAGE 'plpgsql'; --- --- 1. Create an ordinary table and add its oid to rejectmap on seg0. --- Check that it's relfilenode is blocked on seg0 by various conditions. --- -CREATE TABLE blocked_t1(i int) DISTRIBUTED BY (i); --- Insert an entry for blocked_t1 to rejectmap on seg0. -SELECT block_relation_on_seg0('blocked_t1'::regclass, 'NAMESPACE'::text); - block_relation_on_seg0 ------------------------- - -(1 row) - --- Shows that the relfilenode of blocked_t1 is blocked on seg0 by its namespace. -SELECT rel.relname, be.target_type, (be.target_oid=rel.relnamespace) AS namespace_matched - FROM gp_dist_random('pg_class') AS rel, - gp_dist_random('diskquota.rejectmap') AS be - WHERE rel.relfilenode=be.relnode AND be.relnode<>0 AND rel.gp_segment_id=be.segid; - relname | target_type | namespace_matched -------------+-----------------+------------------- - blocked_t1 | NAMESPACE_QUOTA | t -(1 row) - --- Insert an entry for blocked_t1 to rejectmap on seg0. -SELECT block_relation_on_seg0('blocked_t1'::regclass, 'ROLE'::text); - block_relation_on_seg0 ------------------------- - -(1 row) - --- Shows that the relfilenode of blocked_t1 is blocked on seg0 by its owner. 
-SELECT rel.relname, be.target_type, (be.target_oid=rel.relowner) AS owner_matched - FROM gp_dist_random('pg_class') AS rel, - gp_dist_random('diskquota.rejectmap') AS be - WHERE rel.relfilenode=be.relnode AND be.relnode<>0 AND rel.gp_segment_id=be.segid; - relname | target_type | owner_matched -------------+-------------+--------------- - blocked_t1 | ROLE_QUOTA | t -(1 row) - --- Create a tablespace to test the rest of blocking types. -\! mkdir -p /tmp/blocked_space -CREATE TABLESPACE blocked_space LOCATION '/tmp/blocked_space'; -ALTER TABLE blocked_t1 SET TABLESPACE blocked_space; --- Insert an entry for blocked_t1 to rejectmap on seg0. -SELECT block_relation_on_seg0('blocked_t1'::regclass, 'NAMESPACE_TABLESPACE'::text); - block_relation_on_seg0 ------------------------- - -(1 row) - --- Shows that the relfilenode of blocked_t1 is blocked on seg0 by its namespace and tablespace. -SELECT rel.relname, be.target_type, - (be.target_oid=rel.relnamespace) AS namespace_matched, - (be.tablespace_oid=rel.reltablespace) AS tablespace_matched - FROM gp_dist_random('pg_class') AS rel, - gp_dist_random('diskquota.rejectmap') AS be - WHERE rel.relfilenode=be.relnode AND be.relnode<>0 AND rel.gp_segment_id=be.segid; - relname | target_type | namespace_matched | tablespace_matched -------------+----------------------------+-------------------+-------------------- - blocked_t1 | NAMESPACE_TABLESPACE_QUOTA | t | t -(1 row) - --- Insert an entry for blocked_t1 to rejectmap on seg0. -SELECT block_relation_on_seg0('blocked_t1'::regclass, 'ROLE_TABLESPACE'::text); - block_relation_on_seg0 ------------------------- - -(1 row) - --- Shows that the relfilenode of blocked_t1 is blocked on seg0 by its owner and tablespace. -SELECT rel.relname, be.target_type, - (be.target_oid=rel.relowner) AS owner_matched, - (be.tablespace_oid=rel.reltablespace) AS tablespace_matched - FROM gp_dist_random('pg_class') AS rel, - gp_dist_random('diskquota.rejectmap') AS be - WHERE rel.relfilenode=be.relnode AND be.relnode<>0 AND rel.gp_segment_id=be.segid; - relname | target_type | owner_matched | tablespace_matched -------------+-----------------------+---------------+-------------------- - blocked_t1 | ROLE_TABLESPACE_QUOTA | t | t -(1 row) - --- --- 2. Test that the relfilenodes of toast relation together with its --- index are blocked on seg0. --- -CREATE TABLE blocked_t2(i text) DISTRIBUTED BY (i); --- Insert an entry for blocked_t2 to rejectmap on seg0. -SELECT block_relation_on_seg0('blocked_t2'::regclass, 'NAMESPACE'::text); - block_relation_on_seg0 ------------------------- - -(1 row) - --- Shows that the relfilenodes of blocked_t2 together with its toast relation and toast --- index relation are blocked on seg0 by its namespace. -SELECT replace_oid_with_relname(rel.relname), - rel.relkind, be.target_type, - (be.target_oid=rel.relnamespace) AS namespace_matched - FROM gp_dist_random('pg_class') AS rel, - gp_dist_random('diskquota.rejectmap') AS be - WHERE rel.relfilenode=be.relnode AND be.relnode<>0 AND rel.gp_segment_id=be.segid - ORDER BY rel.relname DESC; - replace_oid_with_relname | relkind | target_type | namespace_matched ----------------------------+---------+-----------------+------------------- - pg_toast_blocked_t2_index | i | NAMESPACE_QUOTA | f - pg_toast_blocked_t2 | t | NAMESPACE_QUOTA | f - blocked_t2 | r | NAMESPACE_QUOTA | t -(3 rows) - --- --- 3. Test that the relfilenodes of appendonly relation (row oriented) together with its --- auxiliary relations are blocked on seg0. 
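-- Background for sections 3-5: an append-only table is backed by auxiliary
-- relations (pg_aoseg_*/pg_aocsseg_*, pg_aovisimap_*, pg_aoblkdir_* and their
-- indexes), and the rejectmap must block the relfilenodes of all of them, not
-- only the primary table. A sketch of the per-segment check these tests rely
-- on, assuming a rejectmap entry was already dispatched to seg0:
SELECT rel.relname, be.target_type
  FROM gp_dist_random('pg_class') AS rel,
       gp_dist_random('diskquota.rejectmap') AS be
 WHERE rel.relfilenode = be.relnode      -- match on the physical relfilenode
   AND be.relnode <> 0
   AND rel.gp_segment_id = be.segid;     -- keep only entries for that segment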
--- -CREATE TABLE blocked_t3(i int) WITH (appendonly=true) DISTRIBUTED BY (i); -CREATE INDEX blocked_t3_index ON blocked_t3(i); --- Insert an entry for blocked_t3 to rejectmap on seg0. -SELECT block_relation_on_seg0('blocked_t3'::regclass, 'NAMESPACE'::text); - block_relation_on_seg0 ------------------------- - -(1 row) - --- Shows that the relfilenodes of blocked_t3 together with its appendonly relation and appendonly --- index relations are blocked on seg0 by its namespace. -SELECT replace_oid_with_relname(rel.relname), - rel.relkind, be.target_type, - (be.target_oid=rel.relnamespace) AS namespace_matched - FROM gp_dist_random('pg_class') AS rel, - gp_dist_random('diskquota.rejectmap') AS be - WHERE rel.relfilenode=be.relnode AND be.relnode<>0 AND rel.gp_segment_id=be.segid - ORDER BY rel.relname DESC; - replace_oid_with_relname | relkind | target_type | namespace_matched --------------------------------+---------+-----------------+------------------- - pg_aovisimap_blocked_t3_index | i | NAMESPACE_QUOTA | f - pg_aovisimap_blocked_t3 | M | NAMESPACE_QUOTA | f - pg_aoseg_blocked_t3 | o | NAMESPACE_QUOTA | f - pg_aoblkdir_blocked_t3_index | i | NAMESPACE_QUOTA | f - pg_aoblkdir_blocked_t3 | b | NAMESPACE_QUOTA | f - blocked_t3 | r | NAMESPACE_QUOTA | t -(6 rows) - --- --- 4. Test that the relfilenodes of appendonly relation (column oriented) together with its --- auxiliary relations are blocked on seg0. --- -CREATE TABLE blocked_t4(i int) WITH (appendonly=true, orientation=column) DISTRIBUTED BY (i); -CREATE INDEX blocked_t4_index ON blocked_t4(i); --- Insert an entry for blocked_t4 to rejectmap on seg0. -SELECT block_relation_on_seg0('blocked_t4'::regclass, 'NAMESPACE'::text); - block_relation_on_seg0 ------------------------- - -(1 row) - --- Shows that the relfilenodes of blocked_t4 together with its appendonly relation and appendonly --- index relation are blocked on seg0 by its namespace. -SELECT replace_oid_with_relname(rel.relname), - rel.relkind, be.target_type, - (be.target_oid=rel.relnamespace) AS namespace_matched - FROM gp_dist_random('pg_class') AS rel, - gp_dist_random('diskquota.rejectmap') AS be - WHERE rel.relfilenode=be.relnode AND be.relnode<>0 AND rel.gp_segment_id=be.segid - ORDER BY rel.relname DESC; - replace_oid_with_relname | relkind | target_type | namespace_matched --------------------------------+---------+-----------------+------------------- - pg_aovisimap_blocked_t4_index | i | NAMESPACE_QUOTA | f - pg_aovisimap_blocked_t4 | M | NAMESPACE_QUOTA | f - pg_aocsseg_blocked_t4 | o | NAMESPACE_QUOTA | f - pg_aoblkdir_blocked_t4_index | i | NAMESPACE_QUOTA | f - pg_aoblkdir_blocked_t4 | b | NAMESPACE_QUOTA | f - blocked_t4 | r | NAMESPACE_QUOTA | t -(6 rows) - --- --- 5. Test that the relfilenodes of toast appendonly relation (row oriented) together with its --- auxiliary relations are blocked on seg0. --- -CREATE TABLE blocked_t5(i text) WITH (appendonly=true, orientation=column) DISTRIBUTED BY (i); -CREATE INDEX blocked_t5_index ON blocked_t5(i); --- Insert an entry for blocked_t5 to rejectmap on seg0. -SELECT block_relation_on_seg0('blocked_t5'::regclass, 'NAMESPACE'::text); - block_relation_on_seg0 ------------------------- - -(1 row) - --- Shows that the relfilenodes of blocked_t5 together with its toast relation, toast --- index relation and appendonly relations are blocked on seg0 by its namespace. 
-SELECT replace_oid_with_relname(rel.relname), - rel.relkind, be.target_type, - (be.target_oid=rel.relnamespace) AS namespace_matched - FROM gp_dist_random('pg_class') AS rel, - gp_dist_random('diskquota.rejectmap') AS be - WHERE rel.relfilenode=be.relnode AND be.relnode<>0 AND rel.gp_segment_id=be.segid - ORDER BY rel.relname DESC; - replace_oid_with_relname | relkind | target_type | namespace_matched --------------------------------+---------+-----------------+------------------- - pg_aovisimap_blocked_t5_index | i | NAMESPACE_QUOTA | f - pg_aovisimap_blocked_t5 | M | NAMESPACE_QUOTA | f - pg_aocsseg_blocked_t5 | o | NAMESPACE_QUOTA | f - pg_aoblkdir_blocked_t5_index | i | NAMESPACE_QUOTA | f - pg_aoblkdir_blocked_t5 | b | NAMESPACE_QUOTA | f - blocked_t5 | r | NAMESPACE_QUOTA | t -(6 rows) - --- Do some clean-ups. -DROP FUNCTION replace_oid_with_relname(text); -DROP FUNCTION block_relation_on_seg0(regclass, text); -DROP FUNCTION get_real_tablespace_oid(text, oid); -DROP TABLE blocked_t1; -DROP TABLE blocked_t2; -DROP TABLE blocked_t3; -DROP TABLE blocked_t4; -DROP TABLE blocked_t5; -DROP TABLESPACE blocked_space; -SET search_path TO DEFAULT; -DROP SCHEMA s_rejectmap; diff --git a/gpcontrib/diskquota/tests/regress/expected7/test_relation_cache.out b/gpcontrib/diskquota/tests/regress/expected7/test_relation_cache.out deleted file mode 100644 index 5f0c3124066..00000000000 --- a/gpcontrib/diskquota/tests/regress/expected7/test_relation_cache.out +++ /dev/null @@ -1,127 +0,0 @@ --- init -CREATE OR REPLACE FUNCTION diskquota.check_relation_cache() -RETURNS boolean -as $$ -declare t1 oid[]; -declare t2 oid[]; -begin -t1 := (select array_agg(distinct((a).relid)) from diskquota.show_relation_cache_all_seg() as a where (a).relid != (a).primary_table_oid); -t2 := (select distinct((a).auxrel_oid) from diskquota.show_relation_cache_all_seg() as a where (a).relid = (a).primary_table_oid); -return t1 = t2; -end; -$$ LANGUAGE plpgsql; --- heap table -begin; -create table t(i int) DISTRIBUTED BY (i); -insert into t select generate_series(1, 100000); -select count(*) from diskquota.show_relation_cache_all_seg(); - count -------- - 3 -(1 row) - -commit; -select diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -select count(*) from diskquota.show_relation_cache_all_seg(); - count -------- - 0 -(1 row) - -drop table t; --- toast table -begin; -create table t(t text) DISTRIBUTED BY (t); -insert into t select array(select * from generate_series(1,1000)) from generate_series(1, 1000); -select count(*) from diskquota.show_relation_cache_all_seg(); - count -------- - 9 -(1 row) - -select diskquota.check_relation_cache(); - check_relation_cache ----------------------- - t -(1 row) - -commit; -select diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -select count(*) from diskquota.show_relation_cache_all_seg(); - count -------- - 0 -(1 row) - -drop table t; --- AO table -begin; -create table t(a int, b text) with(appendonly=true) DISTRIBUTED BY (a); -insert into t select generate_series(1,1000) as a, repeat('a', 1000) as b; -select count(*) from diskquota.show_relation_cache_all_seg(); - count -------- - 18 -(1 row) - -select diskquota.check_relation_cache(); - check_relation_cache ----------------------- - t -(1 row) - -commit; -select diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -select count(*) from 
diskquota.show_relation_cache_all_seg(); - count -------- - 0 -(1 row) - -drop table t; --- AOCS table -begin; -create table t(a int, b text) with(appendonly=true, orientation=column) DISTRIBUTED BY (a); -insert into t select generate_series(1,1000) as a, repeat('a', 1000) as b; -select count(*) from diskquota.show_relation_cache_all_seg(); - count -------- - 12 -(1 row) - -select diskquota.check_relation_cache(); - check_relation_cache ----------------------- - t -(1 row) - -commit; -select diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -select count(*) from diskquota.show_relation_cache_all_seg(); - count -------- - 0 -(1 row) - -drop table t; -DROP FUNCTION diskquota.check_relation_cache(); diff --git a/gpcontrib/diskquota/tests/regress/expected7/test_uncommitted_table_size.out b/gpcontrib/diskquota/tests/regress/expected7/test_uncommitted_table_size.out deleted file mode 100644 index 43daf5ef234..00000000000 --- a/gpcontrib/diskquota/tests/regress/expected7/test_uncommitted_table_size.out +++ /dev/null @@ -1,236 +0,0 @@ --- temp table -begin; -CREATE TEMP TABLE t1(i int); -NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. -HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. -INSERT INTO t1 SELECT generate_series(1, 100000); -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 't1'::regclass and segid = -1; - tableid | size | segid ----------+---------+------- - t1 | 3932160 | -1 -(1 row) - -SELECT pg_table_size('t1'); - pg_table_size ---------------- - 3932160 -(1 row) - -commit; -DROP table t1; --- heap table -begin; -CREATE TABLE t2(i int) DISTRIBUTED BY (i); -INSERT INTO t2 SELECT generate_series(1, 100000); -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 't2'::regclass and segid = -1; - tableid | size | segid ----------+---------+------- - t2 | 3932160 | -1 -(1 row) - -SELECT pg_table_size('t2'); - pg_table_size ---------------- - 3932160 -(1 row) - -commit; --- heap table index -begin; -CREATE INDEX idx2 on t2(i); -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'idx2'::regclass and segid = -1; - tableid | size | segid ----------+---------+------- - idx2 | 2490368 | -1 -(1 row) - -SELECT pg_table_size('idx2'); - pg_table_size ---------------- - 2490368 -(1 row) - -commit; -DROP table t2; --- toast table -begin; -CREATE TABLE t3(t text) DISTRIBUTED BY (t); -INSERT INTO t3 SELECT repeat('a', 10000) FROM generate_series(1, 1000); -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 't3'::regclass and segid = -1; - tableid | size | segid ----------+--------+------- - t3 | 393216 | -1 -(1 row) - -SELECT pg_table_size('t3'); - pg_table_size ---------------- - 393216 -(1 row) - -commit; -DROP table t3; --- AO table -begin; -CREATE TABLE ao (i int) 
WITH (appendonly=true) DISTRIBUTED BY (i); -INSERT INTO ao SELECT generate_series(1, 100000); -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -SELECT (SELECT size FROM diskquota.table_size WHERE tableid = 'ao'::regclass and segid = -1)= - (SELECT pg_table_size('ao')); - ?column? ----------- - t -(1 row) - -commit; --- AO table index -begin; -CREATE INDEX ao_idx on ao(i); -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'ao_idx'::regclass and segid = -1; - tableid | size | segid ----------+---------+------- - ao_idx | 2490368 | -1 -(1 row) - -SELECT pg_table_size('ao_idx'); - pg_table_size ---------------- - 2490368 -(1 row) - -SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'ao'::regclass and segid = -1; - tableid | size | segid ----------+---------+------- - ao | 1558696 | -1 -(1 row) - -SELECT pg_table_size('ao'); - pg_table_size ---------------- - 1558696 -(1 row) - -commit; -DROP TABLE ao; --- AO table CTAS -begin; -CREATE TABLE ao (i) WITH(appendonly=true) AS SELECT generate_series(1, 10000) DISTRIBUTED BY (i); -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -SELECT (SELECT size FROM diskquota.table_size WHERE tableid = 'ao'::regclass and segid = -1)= - (SELECT pg_table_size('ao')); - ?column? ----------- - t -(1 row) - -commit; -DROP TABLE ao; --- AOCS table -begin; -CREATE TABLE aocs (i int, t text) WITH (appendonly=true, orientation=column) DISTRIBUTED BY (i); -INSERT INTO aocs SELECT i, repeat('a', 1000) FROM generate_series(1, 10000) AS i; -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'aocs'::regclass and segid = -1; - tableid | size | segid ----------+----------+------- - aocs | 10322072 | -1 -(1 row) - -SELECT pg_table_size('aocs'); - pg_table_size ---------------- - 10322072 -(1 row) - -commit; --- AOCS table index -begin; -CREATE INDEX aocs_idx on aocs(i); -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'aocs_idx'::regclass and segid = -1; - tableid | size | segid -----------+--------+------- - aocs_idx | 524288 | -1 -(1 row) - -SELECT pg_table_size('aocs_idx'); - pg_table_size ---------------- - 524288 -(1 row) - -commit; -DROP TABLE aocs; --- AOCS table CTAS -begin; -CREATE TABLE aocs WITH(appendonly=true, orientation=column) AS SELECT i, array(select * from generate_series(1,1000)) FROM generate_series(1, 100) AS i DISTRIBUTED BY (i); -SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - -SELECT tableid::regclass, size, segid FROM diskquota.table_size WHERE tableid = 'aocs'::regclass and segid = -1; - tableid | size | segid ----------+--------+------- - aocs | 632864 | -1 -(1 row) - -SELECT pg_table_size('aocs'); - pg_table_size ---------------- - 632864 -(1 row) - -commit; -DROP TABLE aocs; diff --git a/gpcontrib/diskquota/upgrade_test/CMakeLists.txt b/gpcontrib/diskquota/upgrade_test/CMakeLists.txt index bf96af5f288..5aef39535ae 100644 --- a/gpcontrib/diskquota/upgrade_test/CMakeLists.txt 
+++ b/gpcontrib/diskquota/upgrade_test/CMakeLists.txt @@ -1,23 +1,6 @@ include(${CMAKE_SOURCE_DIR}/cmake/Regress.cmake) -if(NOT DEFINED DISKQUOTA_DDL_CHANGE_CHECK) - set(DISKQUOTA_DDL_CHANGE_CHECK ON CACHE - STRING "Skip the DDL updates check. Should not be disabled on CI" FORCE) -endif() - -if (${GP_MAJOR_VERSION} EQUAL 6) - list(APPEND schedule_files - ${CMAKE_CURRENT_SOURCE_DIR}/schedule_1.0--2.0 - ${CMAKE_CURRENT_SOURCE_DIR}/schedule_2.0--1.0 - ${CMAKE_CURRENT_SOURCE_DIR}/schedule_2.0--2.1 - ${CMAKE_CURRENT_SOURCE_DIR}/schedule_2.1--2.0 - ${CMAKE_CURRENT_SOURCE_DIR}/schedule_2.1--2.2 - ${CMAKE_CURRENT_SOURCE_DIR}/schedule_2.2--2.1 - ) - set(EXPECTED_DIR "${CMAKE_CURRENT_SOURCE_DIR}/expected") -else() - set(EXPECTED_DIR "${CMAKE_CURRENT_SOURCE_DIR}/expected7") -endif() +set(EXPECTED_DIR "${CMAKE_CURRENT_SOURCE_DIR}/expected") list(APPEND schedule_files ${CMAKE_CURRENT_SOURCE_DIR}/schedule_2.2--2.3 ${CMAKE_CURRENT_SOURCE_DIR}/schedule_2.3--2.2 @@ -37,74 +20,6 @@ regresstarget_add( REGRESS_OPTS --dbname=contrib_regression) -execute_process( - COMMAND git describe --tags --abbrev=0 - WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} - OUTPUT_VARIABLE latest_tag - OUTPUT_STRIP_TRAILING_WHITESPACE -) - -# check whether DDL file (*.sql) is modified -file(GLOB ddl_files ${DISKQUOTA_DDL_DIR}/*.sql) -foreach(ddl IN LISTS ddl_files) - cmake_path(GET ddl FILENAME ddl) - execute_process( - COMMAND - git diff ${latest_tag} --exit-code ${ddl} - OUTPUT_QUIET - WORKING_DIRECTORY ${DISKQUOTA_DDL_DIR} - RESULT_VARIABLE "${ddl}_modified" - ) - - if("${${ddl}_modified}") - message( - NOTICE - "compared to ${latest_tag}, the DDL file ${ddl} is modified, checking if upgrade test is needed." - ) - set(DISKQUOTA_DDL_MODIFIED TRUE) - endif() -endforeach() - -# if DDL file modified, insure the last release file passed in -if(DISKQUOTA_DDL_CHANGE_CHECK AND DISKQUOTA_DDL_MODIFIED AND NOT DEFINED DISKQUOTA_LAST_RELEASE_PATH) - message( - FATAL_ERROR - "DDL file modify detected, upgrade test is required. Add -DDISKQUOTA_LAST_RELEASE_PATH=//diskquota--_.tar.gz. 
And re-try the generation" - ) -endif() - -# check if current version is compatible with the upgrade strategy -if(DISKQUOTA_DDL_MODIFIED AND DEFINED DISKQUOTA_LAST_RELEASE_PATH) - message(NOTICE "current version ${DISKQUOTA_VERSION}") - message(NOTICE "last version ${DISKQUOTA_LAST_VERSION}") - - # if 1.0.a = 1.0.b reject - if("${DISKQUOTA_MAJOR_VERSION}.${DISKQUOTA_MINOR_VERSION}" STREQUAL - "${DISKQUOTA_LAST_MAJOR_VERSION}.${DISKQUOTA_LAST_MINOR_VERSION}") - message(FATAL_ERROR "should bump at last one minor version") - endif() - - # if 1.0.a to 1.2.b reject - math(EXPR DISKQUOTA_NEXT_MINOR_VERSION "${DISKQUOTA_LAST_MINOR_VERSION} + 1") - if(("${DISKQUOTA_MAJOR_VERSION}" STREQUAL "${DISKQUOTA_LAST_MAJOR_VERSION}") - AND (NOT "${DISKQUOTA_MINOR_VERSION}" STREQUAL - "${DISKQUOTA_NEXT_MINOR_VERSION}")) - message(FATAL_ERROR "should not skip any minor version") - endif() - - # if 1.a.a to 3.a.a reject - math(EXPR DISKQUOTA_NEXT_MAJOR_VERSION "${DISKQUOTA_LAST_MAJOR_VERSION} + 1") - if((NOT "${DISKQUOTA_MAJOR_VERSION}" STREQUAL - "${DISKQUOTA_LAST_MAJOR_VERSION}") - AND (NOT "${DISKQUOTA_NEXT_MAJOR_VERSION}" STREQUAL - "${DISKQUOTA_MAJOR_VERSION}")) - message(FATAL_ERROR "should not skip any major version") - endif() - - message( - NOTICE - "upgrade from ${DISKQUOTA_LAST_VERSION} to ${DISKQUOTA_VERSION} is available" - ) -endif() - -# upgrade test is not needed in feature development +# NOTE: DDL change detection and upgrade version validation logic was removed +# as diskquota is now part of the Cloudberry source tree. Upgrade testing +# should be handled as part of the Cloudberry release process if needed. diff --git a/gpcontrib/diskquota/upgrade_test/alter_test.sh b/gpcontrib/diskquota/upgrade_test/alter_test.sh index ba6268c2b28..15046a3ed73 100755 --- a/gpcontrib/diskquota/upgrade_test/alter_test.sh +++ b/gpcontrib/diskquota/upgrade_test/alter_test.sh @@ -48,12 +48,6 @@ test_alter_from() { psql -d diskquota_alter_test -c "DROP EXTENSION diskquota" } -_determine_gp_major_version() { - local includedir="$(pg_config --includedir)" - GP_MAJORVERSION=$(grep -oP '.*GP_MAJORVERSION.*"\K[^"]+' "${includedir}/pg_config.h") -} -_determine_gp_major_version - compare_versions() { # implementing string manipulation local a=${1%%.*} b=${2%%.*} @@ -68,6 +62,7 @@ compare_versions() { # Find all minor versions before current one +# The first version of diskquota for Cloudberry is 2.2 while IFS= read -r ver; do if [ "${ver}" = "${CUR_VERSION}" ]; then break @@ -75,15 +70,13 @@ while IFS= read -r ver; do if [ "${ver}" = "0.8" ]; then continue fi - # The first version of diskquota for GP7 is 2.2 - if [ "$GP_MAJORVERSION" -eq "7" ]; then - set +e - compare_versions $ver "2.2" - cmp_res=$? - set -e - if [ $cmp_res -eq "2" ]; then - continue - fi + # Skip versions before 2.2 (the legacy GP6 era) + set +e + compare_versions $ver "2.2" + cmp_res=$? 
+ set -e + if [ $cmp_res -eq "2" ]; then + continue fi VERSIONS_TO_TEST+=("${ver}") done <<< "$ALL_VERSIONS" diff --git a/gpcontrib/diskquota/upgrade_test/expected/1.0_catalog.out b/gpcontrib/diskquota/upgrade_test/expected/1.0_catalog.out deleted file mode 100644 index 93b7e0ab6ab..00000000000 --- a/gpcontrib/diskquota/upgrade_test/expected/1.0_catalog.out +++ /dev/null @@ -1,135 +0,0 @@ -CREATE FUNCTION typeid_to_name(oid[]) RETURNS name[] AS ' - WITH io AS ( - SELECT x.i AS index, x.o AS type_id FROM ( - SELECT generate_series(1, array_length($1, 1)) AS i, unnest($1) AS o - ) AS x - ) SELECT array_agg(typname order by io.index) FROM io, pg_type t WHERE io.type_id = t.oid; -' LANGUAGE sql STABLE; --- types -SELECT - t1.typname, - array_agg(t2.typname order by a.atttypid) typname -FROM - pg_namespace n, - pg_class c, - pg_type t1, - pg_type t2, - pg_attribute a -WHERE - n.nspname = 'diskquota' - AND c.oid = t1.typrelid - AND n.oid = t1.typnamespace - AND a.attrelid = c.oid - AND t2.oid = a.atttypid -GROUP BY - t1.typname -ORDER BY - t1.typname; - typname | typname -------------------------------+---------------------------------------------- - diskquota_active_table_type | {int8,oid} - quota_config | {int8,int4,int4,oid,oid,tid,xid,xid,cid,cid} - show_fast_database_size_view | {numeric} - show_fast_role_quota_view | {name,int8,oid,numeric} - show_fast_schema_quota_view | {name,int8,oid,numeric} - state | {int4,int4,oid,tid,xid,xid,cid,cid} - table_size | {int8,int4,oid,oid,tid,xid,xid,cid,cid} -(7 rows) - --- types end --- tables -SELECT - relname, - typeid_to_name(ARRAY[c.reltype]::oid[]) AS reltype, - typeid_to_name(ARRAY[c.reloftype]::oid[]) AS reloftype -FROM - pg_class c, - pg_namespace n -WHERE - c.relnamespace = n.oid - AND n.nspname = 'diskquota' - and c.relkind != 'v' -ORDER BY - relname; - relname | reltype | reloftype ------------------------------+-------------------------------+----------- - diskquota_active_table_type | {diskquota_active_table_type} | - quota_config | {quota_config} | - quota_config_pkey | | - state | {state} | - state_pkey | | - table_size | {table_size} | - table_size_pkey | | -(7 rows) - --- tables end --- UDF -SELECT - proname, - typeid_to_name(ARRAY[prorettype]::oid[]) AS prorettype, - typeid_to_name(proargtypes) AS proargtypes, - typeid_to_name(proallargtypes) AS proallargtypes, - proargmodes, - prosrc, - probin, - proacl -FROM - pg_namespace n, - pg_proc p -WHERE - n.nspname = 'diskquota' - AND n.oid = p.pronamespace -ORDER BY - proname; - proname | prorettype | proargtypes | proallargtypes | proargmodes | prosrc | probin | proacl -----------------------------+-------------------------------+-------------+----------------+-------------+----------------------------+----------------------+-------- - diskquota_fetch_table_stat | {diskquota_active_table_type} | {int4,_oid} | | | diskquota_fetch_table_stat | $libdir/diskquota.so | - init_table_size_table | {void} | | | | init_table_size_table | $libdir/diskquota.so | - set_role_quota | {void} | {text,text} | | | set_role_quota | $libdir/diskquota.so | - set_schema_quota | {void} | {text,text} | | | set_schema_quota | $libdir/diskquota.so | - update_diskquota_db_list | {void} | {oid,int4} | | | update_diskquota_db_list | $libdir/diskquota.so | -(5 rows) - --- UDF end --- views -SELECT - schemaname, - viewname, - definition -FROM - pg_views -WHERE - schemaname = 'diskquota' -ORDER BY - schemaname, viewname; - schemaname | viewname | definition 
-------------+------------------------------+------------------------------------------------------------------------------------------------------------ - diskquota | show_fast_database_size_view | SELECT (( SELECT sum(pg_relation_size((pg_class.oid)::regclass)) AS sum + - | | FROM pg_class + - | | WHERE (pg_class.oid <= (16384)::oid)) + ( SELECT sum(table_size.size) AS sum + - | | FROM diskquota.table_size)) AS dbsize; - diskquota | show_fast_role_quota_view | SELECT pgr.rolname AS role_name, + - | | pgc.relowner AS role_oid, + - | | qc.quotalimitmb AS quota_in_mb, + - | | sum(ts.size) AS rolsize_in_bytes + - | | FROM diskquota.table_size ts, + - | | pg_class pgc, + - | | diskquota.quota_config qc, + - | | pg_roles pgr + - | | WHERE (((pgc.relowner = qc.targetoid) AND (pgc.relowner = pgr.oid)) AND (ts.tableid = pgc.oid)) + - | | GROUP BY pgc.relowner, pgr.rolname, qc.quotalimitmb; - diskquota | show_fast_schema_quota_view | SELECT pgns.nspname AS schema_name, + - | | pgc.relnamespace AS schema_oid, + - | | qc.quotalimitmb AS quota_in_mb, + - | | sum(ts.size) AS nspsize_in_bytes + - | | FROM diskquota.table_size ts, + - | | pg_class pgc, + - | | diskquota.quota_config qc, + - | | pg_namespace pgns + - | | WHERE (((ts.tableid = pgc.oid) AND (qc.targetoid = pgc.relnamespace)) AND (pgns.oid = pgc.relnamespace))+ - | | GROUP BY pgns.nspname, pgc.relnamespace, qc.quotalimitmb + - | | ORDER BY pgns.nspname; -(3 rows) - --- views end -DROP FUNCTION typeid_to_name(oid[]); diff --git a/gpcontrib/diskquota/upgrade_test/expected/1.0_cleanup_quota.out b/gpcontrib/diskquota/upgrade_test/expected/1.0_cleanup_quota.out deleted file mode 100644 index 3935d709fd9..00000000000 --- a/gpcontrib/diskquota/upgrade_test/expected/1.0_cleanup_quota.out +++ /dev/null @@ -1 +0,0 @@ -drop extension diskquota; diff --git a/gpcontrib/diskquota/upgrade_test/expected/1.0_install.out b/gpcontrib/diskquota/upgrade_test/expected/1.0_install.out deleted file mode 100644 index 54f9f94a86a..00000000000 --- a/gpcontrib/diskquota/upgrade_test/expected/1.0_install.out +++ /dev/null @@ -1,14 +0,0 @@ --- cleanup previous diskquota installation -\! gpconfig -c shared_preload_libraries -v '' > /dev/null -\! gpstop -raf > /dev/null -\! dropdb --if-exists diskquota -NOTICE: database "diskquota" does not exist, skipping --- TODO reset all diskquota GUC -\! gpstop -raf > /dev/null --- setup basic environment -\! createdb diskquota -\! gpconfig -c shared_preload_libraries -v 'diskquota.so' > /dev/null -\! gpstop -raf > /dev/null --- TODO setup GUC -\! gpconfig -c diskquota.naptime -v '1' > /dev/null -\! gpstop -raf > /dev/null diff --git a/gpcontrib/diskquota/upgrade_test/expected/1.0_migrate_to_version_1.0.out b/gpcontrib/diskquota/upgrade_test/expected/1.0_migrate_to_version_1.0.out deleted file mode 100644 index 21ffc1e2e12..00000000000 --- a/gpcontrib/diskquota/upgrade_test/expected/1.0_migrate_to_version_1.0.out +++ /dev/null @@ -1,12 +0,0 @@ -\! gpconfig -c shared_preload_libraries -v 'diskquota.so' > /dev/null -\! gpstop -raf > /dev/null -\! gpconfig -s 'shared_preload_libraries' -Values on all segments are consistent -GUC : shared_preload_libraries -Master value: diskquota.so -Segment value: diskquota.so -\c -alter extension diskquota update to '1.0'; --- downgrade to 1.0 need reboot, the version check is not in 1.0 --- worker status is undefined at just downgrade -\! 
gpstop -arf > /dev/null diff --git a/gpcontrib/diskquota/upgrade_test/expected/1.0_set_quota.out b/gpcontrib/diskquota/upgrade_test/expected/1.0_set_quota.out deleted file mode 100644 index 32ffd2dafd6..00000000000 --- a/gpcontrib/diskquota/upgrade_test/expected/1.0_set_quota.out +++ /dev/null @@ -1,34 +0,0 @@ -\!gpconfig -s 'shared_preload_libraries' -Values on all segments are consistent -GUC : shared_preload_libraries -Master value: diskquota.so -Segment value: diskquota.so -create extension diskquota with version '1.0'; -\!sleep 5 --- schema quota -create schema s1; -select diskquota.set_schema_quota('s1', '1 MB'); - set_schema_quota ------------------- - -(1 row) - -create table s1.a(i int) distributed by (i); -insert into s1.a select generate_series(1, 10000000); -- ok, but should fail after upgrade --- role quota -create schema srole; -create role u1 nologin; -create table srole.b (t text) distributed by (t); -alter table srole.b owner to u1; -select diskquota.set_role_quota('u1', '1 MB'); - set_role_quota ----------------- - -(1 row) - -insert into srole.b select generate_series(1,100000); -- ok, but should fail after upgrade -\!sleep 5 --- leaked resource: --- role u1 --- table s1.a, srole.b --- schema s1, srole diff --git a/gpcontrib/diskquota/upgrade_test/expected/1.0_test_in_2.0_quota_create_in_1.0.out b/gpcontrib/diskquota/upgrade_test/expected/1.0_test_in_2.0_quota_create_in_1.0.out deleted file mode 100644 index 5bf36f408e6..00000000000 --- a/gpcontrib/diskquota/upgrade_test/expected/1.0_test_in_2.0_quota_create_in_1.0.out +++ /dev/null @@ -1,10 +0,0 @@ --- need run 1.0_set_quota before run this test --- FIXME add version check here -\!sleep 5 -insert into s1.a select generate_series(1, 100); -- fail -ERROR: schema's disk space quota exceeded with name: s1 -insert into srole.b select generate_series(1, 100); -- fail -ERROR: role's disk space quota exceeded with name: u1 -drop table s1.a, srole.b; -drop schema s1, srole; -drop role u1; diff --git a/gpcontrib/diskquota/upgrade_test/expected/2.0_catalog.out b/gpcontrib/diskquota/upgrade_test/expected/2.0_catalog.out deleted file mode 100644 index 73b0501334f..00000000000 --- a/gpcontrib/diskquota/upgrade_test/expected/2.0_catalog.out +++ /dev/null @@ -1,272 +0,0 @@ -CREATE FUNCTION typeid_to_name(oid[]) RETURNS name[] AS ' - WITH io AS ( - SELECT x.i AS index, x.o AS type_id FROM ( - SELECT generate_series(1, array_length($1, 1)) AS i, unnest($1) AS o - ) AS x - ) SELECT array_agg(typname order by io.index) FROM io, pg_type t WHERE io.type_id = t.oid; -' LANGUAGE sql STABLE; --- types -SELECT - t1.typname, - array_agg(t2.typname order by a.atttypid) typname -FROM - pg_namespace n, - pg_class c, - pg_type t1, - pg_type t2, - pg_attribute a -WHERE - n.nspname = 'diskquota' - AND c.oid = t1.typrelid - AND n.oid = t1.typnamespace - AND a.attrelid = c.oid - AND t2.oid = a.atttypid -GROUP BY - t1.typname -ORDER BY - t1.typname; - typname | typname -----------------------------------------+---------------------------------------------------------------------------------- - diskquota_active_table_type | {int8,int2,oid} - quota_config | {int8,int4,int4,oid,oid,tid,xid,xid,cid,cid,float4} - rejectmap | {bool,int4,text,oid,oid,oid,oid,oid,oid} - rejectmap_entry | {bool,int4,oid,oid,oid} - rejectmap_entry_detail | {bool,int4,text,oid,oid,oid,oid,oid,oid} - relation_cache_detail | {char,int4,int4,oid,oid,oid,oid,oid,oid,oid,_oid} - show_fast_database_size_view | {numeric} - show_fast_role_quota_view | {name,int8,oid,numeric} - 
show_fast_role_tablespace_quota_view | {name,name,int8,oid,oid,numeric} - show_fast_schema_quota_view | {name,int8,oid,numeric} - show_fast_schema_tablespace_quota_view | {name,name,int8,oid,oid,numeric} - show_segment_ratio_quota_view | {name,oid,float4} - state | {int4,int4,oid,tid,xid,xid,cid,cid} - table_size | {int8,int2,int4,oid,oid,tid,xid,xid,cid,cid} - target | {int4,int4,int4,oid,oid,oid,tid,xid,xid,cid,cid} - target_rowid_seq | {bool,bool,name,int8,int8,int8,int8,int8,int8,int8,int4,oid,tid,xid,xid,cid,cid} -(16 rows) - --- types end --- tables -SELECT - relname, - typeid_to_name(ARRAY[c.reltype]::oid[]) as reltype, - typeid_to_name(ARRAY[c.reloftype]::oid[]) as reloftype -FROM - pg_class c, - pg_namespace n -WHERE - c.relnamespace = n.oid - AND n.nspname = 'diskquota' - and c.relkind != 'v' -ORDER BY - relname; - relname | reltype | reloftype ------------------------------+-------------------------------+----------- - diskquota_active_table_type | {diskquota_active_table_type} | - quota_config | {quota_config} | - quota_config_pkey | | - rejectmap_entry | {rejectmap_entry} | - rejectmap_entry_detail | {rejectmap_entry_detail} | - relation_cache_detail | {relation_cache_detail} | - state | {state} | - state_pkey | | - table_size | {table_size} | - table_size_pkey | | - target | {target} | - target_pkey | | - target_rowid_seq | {target_rowid_seq} | -(13 rows) - --- tables end --- UDF -SELECT - proname, - typeid_to_name(ARRAY[prorettype]::oid[]) as prorettype, - typeid_to_name(proargtypes) as proargtypes, - typeid_to_name(proallargtypes) as proallargtypes, - proargmodes, - prosrc, - probin, - proacl -FROM - pg_namespace n, - pg_proc p -WHERE - n.nspname = 'diskquota' - AND n.oid = p.pronamespace - AND p.proname != 'update_diskquota_db_list' -- update_diskquota_db_list in 1.0 can not be dropd, this is acceptable -ORDER BY - proname; - proname | prorettype | proargtypes | proallargtypes | proargmodes | prosrc | probin | proacl ------------------------------+-------------------------------+-------------------------+-----------------+-------------+----------------------------------------------------------------------------------------------------------------------+--------------------------+-------- - diskquota_fetch_table_stat | {diskquota_active_table_type} | {int4,_oid} | | | diskquota_fetch_table_stat | $libdir/diskquota-2.0.so | - init_table_size_table | {void} | | | | init_table_size_table | $libdir/diskquota-2.0.so | - pause | {void} | | | | diskquota_pause | $libdir/diskquota-2.0.so | - pull_all_table_size | {record} | | {oid,int8,int2} | {o,o,o} | pull_all_table_size | $libdir/diskquota-2.0.so | - refresh_rejectmap | {void} | {_rejectmap_entry,_oid} | | | refresh_rejectmap | $libdir/diskquota-2.0.so | - relation_size | {int8} | {regclass} | | | +| | - | | | | | SELECT SUM(size)::bigint FROM ( +| | - | | | | | SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, relstorage) AS size+| | - | | | | | FROM gp_dist_random('pg_class') WHERE oid = relation +| | - | | | | | UNION ALL +| | - | | | | | SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, relstorage) AS size+| | - | | | | | FROM pg_class WHERE oid = relation +| | - | | | | | ) AS t | | - relation_size_local | {int8} | {oid,oid,char,char} | | | relation_size_local | $libdir/diskquota-2.0.so | - resume | {void} | | | | diskquota_resume | $libdir/diskquota-2.0.so | - set_per_segment_quota | {void} | {text,float4} | | | set_per_segment_quota | $libdir/diskquota-2.0.so | 
- set_role_quota | {void} | {text,text} | | | set_role_quota | $libdir/diskquota-2.0.so | - set_role_tablespace_quota | {void} | {text,text,text} | | | set_role_tablespace_quota | $libdir/diskquota-2.0.so | - set_schema_quota | {void} | {text,text} | | | set_schema_quota | $libdir/diskquota-2.0.so | - set_schema_tablespace_quota | {void} | {text,text,text} | | | set_schema_tablespace_quota | $libdir/diskquota-2.0.so | - show_rejectmap | {rejectmap_entry_detail} | | | | show_rejectmap | $libdir/diskquota-2.0.so | - show_relation_cache | {relation_cache_detail} | | | | show_relation_cache | $libdir/diskquota-2.0.so | - show_relation_cache_all_seg | {relation_cache_detail} | | | | +| | - | | | | | WITH relation_cache AS ( +| | - | | | | | SELECT diskquota.show_relation_cache() AS a +| | - | | | | | FROM gp_dist_random('gp_id') +| | - | | | | | ) +| | - | | | | | SELECT (a).* FROM relation_cache; | | - show_worker_epoch | {int8} | | | | show_worker_epoch | $libdir/diskquota-2.0.so | - status | {record} | | {text,text} | {t,t} | diskquota_status | $libdir/diskquota-2.0.so | - wait_for_worker_new_epoch | {bool} | | | | wait_for_worker_new_epoch | $libdir/diskquota-2.0.so | -(19 rows) - --- UDF end --- views -SELECT - schemaname, - viewname, - definition -FROM - pg_views -WHERE - schemaname = 'diskquota' -ORDER by - schemaname, viewname; - schemaname | viewname | definition -------------+----------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------- - diskquota | rejectmap | SELECT bm.target_type, + - | | bm.target_oid, + - | | bm.database_oid, + - | | bm.tablespace_oid, + - | | bm.seg_exceeded, + - | | bm.dbnode, + - | | bm.spcnode, + - | | bm.relnode, + - | | bm.segid + - | | FROM diskquota.show_rejectmap() bm(target_type, target_oid, database_oid, tablespace_oid, seg_exceeded, dbnode, spcnode, relnode, segid); - diskquota | show_fast_database_size_view | SELECT (( SELECT sum(pg_relation_size((pg_class.oid)::regclass)) AS sum + - | | FROM pg_class + - | | WHERE (pg_class.oid <= (16384)::oid)) + ( SELECT sum(table_size.size) AS sum + - | | FROM diskquota.table_size + - | | WHERE (table_size.segid = '-1'::integer))) AS dbsize; - diskquota | show_fast_role_quota_view | WITH quota_usage AS ( + - | | SELECT pg_class.relowner, + - | | sum(table_size.size) AS total_size + - | | FROM diskquota.table_size, + - | | pg_class + - | | WHERE ((table_size.tableid = pg_class.oid) AND (table_size.segid = '-1'::integer)) + - | | GROUP BY pg_class.relowner + - | | ) + - | | SELECT pg_roles.rolname AS role_name, + - | | quota_config.targetoid AS role_oid, + - | | quota_config.quotalimitmb AS quota_in_mb, + - | | COALESCE(quota_usage.total_size, (0)::numeric) AS rolsize_in_bytes + - | | FROM ((diskquota.quota_config + - | | JOIN pg_roles ON ((quota_config.targetoid = pg_roles.oid))) + - | | LEFT JOIN quota_usage ON ((pg_roles.oid = quota_usage.relowner))) + - | | WHERE (quota_config.quotatype = 1); - diskquota | show_fast_role_tablespace_quota_view | WITH default_tablespace AS ( + - | | SELECT pg_database.dattablespace + - | | FROM pg_database + - | | WHERE (pg_database.datname = current_database()) + - | | ), quota_usage AS ( + - | | SELECT pg_class.relowner, + - | | CASE + - | | WHEN (pg_class.reltablespace = (0)::oid) THEN default_tablespace.dattablespace + - | | ELSE pg_class.reltablespace + - | | END AS reltablespace, + - | | sum(table_size.size) AS total_size + - | | FROM 
diskquota.table_size, + - | | pg_class, + - | | default_tablespace + - | | WHERE ((table_size.tableid = pg_class.oid) AND (table_size.segid = '-1'::integer)) + - | | GROUP BY pg_class.relowner, pg_class.reltablespace, default_tablespace.dattablespace + - | | ), full_quota_config AS ( + - | | SELECT target.primaryoid, + - | | target.tablespaceoid, + - | | config.quotalimitmb + - | | FROM diskquota.quota_config config, + - | | diskquota.target target + - | | WHERE (((config.targetoid = (target.rowid)::oid) AND (config.quotatype = target.quotatype)) AND (config.quotatype = 3)) + - | | ) + - | | SELECT pg_roles.rolname AS role_name, + - | | full_quota_config.primaryoid AS role_oid, + - | | pg_tablespace.spcname AS tablespace_name, + - | | full_quota_config.tablespaceoid AS tablespace_oid, + - | | full_quota_config.quotalimitmb AS quota_in_mb, + - | | COALESCE(quota_usage.total_size, (0)::numeric) AS rolsize_tablespace_in_bytes + - | | FROM (((full_quota_config + - | | JOIN pg_roles ON ((full_quota_config.primaryoid = pg_roles.oid))) + - | | JOIN pg_tablespace ON ((full_quota_config.tablespaceoid = pg_tablespace.oid))) + - | | LEFT JOIN quota_usage ON (((pg_roles.oid = quota_usage.relowner) AND (pg_tablespace.oid = quota_usage.reltablespace)))); - diskquota | show_fast_schema_quota_view | WITH quota_usage AS ( + - | | SELECT pg_class.relnamespace, + - | | sum(table_size.size) AS total_size + - | | FROM diskquota.table_size, + - | | pg_class + - | | WHERE ((table_size.tableid = pg_class.oid) AND (table_size.segid = '-1'::integer)) + - | | GROUP BY pg_class.relnamespace + - | | ) + - | | SELECT pg_namespace.nspname AS schema_name, + - | | quota_config.targetoid AS schema_oid, + - | | quota_config.quotalimitmb AS quota_in_mb, + - | | COALESCE(quota_usage.total_size, (0)::numeric) AS nspsize_in_bytes + - | | FROM ((diskquota.quota_config + - | | JOIN pg_namespace ON ((quota_config.targetoid = pg_namespace.oid))) + - | | LEFT JOIN quota_usage ON ((pg_namespace.oid = quota_usage.relnamespace))) + - | | WHERE (quota_config.quotatype = 0); - diskquota | show_fast_schema_tablespace_quota_view | WITH default_tablespace AS ( + - | | SELECT pg_database.dattablespace + - | | FROM pg_database + - | | WHERE (pg_database.datname = current_database()) + - | | ), quota_usage AS ( + - | | SELECT pg_class.relnamespace, + - | | CASE + - | | WHEN (pg_class.reltablespace = (0)::oid) THEN default_tablespace.dattablespace + - | | ELSE pg_class.reltablespace + - | | END AS reltablespace, + - | | sum(table_size.size) AS total_size + - | | FROM diskquota.table_size, + - | | pg_class, + - | | default_tablespace + - | | WHERE ((table_size.tableid = pg_class.oid) AND (table_size.segid = '-1'::integer)) + - | | GROUP BY pg_class.relnamespace, pg_class.reltablespace, default_tablespace.dattablespace + - | | ), full_quota_config AS ( + - | | SELECT target.primaryoid, + - | | target.tablespaceoid, + - | | config.quotalimitmb + - | | FROM diskquota.quota_config config, + - | | diskquota.target target + - | | WHERE (((config.targetoid = (target.rowid)::oid) AND (config.quotatype = target.quotatype)) AND (config.quotatype = 2)) + - | | ) + - | | SELECT pg_namespace.nspname AS schema_name, + - | | full_quota_config.primaryoid AS schema_oid, + - | | pg_tablespace.spcname AS tablespace_name, + - | | full_quota_config.tablespaceoid AS tablespace_oid, + - | | full_quota_config.quotalimitmb AS quota_in_mb, + - | | COALESCE(quota_usage.total_size, (0)::numeric) AS nspsize_tablespace_in_bytes + - | | FROM (((full_quota_config + - | | JOIN 
pg_namespace ON ((full_quota_config.primaryoid = pg_namespace.oid))) + - | | JOIN pg_tablespace ON ((full_quota_config.tablespaceoid = pg_tablespace.oid))) + - | | LEFT JOIN quota_usage ON (((pg_namespace.oid = quota_usage.relnamespace) AND (pg_tablespace.oid = quota_usage.reltablespace)))); - diskquota | show_segment_ratio_quota_view | SELECT pg_tablespace.spcname AS tablespace_name, + - | | pg_tablespace.oid AS tablespace_oid, + - | | quota_config.segratio AS per_seg_quota_ratio + - | | FROM (diskquota.quota_config + - | | JOIN pg_tablespace ON (((quota_config.targetoid = pg_tablespace.oid) AND (quota_config.quotatype = 4)))); -(7 rows) - --- views end -DROP FUNCTION typeid_to_name (oid[]); diff --git a/gpcontrib/diskquota/upgrade_test/expected/2.0_cleanup_quota.out b/gpcontrib/diskquota/upgrade_test/expected/2.0_cleanup_quota.out deleted file mode 100644 index 3935d709fd9..00000000000 --- a/gpcontrib/diskquota/upgrade_test/expected/2.0_cleanup_quota.out +++ /dev/null @@ -1 +0,0 @@ -drop extension diskquota; diff --git a/gpcontrib/diskquota/upgrade_test/expected/2.0_install.out b/gpcontrib/diskquota/upgrade_test/expected/2.0_install.out deleted file mode 100644 index 97593816666..00000000000 --- a/gpcontrib/diskquota/upgrade_test/expected/2.0_install.out +++ /dev/null @@ -1,14 +0,0 @@ --- cleanup previous diskquota installation -\! gpconfig -c shared_preload_libraries -v '' > /dev/null -\! gpstop -raf > /dev/null -\! dropdb --if-exists diskquota -NOTICE: database "diskquota" does not exist, skipping --- TODO reset all diskquota GUC -\! gpstop -raf > /dev/null --- setup basic environment -\! createdb diskquota -\! gpconfig -c shared_preload_libraries -v 'diskquota-2.0.so' > /dev/null -\! gpstop -raf > /dev/null --- TODO setup GUC -\! gpconfig -c diskquota.naptime -v '1' > /dev/null -\! gpstop -raf > /dev/null diff --git a/gpcontrib/diskquota/upgrade_test/expected/2.0_migrate_to_version_2.0.out b/gpcontrib/diskquota/upgrade_test/expected/2.0_migrate_to_version_2.0.out deleted file mode 100644 index ff9b417aea7..00000000000 --- a/gpcontrib/diskquota/upgrade_test/expected/2.0_migrate_to_version_2.0.out +++ /dev/null @@ -1,10 +0,0 @@ -\! gpconfig -c shared_preload_libraries -v 'diskquota-2.0.so' > /dev/null -\! gpstop -raf > /dev/null -\! gpconfig -s 'shared_preload_libraries' -Values on all segments are consistent -GUC : shared_preload_libraries -Master value: diskquota-2.0.so -Segment value: diskquota-2.0.so -\c -alter extension diskquota update to '2.0'; -\! sleep 5 diff --git a/gpcontrib/diskquota/upgrade_test/expected/2.0_set_quota.out b/gpcontrib/diskquota/upgrade_test/expected/2.0_set_quota.out deleted file mode 100644 index ce97cae5581..00000000000 --- a/gpcontrib/diskquota/upgrade_test/expected/2.0_set_quota.out +++ /dev/null @@ -1,61 +0,0 @@ -\!gpconfig -s 'shared_preload_libraries' -Values on all segments are consistent -GUC : shared_preload_libraries -Master value: diskquota-2.0.so -Segment value: diskquota-2.0.so -create extension diskquota with version '2.0'; -\!sleep 5 --- schema quota -create schema s1; -select diskquota.set_schema_quota('s1', '1 MB'); - set_schema_quota ------------------- - -(1 row) - -create table s1.a(i int) distributed by (i); -insert into s1.a select generate_series(1, 10000000); -- ok. 
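-- The quota UDFs exercised in this file all take the limit as a text size such
-- as '1 MB', and enforcement only takes effect after the worker refreshes,
-- which is why the oversized insert above still succeeds. A minimal usage
-- sketch (the names demo_s, demo_u and demo_spc are hypothetical); the
-- tablespace-scoped variants are those listed in 2.0_catalog.out above:
create schema demo_s;
select diskquota.set_schema_quota('demo_s', '1 MB');
create role demo_u nologin;
select diskquota.set_role_quota('demo_u', '1 MB');
-- tablespace-scoped variants:
-- select diskquota.set_schema_tablespace_quota('demo_s', 'demo_spc', '1 MB');
-- select diskquota.set_role_tablespace_quota('demo_u', 'demo_spc', '1 MB');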
--- role quota -create schema srole; -create role u1 nologin; -create table srole.b (t text) distributed by (t); -alter table srole.b owner to u1; -select diskquota.set_role_quota('u1', '1 MB'); - set_role_quota ----------------- - -(1 row) - -insert into srole.b select generate_series(1,100000); -- ok. --- schema tablespace quota -\! mkdir -p /tmp/schemaspc -create schema spcs1; -create tablespace schemaspc location '/tmp/schemaspc'; -select diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc','1 MB'); - set_schema_tablespace_quota ------------------------------ - -(1 row) - -create table spcs1.a(i int) tablespace schemaspc distributed by (i); -insert into spcs1.a select generate_series(1,100000); -- ok. --- role tablespace quota -\! mkdir -p /tmp/rolespc -create tablespace rolespc location '/tmp/rolespc'; -create role rolespcu1 nologin; -create schema rolespcrole; -create table rolespcrole.b (t text) tablespace rolespc distributed by (t); -alter table rolespcrole.b owner to rolespcu1; -select diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '1 MB'); - set_role_tablespace_quota ---------------------------- - -(1 row) - -insert into rolespcrole.b select generate_series(1,100000); -- ok. -\!sleep 5 --- leaked resource: --- role u1, rolespcu1 --- table s1.a, srole.b spcs1.a, rolespcrole.b --- schema s1, srole, spcs1, rolespcrole --- tablespace schemaspc, rolespc diff --git a/gpcontrib/diskquota/upgrade_test/expected/2.0_test_in_1.0_quota_create_in_2.0.out b/gpcontrib/diskquota/upgrade_test/expected/2.0_test_in_1.0_quota_create_in_2.0.out deleted file mode 100644 index 2f9b5714cf8..00000000000 --- a/gpcontrib/diskquota/upgrade_test/expected/2.0_test_in_1.0_quota_create_in_2.0.out +++ /dev/null @@ -1,14 +0,0 @@ --- need run 1.0_set_quota before run this test --- FIXME add version check here -\! sleep 5 -insert into s1.a select generate_series(1, 10000000); -- fail. -ERROR: schema's disk space quota exceeded with name:s1 -insert into srole.b select generate_series(1, 100000); -- fail. -ERROR: role's disk space quota exceeded with name:u1 -insert into rolespcrole.b select generate_series(1, 100000); -- ok. -insert into spcs1.a select generate_series(1, 100000); -- ok. -drop table s1.a, srole.b, spcs1.a, rolespcrole.b; -drop schema s1, srole, spcs1, rolespcrole; -drop tablespace rolespc; -drop tablespace schemaspc; -drop role u1, rolespcu1; diff --git a/gpcontrib/diskquota/upgrade_test/expected/2.0_test_in_2.1_quota_create_in_2.0.out b/gpcontrib/diskquota/upgrade_test/expected/2.0_test_in_2.1_quota_create_in_2.0.out deleted file mode 100644 index a36fcb4f8cd..00000000000 --- a/gpcontrib/diskquota/upgrade_test/expected/2.0_test_in_2.1_quota_create_in_2.0.out +++ /dev/null @@ -1,16 +0,0 @@ --- need run 2.0_set_quota before run this test --- FIXME add version check here -\!sleep 5 -insert into s1.a select generate_series(1, 10000000); -- fail. -ERROR: schema's disk space quota exceeded with name: s1 -insert into srole.b select generate_series(1, 100000); -- fail. -ERROR: role's disk space quota exceeded with name: u1 -insert into rolespcrole.b select generate_series(1, 100000); -- fail. -ERROR: tablespace: rolespc, role: rolespcu1 diskquota exceeded -insert into spcs1.a select generate_series(1, 100000); -- fail. 
-ERROR: tablespace: schemaspc, schema: spcs1 diskquota exceeded -drop table s1.a, srole.b, spcs1.a, rolespcrole.b; -drop schema s1, srole, spcs1, rolespcrole; -drop tablespace rolespc; -drop tablespace schemaspc; -drop role u1, rolespcu1; diff --git a/gpcontrib/diskquota/upgrade_test/expected/2.1_catalog.out b/gpcontrib/diskquota/upgrade_test/expected/2.1_catalog.out deleted file mode 100644 index b22cec877a0..00000000000 --- a/gpcontrib/diskquota/upgrade_test/expected/2.1_catalog.out +++ /dev/null @@ -1,303 +0,0 @@ -CREATE FUNCTION typeid_to_name(oid[]) RETURNS name[] AS ' - WITH io AS ( - SELECT x.i AS index, x.o AS type_id FROM ( - SELECT generate_series(1, array_length($1, 1)) AS i, unnest($1) AS o - ) AS x - ) SELECT array_agg(typname order by io.index) FROM io, pg_type t WHERE io.type_id = t.oid; -' LANGUAGE sql STABLE; --- types -SELECT - t1.typname, - array_agg(t2.typname order by a.atttypid) typname -FROM - pg_namespace n, - pg_class c, - pg_type t1, - pg_type t2, - pg_attribute a -WHERE - n.nspname = 'diskquota' - AND c.oid = t1.typrelid - AND n.oid = t1.typnamespace - AND a.attrelid = c.oid - AND t2.oid = a.atttypid -GROUP BY - t1.typname -ORDER BY - t1.typname; - typname | typname -----------------------------------------+---------------------------------------------------------------------------------- - diskquota_active_table_type | {int8,int2,oid} - quota_config | {int8,int4,int4,oid,oid,tid,xid,xid,cid,cid,float4} - rejectmap | {bool,int4,text,oid,oid,oid,oid,oid,oid} - rejectmap_entry | {bool,int4,oid,oid,oid} - rejectmap_entry_detail | {bool,int4,text,oid,oid,oid,oid,oid,oid} - relation_cache_detail | {char,int4,int4,oid,oid,oid,oid,oid,oid,oid,_oid} - show_all_relation_view | {oid,oid,oid,oid} - show_fast_database_size_view | {numeric} - show_fast_role_quota_view | {name,int8,oid,numeric} - show_fast_role_tablespace_quota_view | {name,name,int8,oid,oid,numeric} - show_fast_schema_quota_view | {name,int8,oid,numeric} - show_fast_schema_tablespace_quota_view | {name,name,int8,oid,oid,numeric} - show_segment_ratio_quota_view | {name,oid,float4} - state | {int4,int4,oid,tid,xid,xid,cid,cid} - table_size | {int8,int2,int4,oid,oid,tid,xid,xid,cid,cid} - target | {int4,int4,int4,oid,oid,oid,tid,xid,xid,cid,cid} - target_rowid_seq | {bool,bool,name,int8,int8,int8,int8,int8,int8,int8,int4,oid,tid,xid,xid,cid,cid} -(17 rows) - --- types end --- tables -SELECT - relname, - typeid_to_name(ARRAY[c.reltype]::oid[]) as reltype, - typeid_to_name(ARRAY[c.reloftype]::oid[]) as reloftype -FROM - pg_class c, - pg_namespace n -WHERE - c.relnamespace = n.oid - AND n.nspname = 'diskquota' - and c.relkind != 'v' -ORDER BY - relname; - relname | reltype | reloftype ------------------------------+-------------------------------+----------- - diskquota_active_table_type | {diskquota_active_table_type} | - quota_config | {quota_config} | - quota_config_pkey | | - rejectmap_entry | {rejectmap_entry} | - rejectmap_entry_detail | {rejectmap_entry_detail} | - relation_cache_detail | {relation_cache_detail} | - state | {state} | - state_pkey | | - table_size | {table_size} | - table_size_pkey | | - target | {target} | - target_pkey | | - target_rowid_seq | {target_rowid_seq} | -(13 rows) - --- tables end --- UDF -SELECT - proname, - typeid_to_name(ARRAY[prorettype]::oid[]) as prorettype, - typeid_to_name(proargtypes) as proargtypes, - typeid_to_name(proallargtypes) as proallargtypes, - proargmodes, - prosrc, - probin, - proacl -FROM - pg_namespace n, - pg_proc p -WHERE - n.nspname = 'diskquota' - AND 
n.oid = p.pronamespace - AND p.proname != 'update_diskquota_db_list' -- update_diskquota_db_list in 1.0 can not be dropd, this is acceptable -ORDER BY - proname; - proname | prorettype | proargtypes | proallargtypes | proargmodes | prosrc | probin | proacl ------------------------------+-------------------------------+-------------------------+-----------------+-------------+----------------------------------------------------------------------------------------------------------------------+--------------------------+-------- - diskquota_fetch_table_stat | {diskquota_active_table_type} | {int4,_oid} | | | diskquota_fetch_table_stat | $libdir/diskquota-2.1.so | - init_table_size_table | {void} | | | | init_table_size_table | $libdir/diskquota-2.1.so | - pause | {void} | | | | diskquota_pause | $libdir/diskquota-2.1.so | - pull_all_table_size | {record} | | {oid,int8,int2} | {o,o,o} | pull_all_table_size | $libdir/diskquota-2.1.so | - refresh_rejectmap | {void} | {_rejectmap_entry,_oid} | | | refresh_rejectmap | $libdir/diskquota-2.1.so | - relation_size | {int8} | {regclass} | | | +| | - | | | | | SELECT SUM(size)::bigint FROM ( +| | - | | | | | SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, relstorage) AS size+| | - | | | | | FROM gp_dist_random('pg_class') WHERE oid = relation +| | - | | | | | UNION ALL +| | - | | | | | SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, relstorage) AS size+| | - | | | | | FROM pg_class WHERE oid = relation +| | - | | | | | ) AS t | | - relation_size_local | {int8} | {oid,oid,char,char} | | | relation_size_local | $libdir/diskquota-2.1.so | - resume | {void} | | | | diskquota_resume | $libdir/diskquota-2.1.so | - set_per_segment_quota | {void} | {text,float4} | | | set_per_segment_quota | $libdir/diskquota-2.1.so | - set_role_quota | {void} | {text,text} | | | set_role_quota | $libdir/diskquota-2.1.so | - set_role_tablespace_quota | {void} | {text,text,text} | | | set_role_tablespace_quota | $libdir/diskquota-2.1.so | - set_schema_quota | {void} | {text,text} | | | set_schema_quota | $libdir/diskquota-2.1.so | - set_schema_tablespace_quota | {void} | {text,text,text} | | | set_schema_tablespace_quota | $libdir/diskquota-2.1.so | - show_rejectmap | {rejectmap_entry_detail} | | | | show_rejectmap | $libdir/diskquota-2.1.so | - show_relation_cache | {relation_cache_detail} | | | | show_relation_cache | $libdir/diskquota-2.1.so | - show_relation_cache_all_seg | {relation_cache_detail} | | | | +| | - | | | | | WITH relation_cache AS ( +| | - | | | | | SELECT diskquota.show_relation_cache() AS a +| | - | | | | | FROM gp_dist_random('gp_id') +| | - | | | | | ) +| | - | | | | | SELECT (a).* FROM relation_cache; | | - show_worker_epoch | {int8} | | | | show_worker_epoch | $libdir/diskquota-2.1.so | - status | {record} | | {text,text} | {t,t} | diskquota_status | $libdir/diskquota-2.1.so | - wait_for_worker_new_epoch | {bool} | | | | wait_for_worker_new_epoch | $libdir/diskquota-2.1.so | -(19 rows) - --- UDF end --- views -SELECT - schemaname, - viewname, - definition -FROM - pg_views -WHERE - schemaname = 'diskquota' -ORDER by - schemaname, viewname; - schemaname | viewname | definition -------------+----------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ - diskquota | rejectmap | SELECT bm.target_type, + - | | bm.target_oid, + - | | 
bm.database_oid, + - | | bm.tablespace_oid, + - | | bm.seg_exceeded, + - | | bm.dbnode, + - | | bm.spcnode, + - | | bm.relnode, + - | | bm.segid + - | | FROM diskquota.show_rejectmap() bm(target_type, target_oid, database_oid, tablespace_oid, seg_exceeded, dbnode, spcnode, relnode, segid); - diskquota | show_all_relation_view | WITH relation_cache AS ( + - | | SELECT f.relid, + - | | f.primary_table_oid, + - | | f.auxrel_num, + - | | f.owneroid, + - | | f.namespaceoid, + - | | f.backendid, + - | | f.spcnode, + - | | f.dbnode, + - | | f.relnode, + - | | f.relstorage, + - | | f.auxrel_oid + - | | FROM diskquota.show_relation_cache() f(relid, primary_table_oid, auxrel_num, owneroid, namespaceoid, backendid, spcnode, dbnode, relnode, relstorage, auxrel_oid)+ - | | ) + - | | SELECT union_relation.oid, + - | | union_relation.relowner, + - | | union_relation.relnamespace, + - | | union_relation.reltablespace + - | | FROM ( SELECT relation_cache.relid AS oid, + - | | relation_cache.owneroid AS relowner, + - | | relation_cache.namespaceoid AS relnamespace, + - | | relation_cache.spcnode AS reltablespace + - | | FROM relation_cache + - | | UNION + - | | SELECT pg_class.oid, + - | | pg_class.relowner, + - | | pg_class.relnamespace, + - | | pg_class.reltablespace + - | | FROM pg_class) union_relation + - | | GROUP BY union_relation.oid, union_relation.relowner, union_relation.relnamespace, union_relation.reltablespace; - diskquota | show_fast_database_size_view | SELECT (( SELECT sum(pg_relation_size((pg_class.oid)::regclass)) AS sum + - | | FROM pg_class + - | | WHERE (pg_class.oid <= (16384)::oid)) + ( SELECT sum(table_size.size) AS sum + - | | FROM diskquota.table_size + - | | WHERE (table_size.segid = '-1'::integer))) AS dbsize; - diskquota | show_fast_role_quota_view | WITH quota_usage AS ( + - | | SELECT show_all_relation_view.relowner, + - | | sum(table_size.size) AS total_size + - | | FROM diskquota.table_size, + - | | diskquota.show_all_relation_view + - | | WHERE ((table_size.tableid = show_all_relation_view.oid) AND (table_size.segid = '-1'::integer)) + - | | GROUP BY show_all_relation_view.relowner + - | | ) + - | | SELECT pg_roles.rolname AS role_name, + - | | quota_config.targetoid AS role_oid, + - | | quota_config.quotalimitmb AS quota_in_mb, + - | | COALESCE(quota_usage.total_size, (0)::numeric) AS rolsize_in_bytes + - | | FROM ((diskquota.quota_config + - | | JOIN pg_roles ON ((quota_config.targetoid = pg_roles.oid))) + - | | LEFT JOIN quota_usage ON ((pg_roles.oid = quota_usage.relowner))) + - | | WHERE (quota_config.quotatype = 1); - diskquota | show_fast_role_tablespace_quota_view | WITH default_tablespace AS ( + - | | SELECT pg_database.dattablespace + - | | FROM pg_database + - | | WHERE (pg_database.datname = current_database()) + - | | ), quota_usage AS ( + - | | SELECT show_all_relation_view.relowner, + - | | CASE + - | | WHEN (show_all_relation_view.reltablespace = (0)::oid) THEN default_tablespace.dattablespace + - | | ELSE show_all_relation_view.reltablespace + - | | END AS reltablespace, + - | | sum(table_size.size) AS total_size + - | | FROM diskquota.table_size, + - | | diskquota.show_all_relation_view, + - | | default_tablespace + - | | WHERE ((table_size.tableid = show_all_relation_view.oid) AND (table_size.segid = '-1'::integer)) + - | | GROUP BY show_all_relation_view.relowner, show_all_relation_view.reltablespace, default_tablespace.dattablespace + - | | ), full_quota_config AS ( + - | | SELECT target.primaryoid, + - | | target.tablespaceoid, + - | | 
config.quotalimitmb + - | | FROM diskquota.quota_config config, + - | | diskquota.target target + - | | WHERE (((config.targetoid = (target.rowid)::oid) AND (config.quotatype = target.quotatype)) AND (config.quotatype = 3)) + - | | ) + - | | SELECT pg_roles.rolname AS role_name, + - | | full_quota_config.primaryoid AS role_oid, + - | | pg_tablespace.spcname AS tablespace_name, + - | | full_quota_config.tablespaceoid AS tablespace_oid, + - | | full_quota_config.quotalimitmb AS quota_in_mb, + - | | COALESCE(quota_usage.total_size, (0)::numeric) AS rolsize_tablespace_in_bytes + - | | FROM (((full_quota_config + - | | JOIN pg_roles ON ((full_quota_config.primaryoid = pg_roles.oid))) + - | | JOIN pg_tablespace ON ((full_quota_config.tablespaceoid = pg_tablespace.oid))) + - | | LEFT JOIN quota_usage ON (((pg_roles.oid = quota_usage.relowner) AND (pg_tablespace.oid = quota_usage.reltablespace)))); - diskquota | show_fast_schema_quota_view | WITH quota_usage AS ( + - | | SELECT show_all_relation_view.relnamespace, + - | | sum(table_size.size) AS total_size + - | | FROM diskquota.table_size, + - | | diskquota.show_all_relation_view + - | | WHERE ((table_size.tableid = show_all_relation_view.oid) AND (table_size.segid = '-1'::integer)) + - | | GROUP BY show_all_relation_view.relnamespace + - | | ) + - | | SELECT pg_namespace.nspname AS schema_name, + - | | quota_config.targetoid AS schema_oid, + - | | quota_config.quotalimitmb AS quota_in_mb, + - | | COALESCE(quota_usage.total_size, (0)::numeric) AS nspsize_in_bytes + - | | FROM ((diskquota.quota_config + - | | JOIN pg_namespace ON ((quota_config.targetoid = pg_namespace.oid))) + - | | LEFT JOIN quota_usage ON ((pg_namespace.oid = quota_usage.relnamespace))) + - | | WHERE (quota_config.quotatype = 0); - diskquota | show_fast_schema_tablespace_quota_view | WITH default_tablespace AS ( + - | | SELECT pg_database.dattablespace + - | | FROM pg_database + - | | WHERE (pg_database.datname = current_database()) + - | | ), quota_usage AS ( + - | | SELECT show_all_relation_view.relnamespace, + - | | CASE + - | | WHEN (show_all_relation_view.reltablespace = (0)::oid) THEN default_tablespace.dattablespace + - | | ELSE show_all_relation_view.reltablespace + - | | END AS reltablespace, + - | | sum(table_size.size) AS total_size + - | | FROM diskquota.table_size, + - | | diskquota.show_all_relation_view, + - | | default_tablespace + - | | WHERE ((table_size.tableid = show_all_relation_view.oid) AND (table_size.segid = '-1'::integer)) + - | | GROUP BY show_all_relation_view.relnamespace, show_all_relation_view.reltablespace, default_tablespace.dattablespace + - | | ), full_quota_config AS ( + - | | SELECT target.primaryoid, + - | | target.tablespaceoid, + - | | config.quotalimitmb + - | | FROM diskquota.quota_config config, + - | | diskquota.target target + - | | WHERE (((config.targetoid = (target.rowid)::oid) AND (config.quotatype = target.quotatype)) AND (config.quotatype = 2)) + - | | ) + - | | SELECT pg_namespace.nspname AS schema_name, + - | | full_quota_config.primaryoid AS schema_oid, + - | | pg_tablespace.spcname AS tablespace_name, + - | | full_quota_config.tablespaceoid AS tablespace_oid, + - | | full_quota_config.quotalimitmb AS quota_in_mb, + - | | COALESCE(quota_usage.total_size, (0)::numeric) AS nspsize_tablespace_in_bytes + - | | FROM (((full_quota_config + - | | JOIN pg_namespace ON ((full_quota_config.primaryoid = pg_namespace.oid))) + - | | JOIN pg_tablespace ON ((full_quota_config.tablespaceoid = pg_tablespace.oid))) + - | | LEFT JOIN 
quota_usage ON (((pg_namespace.oid = quota_usage.relnamespace) AND (pg_tablespace.oid = quota_usage.reltablespace)))); - diskquota | show_segment_ratio_quota_view | SELECT pg_tablespace.spcname AS tablespace_name, + - | | pg_tablespace.oid AS tablespace_oid, + - | | quota_config.segratio AS per_seg_quota_ratio + - | | FROM (diskquota.quota_config + - | | JOIN pg_tablespace ON (((quota_config.targetoid = pg_tablespace.oid) AND (quota_config.quotatype = 4)))); -(8 rows) - --- views end -DROP FUNCTION typeid_to_name (oid[]); diff --git a/gpcontrib/diskquota/upgrade_test/expected/2.1_cleanup_quota.out b/gpcontrib/diskquota/upgrade_test/expected/2.1_cleanup_quota.out deleted file mode 100644 index 3935d709fd9..00000000000 --- a/gpcontrib/diskquota/upgrade_test/expected/2.1_cleanup_quota.out +++ /dev/null @@ -1 +0,0 @@ -drop extension diskquota; diff --git a/gpcontrib/diskquota/upgrade_test/expected/2.1_install.out b/gpcontrib/diskquota/upgrade_test/expected/2.1_install.out deleted file mode 100644 index b8f98bb9278..00000000000 --- a/gpcontrib/diskquota/upgrade_test/expected/2.1_install.out +++ /dev/null @@ -1,13 +0,0 @@ --- cleanup previous diskquota installation -\! gpconfig -c shared_preload_libraries -v '' > /dev/null -\! gpstop -raf > /dev/null -\! dropdb --if-exists diskquota --- TODO reset all diskquota GUC -\! gpstop -raf > /dev/null --- setup basic environment -\! createdb diskquota -\! gpconfig -c shared_preload_libraries -v 'diskquota-2.1.so' > /dev/null -\! gpstop -raf > /dev/null --- TODO setup GUC -\! gpconfig -c diskquota.naptime -v '1' > /dev/null -\! gpstop -raf > /dev/null diff --git a/gpcontrib/diskquota/upgrade_test/expected/2.1_migrate_to_version_2.1.out b/gpcontrib/diskquota/upgrade_test/expected/2.1_migrate_to_version_2.1.out deleted file mode 100644 index 37ee511afcb..00000000000 --- a/gpcontrib/diskquota/upgrade_test/expected/2.1_migrate_to_version_2.1.out +++ /dev/null @@ -1,10 +0,0 @@ -\! gpconfig -c shared_preload_libraries -v 'diskquota-2.1.so' > /dev/null -\! gpstop -raf > /dev/null -\! gpconfig -s 'shared_preload_libraries' -Values on all segments are consistent -GUC : shared_preload_libraries -Master value: diskquota-2.1.so -Segment value: diskquota-2.1.so -\c -alter extension diskquota update to '2.1'; -\! sleep 5 diff --git a/gpcontrib/diskquota/upgrade_test/expected/2.1_set_quota.out b/gpcontrib/diskquota/upgrade_test/expected/2.1_set_quota.out deleted file mode 100644 index b40938d638e..00000000000 --- a/gpcontrib/diskquota/upgrade_test/expected/2.1_set_quota.out +++ /dev/null @@ -1,61 +0,0 @@ -\!gpconfig -s 'shared_preload_libraries' -Values on all segments are consistent -GUC : shared_preload_libraries -Master value: diskquota-2.1.so -Segment value: diskquota-2.1.so -create extension diskquota with version '2.1'; -\!sleep 5 --- schema quota -create schema s1; -select diskquota.set_schema_quota('s1', '1 MB'); - set_schema_quota ------------------- - -(1 row) - -create table s1.a(i int) distributed by (i); -insert into s1.a select generate_series(1, 10000000); -- ok. --- role quota -create schema srole; -create role u1 nologin; -create table srole.b (t text) distributed by (t); -alter table srole.b owner to u1; -select diskquota.set_role_quota('u1', '1 MB'); - set_role_quota ----------------- - -(1 row) - -insert into srole.b select generate_series(1,100000); -- ok. --- schema tablespace quota -\! 
mkdir -p /tmp/schemaspc -create schema spcs1; -create tablespace schemaspc location '/tmp/schemaspc'; -select diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc','1 MB'); - set_schema_tablespace_quota ------------------------------ - -(1 row) - -create table spcs1.a(i int) tablespace schemaspc distributed by (i); -insert into spcs1.a select generate_series(1,100000); -- ok. --- role tablespace quota -\! mkdir -p /tmp/rolespc -create tablespace rolespc location '/tmp/rolespc'; -create role rolespcu1 nologin; -create schema rolespcrole; -create table rolespcrole.b (t text) tablespace rolespc distributed by (t); -alter table rolespcrole.b owner to rolespcu1; -select diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '1 MB'); - set_role_tablespace_quota ---------------------------- - -(1 row) - -insert into rolespcrole.b select generate_series(1,100000); -- ok. -\!sleep 5 --- leaked resource: --- role u1, rolespcu1 --- table s1.a, srole.b spcs1.a, rolespcrole.b --- schema s1, srole, spcs1, rolespcrole --- tablespace schemaspc, rolespc diff --git a/gpcontrib/diskquota/upgrade_test/expected/2.1_test_in_2.0_quota_create_in_2.1.out b/gpcontrib/diskquota/upgrade_test/expected/2.1_test_in_2.0_quota_create_in_2.1.out deleted file mode 100644 index 5c3f8c87862..00000000000 --- a/gpcontrib/diskquota/upgrade_test/expected/2.1_test_in_2.0_quota_create_in_2.1.out +++ /dev/null @@ -1,16 +0,0 @@ --- need run 2.1_set_quota before run this test --- FIXME add version check here -\! sleep 5 -insert into s1.a select generate_series(1, 10000000); -- fail. -ERROR: schema's disk space quota exceeded with name: s1 -insert into srole.b select generate_series(1, 100000); -- fail. -ERROR: role's disk space quota exceeded with name: u1 -insert into rolespcrole.b select generate_series(1, 100000); -- fail. -ERROR: tablespace: rolespc, role: rolespcu1 diskquota exceeded -insert into spcs1.a select generate_series(1, 100000); -- fail. -ERROR: tablespace: schemaspc, schema: spcs1 diskquota exceeded -drop table s1.a, srole.b, spcs1.a, rolespcrole.b; -drop schema s1, srole, spcs1, rolespcrole; -drop tablespace rolespc; -drop tablespace schemaspc; -drop role u1, rolespcu1; diff --git a/gpcontrib/diskquota/upgrade_test/expected/2.1_test_in_2.2_quota_create_in_2.1.out b/gpcontrib/diskquota/upgrade_test/expected/2.1_test_in_2.2_quota_create_in_2.1.out deleted file mode 100644 index a36fcb4f8cd..00000000000 --- a/gpcontrib/diskquota/upgrade_test/expected/2.1_test_in_2.2_quota_create_in_2.1.out +++ /dev/null @@ -1,16 +0,0 @@ --- need run 2.0_set_quota before run this test --- FIXME add version check here -\!sleep 5 -insert into s1.a select generate_series(1, 10000000); -- fail. -ERROR: schema's disk space quota exceeded with name: s1 -insert into srole.b select generate_series(1, 100000); -- fail. -ERROR: role's disk space quota exceeded with name: u1 -insert into rolespcrole.b select generate_series(1, 100000); -- fail. -ERROR: tablespace: rolespc, role: rolespcu1 diskquota exceeded -insert into spcs1.a select generate_series(1, 100000); -- fail. 
-ERROR: tablespace: schemaspc, schema: spcs1 diskquota exceeded -drop table s1.a, srole.b, spcs1.a, rolespcrole.b; -drop schema s1, srole, spcs1, rolespcrole; -drop tablespace rolespc; -drop tablespace schemaspc; -drop role u1, rolespcu1; diff --git a/gpcontrib/diskquota/upgrade_test/expected/2.2_catalog.out b/gpcontrib/diskquota/upgrade_test/expected/2.2_catalog.out index 5654d0fb781..48d2934a6c9 100644 --- a/gpcontrib/diskquota/upgrade_test/expected/2.2_catalog.out +++ b/gpcontrib/diskquota/upgrade_test/expected/2.2_catalog.out @@ -25,8 +25,8 @@ GROUP BY t1.typname ORDER BY t1.typname; - typname | typname -----------------------------------------+---------------------------------------------------------------------------------- + typname | typname +----------------------------------------+------------------------------------------------------- diskquota_active_table_type | {int8,int2,oid} quota_config | {int8,int4,int4,oid,oid,tid,xid,xid,cid,cid,float4} rejectmap | {bool,int4,text,oid,oid,oid,oid,oid,oid} @@ -43,8 +43,7 @@ ORDER BY state | {int4,int4,oid,tid,xid,xid,cid,cid} table_size | {int8,int2,int4,oid,oid,tid,xid,xid,cid,cid} target | {int4,int4,int4,oid,oid,oid,tid,xid,xid,cid,cid} - target_rowid_seq | {bool,bool,name,int8,int8,int8,int8,int8,int8,int8,int4,oid,tid,xid,xid,cid,cid} -(17 rows) +(16 rows) -- types end -- tables @@ -75,7 +74,7 @@ ORDER BY table_size_pkey | | target | {target} | target_pkey | | - target_rowid_seq | {target_rowid_seq} | + target_rowid_seq | | (13 rows) -- tables end @@ -178,7 +177,7 @@ ORDER by | | f.relam + | | FROM diskquota.show_relation_cache() f(relid, primary_table_oid, auxrel_num, owneroid, namespaceoid, backendid, spcnode, dbnode, relnode, relstorage, auxrel_oid, relam)+ | | ) + - | | SELECT union_relation.oid, + + | | SELECT DISTINCT union_relation.oid, + | | union_relation.relowner, + | | union_relation.relnamespace, + | | union_relation.reltablespace + @@ -192,8 +191,7 @@ ORDER by | | pg_class.relowner, + | | pg_class.relnamespace, + | | pg_class.reltablespace + - | | FROM pg_class) union_relation + - | | GROUP BY union_relation.oid, union_relation.relowner, union_relation.relnamespace, union_relation.reltablespace; + | | FROM pg_class) union_relation; diskquota | show_fast_database_size_view | SELECT (( SELECT sum(pg_relation_size((pg_class.oid)::regclass)) AS sum + | | FROM pg_class + | | WHERE (pg_class.oid <= (16384)::oid)) + ( SELECT sum(table_size.size) AS sum + @@ -237,7 +235,7 @@ ORDER by | | config.quotalimitmb + | | FROM diskquota.quota_config config, + | | diskquota.target target + - | | WHERE (((config.targetoid = (target.rowid)::oid) AND (config.quotatype = target.quotatype)) AND (config.quotatype = 3)) + + | | WHERE ((config.targetoid = (target.rowid)::oid) AND (config.quotatype = target.quotatype) AND (config.quotatype = 3)) + | | ) + | | SELECT pg_roles.rolname AS role_name, + | | full_quota_config.primaryoid AS role_oid, + @@ -287,7 +285,7 @@ ORDER by | | config.quotalimitmb + | | FROM diskquota.quota_config config, + | | diskquota.target target + - | | WHERE (((config.targetoid = (target.rowid)::oid) AND (config.quotatype = target.quotatype)) AND (config.quotatype = 2)) + + | | WHERE ((config.targetoid = (target.rowid)::oid) AND (config.quotatype = target.quotatype) AND (config.quotatype = 2)) + | | ) + | | SELECT pg_namespace.nspname AS schema_name, + | | full_quota_config.primaryoid AS schema_oid, + diff --git a/gpcontrib/diskquota/upgrade_test/expected/2.2_migrate_to_version_2.2.out 
b/gpcontrib/diskquota/upgrade_test/expected/2.2_migrate_to_version_2.2.out index d54e99e69b2..d6fbb96247b 100644 --- a/gpcontrib/diskquota/upgrade_test/expected/2.2_migrate_to_version_2.2.out +++ b/gpcontrib/diskquota/upgrade_test/expected/2.2_migrate_to_version_2.2.out @@ -2,9 +2,9 @@ \! gpstop -raf > /dev/null \! gpconfig -s 'shared_preload_libraries' Values on all segments are consistent -GUC : shared_preload_libraries -Master value: diskquota-2.2.so -Segment value: diskquota-2.2.so +GUC : shared_preload_libraries +Coordinator value: diskquota-2.2.so +Segment value: diskquota-2.2.so \c alter extension diskquota update to '2.2'; \! sleep 5 diff --git a/gpcontrib/diskquota/upgrade_test/expected/2.2_set_quota.out b/gpcontrib/diskquota/upgrade_test/expected/2.2_set_quota.out index 2d2d5486c8a..5083f5747f2 100644 --- a/gpcontrib/diskquota/upgrade_test/expected/2.2_set_quota.out +++ b/gpcontrib/diskquota/upgrade_test/expected/2.2_set_quota.out @@ -1,8 +1,8 @@ \!gpconfig -s 'shared_preload_libraries' Values on all segments are consistent -GUC : shared_preload_libraries -Master value: diskquota-2.2.so -Segment value: diskquota-2.2.so +GUC : shared_preload_libraries +Coordinator value: diskquota-2.2.so +Segment value: diskquota-2.2.so create extension diskquota with version '2.2'; select diskquota.init_table_size_table(); init_table_size_table diff --git a/gpcontrib/diskquota/upgrade_test/expected/2.2_test_in_2.1_quota_create_in_2.2.out b/gpcontrib/diskquota/upgrade_test/expected/2.2_test_in_2.1_quota_create_in_2.2.out deleted file mode 100644 index 5c3f8c87862..00000000000 --- a/gpcontrib/diskquota/upgrade_test/expected/2.2_test_in_2.1_quota_create_in_2.2.out +++ /dev/null @@ -1,16 +0,0 @@ --- need run 2.1_set_quota before run this test --- FIXME add version check here -\! sleep 5 -insert into s1.a select generate_series(1, 10000000); -- fail. -ERROR: schema's disk space quota exceeded with name: s1 -insert into srole.b select generate_series(1, 100000); -- fail. -ERROR: role's disk space quota exceeded with name: u1 -insert into rolespcrole.b select generate_series(1, 100000); -- fail. -ERROR: tablespace: rolespc, role: rolespcu1 diskquota exceeded -insert into spcs1.a select generate_series(1, 100000); -- fail. 
-ERROR: tablespace: schemaspc, schema: spcs1 diskquota exceeded -drop table s1.a, srole.b, spcs1.a, rolespcrole.b; -drop schema s1, srole, spcs1, rolespcrole; -drop tablespace rolespc; -drop tablespace schemaspc; -drop role u1, rolespcu1; diff --git a/gpcontrib/diskquota/upgrade_test/expected/2.3_catalog.out b/gpcontrib/diskquota/upgrade_test/expected/2.3_catalog.out index 0d74319bf04..016aecd94c9 100644 --- a/gpcontrib/diskquota/upgrade_test/expected/2.3_catalog.out +++ b/gpcontrib/diskquota/upgrade_test/expected/2.3_catalog.out @@ -25,8 +25,8 @@ GROUP BY t1.typname ORDER BY t1.typname; - typname | typname -----------------------------------------+---------------------------------------------------------------------------------- + typname | typname +----------------------------------------+------------------------------------------------------- diskquota_active_table_type | {int8,int2,oid} quota_config | {int8,int4,int4,oid,oid,tid,xid,xid,cid,cid,float4} rejectmap | {bool,int4,text,oid,oid,oid,oid,oid,oid} @@ -43,8 +43,7 @@ ORDER BY state | {int4,int4,oid,tid,xid,xid,cid,cid} table_size | {int8,int2,int4,oid,oid,tid,xid,xid,cid,cid} target | {int4,int4,int4,oid,oid,oid,tid,xid,xid,cid,cid} - target_rowid_seq | {bool,bool,name,int8,int8,int8,int8,int8,int8,int8,int4,oid,tid,xid,xid,cid,cid} -(17 rows) +(16 rows) -- types end -- tables @@ -75,7 +74,7 @@ ORDER BY table_size_pkey | | target | {target} | target_pkey | | - target_rowid_seq | {target_rowid_seq} | + target_rowid_seq | | (13 rows) -- tables end @@ -178,7 +177,7 @@ ORDER by | | f.relam + | | FROM diskquota.show_relation_cache() f(relid, primary_table_oid, auxrel_num, owneroid, namespaceoid, backendid, spcnode, dbnode, relnode, relstorage, auxrel_oid, relam)+ | | ) + - | | SELECT union_relation.oid, + + | | SELECT DISTINCT union_relation.oid, + | | union_relation.relowner, + | | union_relation.relnamespace, + | | union_relation.reltablespace + @@ -192,8 +191,7 @@ ORDER by | | pg_class.relowner, + | | pg_class.relnamespace, + | | pg_class.reltablespace + - | | FROM pg_class) union_relation + - | | GROUP BY union_relation.oid, union_relation.relowner, union_relation.relnamespace, union_relation.reltablespace; + | | FROM pg_class) union_relation; diskquota | show_fast_database_size_view | SELECT (( SELECT sum(pg_relation_size((pg_class.oid)::regclass)) AS sum + | | FROM pg_class + | | WHERE (pg_class.oid <= (16384)::oid)) + ( SELECT sum(table_size.size) AS sum + @@ -237,7 +235,7 @@ ORDER by | | config.quotalimitmb + | | FROM diskquota.quota_config config, + | | diskquota.target target + - | | WHERE (((config.targetoid = (target.rowid)::oid) AND (config.quotatype = target.quotatype)) AND (config.quotatype = 3)) + + | | WHERE ((config.targetoid = (target.rowid)::oid) AND (config.quotatype = target.quotatype) AND (config.quotatype = 3)) + | | ) + | | SELECT pg_roles.rolname AS role_name, + | | full_quota_config.primaryoid AS role_oid, + @@ -287,7 +285,7 @@ ORDER by | | config.quotalimitmb + | | FROM diskquota.quota_config config, + | | diskquota.target target + - | | WHERE (((config.targetoid = (target.rowid)::oid) AND (config.quotatype = target.quotatype)) AND (config.quotatype = 2)) + + | | WHERE ((config.targetoid = (target.rowid)::oid) AND (config.quotatype = target.quotatype) AND (config.quotatype = 2)) + | | ) + | | SELECT pg_namespace.nspname AS schema_name, + | | full_quota_config.primaryoid AS schema_oid, + diff --git a/gpcontrib/diskquota/upgrade_test/expected/2.3_migrate_to_version_2.3.out 
b/gpcontrib/diskquota/upgrade_test/expected/2.3_migrate_to_version_2.3.out index bc14c46c4b5..db67a0e36dd 100644 --- a/gpcontrib/diskquota/upgrade_test/expected/2.3_migrate_to_version_2.3.out +++ b/gpcontrib/diskquota/upgrade_test/expected/2.3_migrate_to_version_2.3.out @@ -2,9 +2,9 @@ \! gpstop -raf > /dev/null \! gpconfig -s 'shared_preload_libraries' Values on all segments are consistent -GUC : shared_preload_libraries -Master value: diskquota-2.3.so -Segment value: diskquota-2.3.so +GUC : shared_preload_libraries +Coordinator value: diskquota-2.3.so +Segment value: diskquota-2.3.so \c alter extension diskquota update to '2.3'; \! sleep 5 diff --git a/gpcontrib/diskquota/upgrade_test/expected/2.3_set_quota.out b/gpcontrib/diskquota/upgrade_test/expected/2.3_set_quota.out index 57dc9145492..114f346dddf 100644 --- a/gpcontrib/diskquota/upgrade_test/expected/2.3_set_quota.out +++ b/gpcontrib/diskquota/upgrade_test/expected/2.3_set_quota.out @@ -1,8 +1,8 @@ \!gpconfig -s 'shared_preload_libraries' Values on all segments are consistent -GUC : shared_preload_libraries -Master value: diskquota-2.3.so -Segment value: diskquota-2.3.so +GUC : shared_preload_libraries +Coordinator value: diskquota-2.3.so +Segment value: diskquota-2.3.so create extension diskquota with version '2.3'; select diskquota.wait_for_worker_new_epoch(); wait_for_worker_new_epoch @@ -23,7 +23,6 @@ insert into s1.a select generate_series(1, 10000000); -- ok. -- role quota create schema srole; create role u1 nologin; -NOTICE: resource queue required -- using default resource queue "pg_default" create table srole.b (t text) distributed by (t); alter table srole.b owner to u1; select diskquota.set_role_quota('u1', '1 MB'); @@ -49,7 +48,6 @@ insert into spcs1.a select generate_series(1,100000); -- ok. \! 
mkdir -p /tmp/rolespc create tablespace rolespc location '/tmp/rolespc'; create role rolespcu1 nologin; -NOTICE: resource queue required -- using default resource queue "pg_default" create schema rolespcrole; create table rolespcrole.b (t text) tablespace rolespc distributed by (t); alter table rolespcrole.b owner to rolespcu1; diff --git a/gpcontrib/diskquota/upgrade_test/expected/dummy.out b/gpcontrib/diskquota/upgrade_test/expected/dummy.out deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/gpcontrib/diskquota/upgrade_test/expected7/2.2_catalog.out b/gpcontrib/diskquota/upgrade_test/expected7/2.2_catalog.out deleted file mode 100644 index 48d2934a6c9..00000000000 --- a/gpcontrib/diskquota/upgrade_test/expected7/2.2_catalog.out +++ /dev/null @@ -1,308 +0,0 @@ -CREATE FUNCTION typeid_to_name(oid[]) RETURNS name[] AS ' - WITH io AS ( - SELECT x.i AS index, x.o AS type_id FROM ( - SELECT generate_series(1, array_length($1, 1)) AS i, unnest($1) AS o - ) AS x - ) SELECT array_agg(typname order by io.index) FROM io, pg_type t WHERE io.type_id = t.oid; -' LANGUAGE sql STABLE; --- types -SELECT - t1.typname, - array_agg(t2.typname order by a.atttypid) typname -FROM - pg_namespace n, - pg_class c, - pg_type t1, - pg_type t2, - pg_attribute a -WHERE - n.nspname = 'diskquota' - AND c.oid = t1.typrelid - AND n.oid = t1.typnamespace - AND a.attrelid = c.oid - AND t2.oid = a.atttypid -GROUP BY - t1.typname -ORDER BY - t1.typname; - typname | typname -----------------------------------------+------------------------------------------------------- - diskquota_active_table_type | {int8,int2,oid} - quota_config | {int8,int4,int4,oid,oid,tid,xid,xid,cid,cid,float4} - rejectmap | {bool,int4,text,oid,oid,oid,oid,oid,oid} - rejectmap_entry | {bool,int4,oid,oid,oid} - rejectmap_entry_detail | {bool,int4,text,oid,oid,oid,oid,oid,oid} - relation_cache_detail | {char,int4,int4,oid,oid,oid,oid,oid,oid,oid,oid,_oid} - show_all_relation_view | {oid,oid,oid,oid} - show_fast_database_size_view | {numeric} - show_fast_role_quota_view | {name,int8,oid,numeric} - show_fast_role_tablespace_quota_view | {name,name,int8,oid,oid,numeric} - show_fast_schema_quota_view | {name,int8,oid,numeric} - show_fast_schema_tablespace_quota_view | {name,name,int8,oid,oid,numeric} - show_segment_ratio_quota_view | {name,oid,float4} - state | {int4,int4,oid,tid,xid,xid,cid,cid} - table_size | {int8,int2,int4,oid,oid,tid,xid,xid,cid,cid} - target | {int4,int4,int4,oid,oid,oid,tid,xid,xid,cid,cid} -(16 rows) - --- types end --- tables -SELECT - relname, - typeid_to_name(ARRAY[c.reltype]::oid[]) as reltype, - typeid_to_name(ARRAY[c.reloftype]::oid[]) as reloftype -FROM - pg_class c, - pg_namespace n -WHERE - c.relnamespace = n.oid - AND n.nspname = 'diskquota' - and c.relkind != 'v' -ORDER BY - relname; - relname | reltype | reloftype ------------------------------+-------------------------------+----------- - diskquota_active_table_type | {diskquota_active_table_type} | - quota_config | {quota_config} | - quota_config_pkey | | - rejectmap_entry | {rejectmap_entry} | - rejectmap_entry_detail | {rejectmap_entry_detail} | - relation_cache_detail | {relation_cache_detail} | - state | {state} | - state_pkey | | - table_size | {table_size} | - table_size_pkey | | - target | {target} | - target_pkey | | - target_rowid_seq | | -(13 rows) - --- tables end --- UDF -SELECT - proname, - typeid_to_name(ARRAY[prorettype]::oid[]) as prorettype, - typeid_to_name(proargtypes) as proargtypes, - typeid_to_name(proallargtypes) as 
proallargtypes, - proargmodes, - prosrc, - probin, - proacl -FROM - pg_namespace n, - pg_proc p -WHERE - n.nspname = 'diskquota' - AND n.oid = p.pronamespace - AND p.proname != 'update_diskquota_db_list' -- update_diskquota_db_list in 1.0 can not be dropd, this is acceptable -ORDER BY - proname; - proname | prorettype | proargtypes | proallargtypes | proargmodes | prosrc | probin | proacl ------------------------------+-------------------------------+-------------------------+-----------------+-------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------------------+-------- - diskquota_fetch_table_stat | {diskquota_active_table_type} | {int4,_oid} | | | diskquota_fetch_table_stat | $libdir/diskquota-2.2.so | - init_table_size_table | {void} | | | | init_table_size_table | $libdir/diskquota-2.2.so | - pause | {void} | | | | diskquota_pause | $libdir/diskquota-2.2.so | - pull_all_table_size | {record} | | {oid,int8,int2} | {o,o,o} | pull_all_table_size | $libdir/diskquota-2.2.so | - refresh_rejectmap | {void} | {_rejectmap_entry,_oid} | | | refresh_rejectmap | $libdir/diskquota-2.2.so | - relation_size | {int8} | {regclass} | | | +| | - | | | | | SELECT SUM(size)::bigint FROM ( +| | - | | | | | SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, +| | - | | | | | CASE WHEN EXISTS +| | - | | | | | (SELECT FROM pg_catalog.pg_attribute WHERE attrelid = 'pg_class'::regclass AND attname = 'relstorage') THEN relstorage::"char" ELSE ''::"char" END,+| | - | | | | | relam) AS size +| | - | | | | | FROM gp_dist_random('pg_class') as relstorage WHERE oid = relation +| | - | | | | | UNION ALL +| | - | | | | | SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, +| | - | | | | | CASE WHEN EXISTS +| | - | | | | | (SELECT FROM pg_catalog.pg_attribute WHERE attrelid = 'pg_class'::regclass AND attname = 'relstorage') THEN relstorage::"char" ELSE ''::"char" END,+| | - | | | | | relam) AS size +| | - | | | | | FROM pg_class as relstorage WHERE oid = relation +| | - | | | | | ) AS t | | - relation_size_local | {int8} | {oid,oid,char,char,oid} | | | relation_size_local | $libdir/diskquota-2.2.so | - resume | {void} | | | | diskquota_resume | $libdir/diskquota-2.2.so | - set_per_segment_quota | {void} | {text,float4} | | | set_per_segment_quota | $libdir/diskquota-2.2.so | - set_role_quota | {void} | {text,text} | | | set_role_quota | $libdir/diskquota-2.2.so | - set_role_tablespace_quota | {void} | {text,text,text} | | | set_role_tablespace_quota | $libdir/diskquota-2.2.so | - set_schema_quota | {void} | {text,text} | | | set_schema_quota | $libdir/diskquota-2.2.so | - set_schema_tablespace_quota | {void} | {text,text,text} | | | set_schema_tablespace_quota | $libdir/diskquota-2.2.so | - show_rejectmap | {rejectmap_entry_detail} | | | | show_rejectmap | $libdir/diskquota-2.2.so | - show_relation_cache | {relation_cache_detail} | | | | show_relation_cache | $libdir/diskquota-2.2.so | - show_relation_cache_all_seg | {relation_cache_detail} | | | | +| | - | | | | | WITH relation_cache AS ( +| | - | | | | | SELECT diskquota.show_relation_cache() AS a +| | - | | | | | FROM gp_dist_random('gp_id') +| | - | | | | | ) +| | - | | | | | SELECT (a).* FROM relation_cache; | | - show_worker_epoch | {int8} | | | | show_worker_epoch | $libdir/diskquota-2.2.so | - status | {record} | | {text,text} | {t,t} | diskquota_status | 
$libdir/diskquota-2.2.so | - wait_for_worker_new_epoch | {bool} | | | | wait_for_worker_new_epoch | $libdir/diskquota-2.2.so | -(19 rows) - --- UDF end --- views -SELECT - schemaname, - viewname, - definition -FROM - pg_views -WHERE - schemaname = 'diskquota' -ORDER by - schemaname, viewname; - schemaname | viewname | definition -------------+----------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - diskquota | rejectmap | SELECT bm.target_type, + - | | bm.target_oid, + - | | bm.database_oid, + - | | bm.tablespace_oid, + - | | bm.seg_exceeded, + - | | bm.dbnode, + - | | bm.spcnode, + - | | bm.relnode, + - | | bm.segid + - | | FROM diskquota.show_rejectmap() bm(target_type, target_oid, database_oid, tablespace_oid, seg_exceeded, dbnode, spcnode, relnode, segid); - diskquota | show_all_relation_view | WITH relation_cache AS ( + - | | SELECT f.relid, + - | | f.primary_table_oid, + - | | f.auxrel_num, + - | | f.owneroid, + - | | f.namespaceoid, + - | | f.backendid, + - | | f.spcnode, + - | | f.dbnode, + - | | f.relnode, + - | | f.relstorage, + - | | f.auxrel_oid, + - | | f.relam + - | | FROM diskquota.show_relation_cache() f(relid, primary_table_oid, auxrel_num, owneroid, namespaceoid, backendid, spcnode, dbnode, relnode, relstorage, auxrel_oid, relam)+ - | | ) + - | | SELECT DISTINCT union_relation.oid, + - | | union_relation.relowner, + - | | union_relation.relnamespace, + - | | union_relation.reltablespace + - | | FROM ( SELECT relation_cache.relid AS oid, + - | | relation_cache.owneroid AS relowner, + - | | relation_cache.namespaceoid AS relnamespace, + - | | relation_cache.spcnode AS reltablespace + - | | FROM relation_cache + - | | UNION + - | | SELECT pg_class.oid, + - | | pg_class.relowner, + - | | pg_class.relnamespace, + - | | pg_class.reltablespace + - | | FROM pg_class) union_relation; - diskquota | show_fast_database_size_view | SELECT (( SELECT sum(pg_relation_size((pg_class.oid)::regclass)) AS sum + - | | FROM pg_class + - | | WHERE (pg_class.oid <= (16384)::oid)) + ( SELECT sum(table_size.size) AS sum + - | | FROM diskquota.table_size + - | | WHERE (table_size.segid = '-1'::integer))) AS dbsize; - diskquota | show_fast_role_quota_view | WITH quota_usage AS ( + - | | SELECT show_all_relation_view.relowner, + - | | sum(table_size.size) AS total_size + - | | FROM diskquota.table_size, + - | | diskquota.show_all_relation_view + - | | WHERE ((table_size.tableid = show_all_relation_view.oid) AND (table_size.segid = '-1'::integer)) + - | | GROUP BY show_all_relation_view.relowner + - | | ) + - | | SELECT pg_roles.rolname AS role_name, + - | | quota_config.targetoid AS role_oid, + - | | quota_config.quotalimitmb AS quota_in_mb, + - | | COALESCE(quota_usage.total_size, (0)::numeric) AS rolsize_in_bytes + - | | FROM ((diskquota.quota_config + - | | JOIN pg_roles ON ((quota_config.targetoid = pg_roles.oid))) + - | | LEFT JOIN quota_usage ON ((pg_roles.oid = quota_usage.relowner))) + - | | WHERE (quota_config.quotatype = 1); - diskquota | show_fast_role_tablespace_quota_view | WITH default_tablespace AS ( + - | | SELECT pg_database.dattablespace + - | | FROM pg_database + - | | WHERE (pg_database.datname = current_database()) + - | | ), quota_usage AS ( + - | | SELECT show_all_relation_view.relowner, + - | | CASE + - | | WHEN (show_all_relation_view.reltablespace = (0)::oid) THEN default_tablespace.dattablespace + - | | ELSE 
show_all_relation_view.reltablespace + - | | END AS reltablespace, + - | | sum(table_size.size) AS total_size + - | | FROM diskquota.table_size, + - | | diskquota.show_all_relation_view, + - | | default_tablespace + - | | WHERE ((table_size.tableid = show_all_relation_view.oid) AND (table_size.segid = '-1'::integer)) + - | | GROUP BY show_all_relation_view.relowner, show_all_relation_view.reltablespace, default_tablespace.dattablespace + - | | ), full_quota_config AS ( + - | | SELECT target.primaryoid, + - | | target.tablespaceoid, + - | | config.quotalimitmb + - | | FROM diskquota.quota_config config, + - | | diskquota.target target + - | | WHERE ((config.targetoid = (target.rowid)::oid) AND (config.quotatype = target.quotatype) AND (config.quotatype = 3)) + - | | ) + - | | SELECT pg_roles.rolname AS role_name, + - | | full_quota_config.primaryoid AS role_oid, + - | | pg_tablespace.spcname AS tablespace_name, + - | | full_quota_config.tablespaceoid AS tablespace_oid, + - | | full_quota_config.quotalimitmb AS quota_in_mb, + - | | COALESCE(quota_usage.total_size, (0)::numeric) AS rolsize_tablespace_in_bytes + - | | FROM (((full_quota_config + - | | JOIN pg_roles ON ((full_quota_config.primaryoid = pg_roles.oid))) + - | | JOIN pg_tablespace ON ((full_quota_config.tablespaceoid = pg_tablespace.oid))) + - | | LEFT JOIN quota_usage ON (((pg_roles.oid = quota_usage.relowner) AND (pg_tablespace.oid = quota_usage.reltablespace)))); - diskquota | show_fast_schema_quota_view | WITH quota_usage AS ( + - | | SELECT show_all_relation_view.relnamespace, + - | | sum(table_size.size) AS total_size + - | | FROM diskquota.table_size, + - | | diskquota.show_all_relation_view + - | | WHERE ((table_size.tableid = show_all_relation_view.oid) AND (table_size.segid = '-1'::integer)) + - | | GROUP BY show_all_relation_view.relnamespace + - | | ) + - | | SELECT pg_namespace.nspname AS schema_name, + - | | quota_config.targetoid AS schema_oid, + - | | quota_config.quotalimitmb AS quota_in_mb, + - | | COALESCE(quota_usage.total_size, (0)::numeric) AS nspsize_in_bytes + - | | FROM ((diskquota.quota_config + - | | JOIN pg_namespace ON ((quota_config.targetoid = pg_namespace.oid))) + - | | LEFT JOIN quota_usage ON ((pg_namespace.oid = quota_usage.relnamespace))) + - | | WHERE (quota_config.quotatype = 0); - diskquota | show_fast_schema_tablespace_quota_view | WITH default_tablespace AS ( + - | | SELECT pg_database.dattablespace + - | | FROM pg_database + - | | WHERE (pg_database.datname = current_database()) + - | | ), quota_usage AS ( + - | | SELECT show_all_relation_view.relnamespace, + - | | CASE + - | | WHEN (show_all_relation_view.reltablespace = (0)::oid) THEN default_tablespace.dattablespace + - | | ELSE show_all_relation_view.reltablespace + - | | END AS reltablespace, + - | | sum(table_size.size) AS total_size + - | | FROM diskquota.table_size, + - | | diskquota.show_all_relation_view, + - | | default_tablespace + - | | WHERE ((table_size.tableid = show_all_relation_view.oid) AND (table_size.segid = '-1'::integer)) + - | | GROUP BY show_all_relation_view.relnamespace, show_all_relation_view.reltablespace, default_tablespace.dattablespace + - | | ), full_quota_config AS ( + - | | SELECT target.primaryoid, + - | | target.tablespaceoid, + - | | config.quotalimitmb + - | | FROM diskquota.quota_config config, + - | | diskquota.target target + - | | WHERE ((config.targetoid = (target.rowid)::oid) AND (config.quotatype = target.quotatype) AND (config.quotatype = 2)) + - | | ) + - | | SELECT pg_namespace.nspname AS 
schema_name, + - | | full_quota_config.primaryoid AS schema_oid, + - | | pg_tablespace.spcname AS tablespace_name, + - | | full_quota_config.tablespaceoid AS tablespace_oid, + - | | full_quota_config.quotalimitmb AS quota_in_mb, + - | | COALESCE(quota_usage.total_size, (0)::numeric) AS nspsize_tablespace_in_bytes + - | | FROM (((full_quota_config + - | | JOIN pg_namespace ON ((full_quota_config.primaryoid = pg_namespace.oid))) + - | | JOIN pg_tablespace ON ((full_quota_config.tablespaceoid = pg_tablespace.oid))) + - | | LEFT JOIN quota_usage ON (((pg_namespace.oid = quota_usage.relnamespace) AND (pg_tablespace.oid = quota_usage.reltablespace)))); - diskquota | show_segment_ratio_quota_view | SELECT pg_tablespace.spcname AS tablespace_name, + - | | pg_tablespace.oid AS tablespace_oid, + - | | quota_config.segratio AS per_seg_quota_ratio + - | | FROM (diskquota.quota_config + - | | JOIN pg_tablespace ON (((quota_config.targetoid = pg_tablespace.oid) AND (quota_config.quotatype = 4)))); -(8 rows) - --- views end -DROP FUNCTION typeid_to_name (oid[]); diff --git a/gpcontrib/diskquota/upgrade_test/expected7/2.2_cleanup_quota.out b/gpcontrib/diskquota/upgrade_test/expected7/2.2_cleanup_quota.out deleted file mode 100644 index 3935d709fd9..00000000000 --- a/gpcontrib/diskquota/upgrade_test/expected7/2.2_cleanup_quota.out +++ /dev/null @@ -1 +0,0 @@ -drop extension diskquota; diff --git a/gpcontrib/diskquota/upgrade_test/expected7/2.2_install.out b/gpcontrib/diskquota/upgrade_test/expected7/2.2_install.out deleted file mode 100644 index c4b7f4c95ce..00000000000 --- a/gpcontrib/diskquota/upgrade_test/expected7/2.2_install.out +++ /dev/null @@ -1,13 +0,0 @@ --- cleanup previous diskquota installation -\! gpconfig -c shared_preload_libraries -v '' > /dev/null -\! gpstop -raf > /dev/null -\! dropdb --if-exists diskquota --- TODO reset all diskquota GUC -\! gpstop -raf > /dev/null --- setup basic environment -\! createdb diskquota -\! gpconfig -c shared_preload_libraries -v 'diskquota-2.2.so' > /dev/null -\! gpstop -raf > /dev/null --- TODO setup GUC -\! gpconfig -c diskquota.naptime -v '1' > /dev/null -\! gpstop -raf > /dev/null diff --git a/gpcontrib/diskquota/upgrade_test/expected7/2.2_migrate_to_version_2.2.out b/gpcontrib/diskquota/upgrade_test/expected7/2.2_migrate_to_version_2.2.out deleted file mode 100644 index d6fbb96247b..00000000000 --- a/gpcontrib/diskquota/upgrade_test/expected7/2.2_migrate_to_version_2.2.out +++ /dev/null @@ -1,10 +0,0 @@ -\! gpconfig -c shared_preload_libraries -v 'diskquota-2.2.so' > /dev/null -\! gpstop -raf > /dev/null -\! gpconfig -s 'shared_preload_libraries' -Values on all segments are consistent -GUC : shared_preload_libraries -Coordinator value: diskquota-2.2.so -Segment value: diskquota-2.2.so -\c -alter extension diskquota update to '2.2'; -\! 
sleep 5 diff --git a/gpcontrib/diskquota/upgrade_test/expected7/2.2_set_quota.out b/gpcontrib/diskquota/upgrade_test/expected7/2.2_set_quota.out deleted file mode 100644 index 5083f5747f2..00000000000 --- a/gpcontrib/diskquota/upgrade_test/expected7/2.2_set_quota.out +++ /dev/null @@ -1,72 +0,0 @@ -\!gpconfig -s 'shared_preload_libraries' -Values on all segments are consistent -GUC : shared_preload_libraries -Coordinator value: diskquota-2.2.so -Segment value: diskquota-2.2.so -create extension diskquota with version '2.2'; -select diskquota.init_table_size_table(); - init_table_size_table ------------------------ - -(1 row) - -select diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- schema quota -create schema s1; -select diskquota.set_schema_quota('s1', '1 MB'); - set_schema_quota ------------------- - -(1 row) - -create table s1.a(i int) distributed by (i); -insert into s1.a select generate_series(1, 10000000); -- ok. --- role quota -create schema srole; -create role u1 nologin; -create table srole.b (t text) distributed by (t); -alter table srole.b owner to u1; -select diskquota.set_role_quota('u1', '1 MB'); - set_role_quota ----------------- - -(1 row) - -insert into srole.b select generate_series(1,100000); -- ok. --- schema tablespace quota -\! mkdir -p /tmp/schemaspc -create schema spcs1; -create tablespace schemaspc location '/tmp/schemaspc'; -select diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc','1 MB'); - set_schema_tablespace_quota ------------------------------ - -(1 row) - -create table spcs1.a(i int) tablespace schemaspc distributed by (i); -insert into spcs1.a select generate_series(1,100000); -- ok. --- role tablespace quota -\! mkdir -p /tmp/rolespc -create tablespace rolespc location '/tmp/rolespc'; -create role rolespcu1 nologin; -create schema rolespcrole; -create table rolespcrole.b (t text) tablespace rolespc distributed by (t); -alter table rolespcrole.b owner to rolespcu1; -select diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '1 MB'); - set_role_tablespace_quota ---------------------------- - -(1 row) - -insert into rolespcrole.b select generate_series(1,100000); -- ok. -\!sleep 5 --- leaked resource: --- role u1, rolespcu1 --- table s1.a, srole.b spcs1.a, rolespcrole.b --- schema s1, srole, spcs1, rolespcrole --- tablespace schemaspc, rolespc diff --git a/gpcontrib/diskquota/upgrade_test/expected7/2.2_test_in_2.3_quota_create_in_2.2.out b/gpcontrib/diskquota/upgrade_test/expected7/2.2_test_in_2.3_quota_create_in_2.2.out deleted file mode 100644 index aab1cb100c1..00000000000 --- a/gpcontrib/diskquota/upgrade_test/expected7/2.2_test_in_2.3_quota_create_in_2.2.out +++ /dev/null @@ -1,16 +0,0 @@ --- need run 2.3_set_quota before run this test --- FIXME add version check here -\! sleep 5 -insert into s1.a select generate_series(1, 10000000); -- fail. -ERROR: schema's disk space quota exceeded with name: s1 -insert into srole.b select generate_series(1, 100000); -- fail. -ERROR: role's disk space quota exceeded with name: u1 -insert into rolespcrole.b select generate_series(1, 100000); -- fail. -ERROR: tablespace: rolespc, role: rolespcu1 diskquota exceeded -insert into spcs1.a select generate_series(1, 100000); -- fail. 
-ERROR: tablespace: schemaspc, schema: spcs1 diskquota exceeded -drop table s1.a, srole.b, spcs1.a, rolespcrole.b; -drop schema s1, srole, spcs1, rolespcrole; -drop tablespace rolespc; -drop tablespace schemaspc; -drop role u1, rolespcu1; diff --git a/gpcontrib/diskquota/upgrade_test/expected7/2.3_catalog.out b/gpcontrib/diskquota/upgrade_test/expected7/2.3_catalog.out deleted file mode 100644 index 016aecd94c9..00000000000 --- a/gpcontrib/diskquota/upgrade_test/expected7/2.3_catalog.out +++ /dev/null @@ -1,308 +0,0 @@ -CREATE FUNCTION typeid_to_name(oid[]) RETURNS name[] AS ' - WITH io AS ( - SELECT x.i AS index, x.o AS type_id FROM ( - SELECT generate_series(1, array_length($1, 1)) AS i, unnest($1) AS o - ) AS x - ) SELECT array_agg(typname order by io.index) FROM io, pg_type t WHERE io.type_id = t.oid; -' LANGUAGE sql STABLE; --- types -SELECT - t1.typname, - array_agg(t2.typname order by a.atttypid) typname -FROM - pg_namespace n, - pg_class c, - pg_type t1, - pg_type t2, - pg_attribute a -WHERE - n.nspname = 'diskquota' - AND c.oid = t1.typrelid - AND n.oid = t1.typnamespace - AND a.attrelid = c.oid - AND t2.oid = a.atttypid -GROUP BY - t1.typname -ORDER BY - t1.typname; - typname | typname -----------------------------------------+------------------------------------------------------- - diskquota_active_table_type | {int8,int2,oid} - quota_config | {int8,int4,int4,oid,oid,tid,xid,xid,cid,cid,float4} - rejectmap | {bool,int4,text,oid,oid,oid,oid,oid,oid} - rejectmap_entry | {bool,int4,oid,oid,oid} - rejectmap_entry_detail | {bool,int4,text,oid,oid,oid,oid,oid,oid} - relation_cache_detail | {char,int4,int4,oid,oid,oid,oid,oid,oid,oid,oid,_oid} - show_all_relation_view | {oid,oid,oid,oid} - show_fast_database_size_view | {numeric} - show_fast_role_quota_view | {name,int8,oid,numeric} - show_fast_role_tablespace_quota_view | {name,name,int8,oid,oid,numeric} - show_fast_schema_quota_view | {name,int8,oid,numeric} - show_fast_schema_tablespace_quota_view | {name,name,int8,oid,oid,numeric} - show_segment_ratio_quota_view | {name,oid,float4} - state | {int4,int4,oid,tid,xid,xid,cid,cid} - table_size | {int8,int2,int4,oid,oid,tid,xid,xid,cid,cid} - target | {int4,int4,int4,oid,oid,oid,tid,xid,xid,cid,cid} -(16 rows) - --- types end --- tables -SELECT - relname, - typeid_to_name(ARRAY[c.reltype]::oid[]) as reltype, - typeid_to_name(ARRAY[c.reloftype]::oid[]) as reloftype -FROM - pg_class c, - pg_namespace n -WHERE - c.relnamespace = n.oid - AND n.nspname = 'diskquota' - and c.relkind != 'v' -ORDER BY - relname; - relname | reltype | reloftype ------------------------------+-------------------------------+----------- - diskquota_active_table_type | {diskquota_active_table_type} | - quota_config | {quota_config} | - quota_config_pkey | | - rejectmap_entry | {rejectmap_entry} | - rejectmap_entry_detail | {rejectmap_entry_detail} | - relation_cache_detail | {relation_cache_detail} | - state | {state} | - state_pkey | | - table_size | {table_size} | - table_size_pkey | | - target | {target} | - target_pkey | | - target_rowid_seq | | -(13 rows) - --- tables end --- UDF -SELECT - proname, - typeid_to_name(ARRAY[prorettype]::oid[]) as prorettype, - typeid_to_name(proargtypes) as proargtypes, - typeid_to_name(proallargtypes) as proallargtypes, - proargmodes, - prosrc, - probin, - proacl -FROM - pg_namespace n, - pg_proc p -WHERE - n.nspname = 'diskquota' - AND n.oid = p.pronamespace - AND p.proname != 'update_diskquota_db_list' -- update_diskquota_db_list in 1.0 can not be dropd, this is acceptable 
-ORDER BY - proname; - proname | prorettype | proargtypes | proallargtypes | proargmodes | prosrc | probin | proacl ------------------------------+-------------------------------+-------------------------+-----------------+-------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------------------+-------- - diskquota_fetch_table_stat | {diskquota_active_table_type} | {int4,_oid} | | | diskquota_fetch_table_stat | $libdir/diskquota-2.3.so | - init_table_size_table | {void} | | | | init_table_size_table | $libdir/diskquota-2.3.so | - pause | {void} | | | | diskquota_pause | $libdir/diskquota-2.3.so | - pull_all_table_size | {record} | | {oid,int8,int2} | {o,o,o} | pull_all_table_size | $libdir/diskquota-2.3.so | - refresh_rejectmap | {void} | {_rejectmap_entry,_oid} | | | refresh_rejectmap | $libdir/diskquota-2.3.so | - relation_size | {int8} | {regclass} | | | +| | - | | | | | SELECT SUM(size)::bigint FROM ( +| | - | | | | | SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, +| | - | | | | | CASE WHEN EXISTS +| | - | | | | | (SELECT FROM pg_catalog.pg_attribute WHERE attrelid = 'pg_class'::regclass AND attname = 'relstorage') THEN relstorage::"char" ELSE ''::"char" END,+| | - | | | | | relam) AS size +| | - | | | | | FROM gp_dist_random('pg_class') as relstorage WHERE oid = relation +| | - | | | | | UNION ALL +| | - | | | | | SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, +| | - | | | | | CASE WHEN EXISTS +| | - | | | | | (SELECT FROM pg_catalog.pg_attribute WHERE attrelid = 'pg_class'::regclass AND attname = 'relstorage') THEN relstorage::"char" ELSE ''::"char" END,+| | - | | | | | relam) AS size +| | - | | | | | FROM pg_class as relstorage WHERE oid = relation +| | - | | | | | ) AS t | | - relation_size_local | {int8} | {oid,oid,char,char,oid} | | | relation_size_local | $libdir/diskquota-2.3.so | - resume | {void} | | | | diskquota_resume | $libdir/diskquota-2.3.so | - set_per_segment_quota | {void} | {text,float4} | | | set_per_segment_quota | $libdir/diskquota-2.3.so | - set_role_quota | {void} | {text,text} | | | set_role_quota | $libdir/diskquota-2.3.so | - set_role_tablespace_quota | {void} | {text,text,text} | | | set_role_tablespace_quota | $libdir/diskquota-2.3.so | - set_schema_quota | {void} | {text,text} | | | set_schema_quota | $libdir/diskquota-2.3.so | - set_schema_tablespace_quota | {void} | {text,text,text} | | | set_schema_tablespace_quota | $libdir/diskquota-2.3.so | - show_rejectmap | {rejectmap_entry_detail} | | | | show_rejectmap | $libdir/diskquota-2.3.so | - show_relation_cache | {relation_cache_detail} | | | | show_relation_cache | $libdir/diskquota-2.3.so | - show_relation_cache_all_seg | {relation_cache_detail} | | | | +| | - | | | | | WITH relation_cache AS ( +| | - | | | | | SELECT diskquota.show_relation_cache() AS a +| | - | | | | | FROM gp_dist_random('gp_id') +| | - | | | | | ) +| | - | | | | | SELECT (a).* FROM relation_cache; | | - show_worker_epoch | {int8} | | | | show_worker_epoch | $libdir/diskquota-2.3.so | - status | {record} | | {text,text} | {t,t} | diskquota_status | $libdir/diskquota-2.3.so | - wait_for_worker_new_epoch | {bool} | | | | wait_for_worker_new_epoch | $libdir/diskquota-2.3.so | -(19 rows) - --- UDF end --- views -SELECT - schemaname, - viewname, - definition -FROM - pg_views -WHERE - schemaname = 'diskquota' -ORDER by - schemaname, viewname; - 
schemaname | viewname | definition -------------+----------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - diskquota | rejectmap | SELECT bm.target_type, + - | | bm.target_oid, + - | | bm.database_oid, + - | | bm.tablespace_oid, + - | | bm.seg_exceeded, + - | | bm.dbnode, + - | | bm.spcnode, + - | | bm.relnode, + - | | bm.segid + - | | FROM diskquota.show_rejectmap() bm(target_type, target_oid, database_oid, tablespace_oid, seg_exceeded, dbnode, spcnode, relnode, segid); - diskquota | show_all_relation_view | WITH relation_cache AS ( + - | | SELECT f.relid, + - | | f.primary_table_oid, + - | | f.auxrel_num, + - | | f.owneroid, + - | | f.namespaceoid, + - | | f.backendid, + - | | f.spcnode, + - | | f.dbnode, + - | | f.relnode, + - | | f.relstorage, + - | | f.auxrel_oid, + - | | f.relam + - | | FROM diskquota.show_relation_cache() f(relid, primary_table_oid, auxrel_num, owneroid, namespaceoid, backendid, spcnode, dbnode, relnode, relstorage, auxrel_oid, relam)+ - | | ) + - | | SELECT DISTINCT union_relation.oid, + - | | union_relation.relowner, + - | | union_relation.relnamespace, + - | | union_relation.reltablespace + - | | FROM ( SELECT relation_cache.relid AS oid, + - | | relation_cache.owneroid AS relowner, + - | | relation_cache.namespaceoid AS relnamespace, + - | | relation_cache.spcnode AS reltablespace + - | | FROM relation_cache + - | | UNION + - | | SELECT pg_class.oid, + - | | pg_class.relowner, + - | | pg_class.relnamespace, + - | | pg_class.reltablespace + - | | FROM pg_class) union_relation; - diskquota | show_fast_database_size_view | SELECT (( SELECT sum(pg_relation_size((pg_class.oid)::regclass)) AS sum + - | | FROM pg_class + - | | WHERE (pg_class.oid <= (16384)::oid)) + ( SELECT sum(table_size.size) AS sum + - | | FROM diskquota.table_size + - | | WHERE (table_size.segid = '-1'::integer))) AS dbsize; - diskquota | show_fast_role_quota_view | WITH quota_usage AS ( + - | | SELECT show_all_relation_view.relowner, + - | | sum(table_size.size) AS total_size + - | | FROM diskquota.table_size, + - | | diskquota.show_all_relation_view + - | | WHERE ((table_size.tableid = show_all_relation_view.oid) AND (table_size.segid = '-1'::integer)) + - | | GROUP BY show_all_relation_view.relowner + - | | ) + - | | SELECT pg_roles.rolname AS role_name, + - | | quota_config.targetoid AS role_oid, + - | | quota_config.quotalimitmb AS quota_in_mb, + - | | COALESCE(quota_usage.total_size, (0)::numeric) AS rolsize_in_bytes + - | | FROM ((diskquota.quota_config + - | | JOIN pg_roles ON ((quota_config.targetoid = pg_roles.oid))) + - | | LEFT JOIN quota_usage ON ((pg_roles.oid = quota_usage.relowner))) + - | | WHERE (quota_config.quotatype = 1); - diskquota | show_fast_role_tablespace_quota_view | WITH default_tablespace AS ( + - | | SELECT pg_database.dattablespace + - | | FROM pg_database + - | | WHERE (pg_database.datname = current_database()) + - | | ), quota_usage AS ( + - | | SELECT show_all_relation_view.relowner, + - | | CASE + - | | WHEN (show_all_relation_view.reltablespace = (0)::oid) THEN default_tablespace.dattablespace + - | | ELSE show_all_relation_view.reltablespace + - | | END AS reltablespace, + - | | sum(table_size.size) AS total_size + - | | FROM diskquota.table_size, + - | | diskquota.show_all_relation_view, + - | | default_tablespace + - | | WHERE ((table_size.tableid = show_all_relation_view.oid) AND 
(table_size.segid = '-1'::integer)) + - | | GROUP BY show_all_relation_view.relowner, show_all_relation_view.reltablespace, default_tablespace.dattablespace + - | | ), full_quota_config AS ( + - | | SELECT target.primaryoid, + - | | target.tablespaceoid, + - | | config.quotalimitmb + - | | FROM diskquota.quota_config config, + - | | diskquota.target target + - | | WHERE ((config.targetoid = (target.rowid)::oid) AND (config.quotatype = target.quotatype) AND (config.quotatype = 3)) + - | | ) + - | | SELECT pg_roles.rolname AS role_name, + - | | full_quota_config.primaryoid AS role_oid, + - | | pg_tablespace.spcname AS tablespace_name, + - | | full_quota_config.tablespaceoid AS tablespace_oid, + - | | full_quota_config.quotalimitmb AS quota_in_mb, + - | | COALESCE(quota_usage.total_size, (0)::numeric) AS rolsize_tablespace_in_bytes + - | | FROM (((full_quota_config + - | | JOIN pg_roles ON ((full_quota_config.primaryoid = pg_roles.oid))) + - | | JOIN pg_tablespace ON ((full_quota_config.tablespaceoid = pg_tablespace.oid))) + - | | LEFT JOIN quota_usage ON (((pg_roles.oid = quota_usage.relowner) AND (pg_tablespace.oid = quota_usage.reltablespace)))); - diskquota | show_fast_schema_quota_view | WITH quota_usage AS ( + - | | SELECT show_all_relation_view.relnamespace, + - | | sum(table_size.size) AS total_size + - | | FROM diskquota.table_size, + - | | diskquota.show_all_relation_view + - | | WHERE ((table_size.tableid = show_all_relation_view.oid) AND (table_size.segid = '-1'::integer)) + - | | GROUP BY show_all_relation_view.relnamespace + - | | ) + - | | SELECT pg_namespace.nspname AS schema_name, + - | | quota_config.targetoid AS schema_oid, + - | | quota_config.quotalimitmb AS quota_in_mb, + - | | COALESCE(quota_usage.total_size, (0)::numeric) AS nspsize_in_bytes + - | | FROM ((diskquota.quota_config + - | | JOIN pg_namespace ON ((quota_config.targetoid = pg_namespace.oid))) + - | | LEFT JOIN quota_usage ON ((pg_namespace.oid = quota_usage.relnamespace))) + - | | WHERE (quota_config.quotatype = 0); - diskquota | show_fast_schema_tablespace_quota_view | WITH default_tablespace AS ( + - | | SELECT pg_database.dattablespace + - | | FROM pg_database + - | | WHERE (pg_database.datname = current_database()) + - | | ), quota_usage AS ( + - | | SELECT show_all_relation_view.relnamespace, + - | | CASE + - | | WHEN (show_all_relation_view.reltablespace = (0)::oid) THEN default_tablespace.dattablespace + - | | ELSE show_all_relation_view.reltablespace + - | | END AS reltablespace, + - | | sum(table_size.size) AS total_size + - | | FROM diskquota.table_size, + - | | diskquota.show_all_relation_view, + - | | default_tablespace + - | | WHERE ((table_size.tableid = show_all_relation_view.oid) AND (table_size.segid = '-1'::integer)) + - | | GROUP BY show_all_relation_view.relnamespace, show_all_relation_view.reltablespace, default_tablespace.dattablespace + - | | ), full_quota_config AS ( + - | | SELECT target.primaryoid, + - | | target.tablespaceoid, + - | | config.quotalimitmb + - | | FROM diskquota.quota_config config, + - | | diskquota.target target + - | | WHERE ((config.targetoid = (target.rowid)::oid) AND (config.quotatype = target.quotatype) AND (config.quotatype = 2)) + - | | ) + - | | SELECT pg_namespace.nspname AS schema_name, + - | | full_quota_config.primaryoid AS schema_oid, + - | | pg_tablespace.spcname AS tablespace_name, + - | | full_quota_config.tablespaceoid AS tablespace_oid, + - | | full_quota_config.quotalimitmb AS quota_in_mb, + - | | COALESCE(quota_usage.total_size, (0)::numeric) AS 
nspsize_tablespace_in_bytes + - | | FROM (((full_quota_config + - | | JOIN pg_namespace ON ((full_quota_config.primaryoid = pg_namespace.oid))) + - | | JOIN pg_tablespace ON ((full_quota_config.tablespaceoid = pg_tablespace.oid))) + - | | LEFT JOIN quota_usage ON (((pg_namespace.oid = quota_usage.relnamespace) AND (pg_tablespace.oid = quota_usage.reltablespace)))); - diskquota | show_segment_ratio_quota_view | SELECT pg_tablespace.spcname AS tablespace_name, + - | | pg_tablespace.oid AS tablespace_oid, + - | | quota_config.segratio AS per_seg_quota_ratio + - | | FROM (diskquota.quota_config + - | | JOIN pg_tablespace ON (((quota_config.targetoid = pg_tablespace.oid) AND (quota_config.quotatype = 4)))); -(8 rows) - --- views end -DROP FUNCTION typeid_to_name (oid[]); diff --git a/gpcontrib/diskquota/upgrade_test/expected7/2.3_cleanup_quota.out b/gpcontrib/diskquota/upgrade_test/expected7/2.3_cleanup_quota.out deleted file mode 100644 index 3935d709fd9..00000000000 --- a/gpcontrib/diskquota/upgrade_test/expected7/2.3_cleanup_quota.out +++ /dev/null @@ -1 +0,0 @@ -drop extension diskquota; diff --git a/gpcontrib/diskquota/upgrade_test/expected7/2.3_install.out b/gpcontrib/diskquota/upgrade_test/expected7/2.3_install.out deleted file mode 100644 index 4738c064a82..00000000000 --- a/gpcontrib/diskquota/upgrade_test/expected7/2.3_install.out +++ /dev/null @@ -1,13 +0,0 @@ --- cleanup previous diskquota installation -\! gpconfig -c shared_preload_libraries -v '' > /dev/null -\! gpstop -raf > /dev/null -\! dropdb --if-exists diskquota --- TODO reset all diskquota GUC -\! gpstop -raf > /dev/null --- setup basic environment -\! createdb diskquota -\! gpconfig -c shared_preload_libraries -v 'diskquota-2.3.so' > /dev/null -\! gpstop -raf > /dev/null --- TODO setup GUC -\! gpconfig -c diskquota.naptime -v '1' > /dev/null -\! gpstop -raf > /dev/null diff --git a/gpcontrib/diskquota/upgrade_test/expected7/2.3_migrate_to_version_2.3.out b/gpcontrib/diskquota/upgrade_test/expected7/2.3_migrate_to_version_2.3.out deleted file mode 100644 index db67a0e36dd..00000000000 --- a/gpcontrib/diskquota/upgrade_test/expected7/2.3_migrate_to_version_2.3.out +++ /dev/null @@ -1,10 +0,0 @@ -\! gpconfig -c shared_preload_libraries -v 'diskquota-2.3.so' > /dev/null -\! gpstop -raf > /dev/null -\! gpconfig -s 'shared_preload_libraries' -Values on all segments are consistent -GUC : shared_preload_libraries -Coordinator value: diskquota-2.3.so -Segment value: diskquota-2.3.so -\c -alter extension diskquota update to '2.3'; -\! sleep 5 diff --git a/gpcontrib/diskquota/upgrade_test/expected7/2.3_set_quota.out b/gpcontrib/diskquota/upgrade_test/expected7/2.3_set_quota.out deleted file mode 100644 index 114f346dddf..00000000000 --- a/gpcontrib/diskquota/upgrade_test/expected7/2.3_set_quota.out +++ /dev/null @@ -1,66 +0,0 @@ -\!gpconfig -s 'shared_preload_libraries' -Values on all segments are consistent -GUC : shared_preload_libraries -Coordinator value: diskquota-2.3.so -Segment value: diskquota-2.3.so -create extension diskquota with version '2.3'; -select diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) - --- schema quota -create schema s1; -select diskquota.set_schema_quota('s1', '1 MB'); - set_schema_quota ------------------- - -(1 row) - -create table s1.a(i int) distributed by (i); -insert into s1.a select generate_series(1, 10000000); -- ok. 
--- role quota -create schema srole; -create role u1 nologin; -create table srole.b (t text) distributed by (t); -alter table srole.b owner to u1; -select diskquota.set_role_quota('u1', '1 MB'); - set_role_quota ----------------- - -(1 row) - -insert into srole.b select generate_series(1,100000); -- ok. --- schema tablespace quota -\! mkdir -p /tmp/schemaspc -create schema spcs1; -create tablespace schemaspc location '/tmp/schemaspc'; -select diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc','1 MB'); - set_schema_tablespace_quota ------------------------------ - -(1 row) - -create table spcs1.a(i int) tablespace schemaspc distributed by (i); -insert into spcs1.a select generate_series(1,100000); -- ok. --- role tablespace quota -\! mkdir -p /tmp/rolespc -create tablespace rolespc location '/tmp/rolespc'; -create role rolespcu1 nologin; -create schema rolespcrole; -create table rolespcrole.b (t text) tablespace rolespc distributed by (t); -alter table rolespcrole.b owner to rolespcu1; -select diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '1 MB'); - set_role_tablespace_quota ---------------------------- - -(1 row) - -insert into rolespcrole.b select generate_series(1,100000); -- ok. -\!sleep 5 --- leaked resource: --- role u1, rolespcu1 --- table s1.a, srole.b spcs1.a, rolespcrole.b --- schema s1, srole, spcs1, rolespcrole --- tablespace schemaspc, rolespc diff --git a/gpcontrib/diskquota/upgrade_test/expected7/2.3_test_in_2.2_quota_create_in_2.3.out b/gpcontrib/diskquota/upgrade_test/expected7/2.3_test_in_2.2_quota_create_in_2.3.out deleted file mode 100644 index 71c24e5865b..00000000000 --- a/gpcontrib/diskquota/upgrade_test/expected7/2.3_test_in_2.2_quota_create_in_2.3.out +++ /dev/null @@ -1,16 +0,0 @@ --- need run 2.2_set_quota before run this test --- FIXME add version check here -\! sleep 5 -insert into s1.a select generate_series(1, 10000000); -- fail. -ERROR: schema's disk space quota exceeded with name: s1 -insert into srole.b select generate_series(1, 100000); -- fail. -ERROR: role's disk space quota exceeded with name: u1 -insert into rolespcrole.b select generate_series(1, 100000); -- fail. -ERROR: tablespace: rolespc, role: rolespcu1 diskquota exceeded -insert into spcs1.a select generate_series(1, 100000); -- fail. 
-ERROR: tablespace: schemaspc, schema: spcs1 diskquota exceeded -drop table s1.a, srole.b, spcs1.a, rolespcrole.b; -drop schema s1, srole, spcs1, rolespcrole; -drop tablespace rolespc; -drop tablespace schemaspc; -drop role u1, rolespcu1; diff --git a/gpcontrib/diskquota/upgrade_test/schedule_1.0--2.0 b/gpcontrib/diskquota/upgrade_test/schedule_1.0--2.0 deleted file mode 100644 index 2b40ab590f8..00000000000 --- a/gpcontrib/diskquota/upgrade_test/schedule_1.0--2.0 +++ /dev/null @@ -1,8 +0,0 @@ -test: 1.0_install -test: 1.0_set_quota -test: 1.0_catalog -test: 2.0_migrate_to_version_2.0 -test: 2.0_catalog -# run 1.0 behavior test using 2.0 DDL and binary -test: 1.0_test_in_2.0_quota_create_in_1.0 -test: 1.0_cleanup_quota diff --git a/gpcontrib/diskquota/upgrade_test/schedule_2.0--1.0 b/gpcontrib/diskquota/upgrade_test/schedule_2.0--1.0 deleted file mode 100644 index 55a959bad72..00000000000 --- a/gpcontrib/diskquota/upgrade_test/schedule_2.0--1.0 +++ /dev/null @@ -1,8 +0,0 @@ -test: 2.0_install -test: 2.0_set_quota -test: 2.0_catalog -test: 1.0_migrate_to_version_1.0 -test: 1.0_catalog -# run 2.0 behavior test using 1.0 DDL and binary -test: 2.0_test_in_1.0_quota_create_in_2.0 -test: 2.0_cleanup_quota diff --git a/gpcontrib/diskquota/upgrade_test/schedule_2.0--2.1 b/gpcontrib/diskquota/upgrade_test/schedule_2.0--2.1 deleted file mode 100644 index 7aa1f1a2e3b..00000000000 --- a/gpcontrib/diskquota/upgrade_test/schedule_2.0--2.1 +++ /dev/null @@ -1,8 +0,0 @@ -test: 2.0_install -test: 2.0_set_quota -test: 2.0_catalog -test: 2.1_migrate_to_version_2.1 -test: 2.1_catalog -# run 2.0 behavior test using 2.1 DDL and binary -test: 2.0_test_in_2.1_quota_create_in_2.0 -test: 2.0_cleanup_quota diff --git a/gpcontrib/diskquota/upgrade_test/schedule_2.1--2.0 b/gpcontrib/diskquota/upgrade_test/schedule_2.1--2.0 deleted file mode 100644 index 24590df38c9..00000000000 --- a/gpcontrib/diskquota/upgrade_test/schedule_2.1--2.0 +++ /dev/null @@ -1,8 +0,0 @@ -test: 2.1_install -test: 2.1_set_quota -test: 2.1_catalog -test: 2.0_migrate_to_version_2.0 -test: 2.0_catalog -# run 2.1 behavior test using 2.0 DDL and binary -test: 2.1_test_in_2.0_quota_create_in_2.1 -test: 2.1_cleanup_quota diff --git a/gpcontrib/diskquota/upgrade_test/schedule_2.1--2.2 b/gpcontrib/diskquota/upgrade_test/schedule_2.1--2.2 deleted file mode 100644 index 06307e64059..00000000000 --- a/gpcontrib/diskquota/upgrade_test/schedule_2.1--2.2 +++ /dev/null @@ -1,8 +0,0 @@ -test: 2.1_install -test: 2.1_set_quota -test: 2.1_catalog -test: 2.2_migrate_to_version_2.2 -test: 2.2_catalog -# run 2.1 behavior test using 2.2 DDL and binary -test: 2.1_test_in_2.2_quota_create_in_2.1 -test: 2.1_cleanup_quota diff --git a/gpcontrib/diskquota/upgrade_test/schedule_2.2--2.1 b/gpcontrib/diskquota/upgrade_test/schedule_2.2--2.1 deleted file mode 100644 index c6f79db7b0a..00000000000 --- a/gpcontrib/diskquota/upgrade_test/schedule_2.2--2.1 +++ /dev/null @@ -1,8 +0,0 @@ -test: 2.2_install -test: 2.2_set_quota -test: 2.2_catalog -test: 2.1_migrate_to_version_2.1 -test: 2.1_catalog -# run 2.2 behavior test using 2.1 DDL and binary -test: 2.2_test_in_2.1_quota_create_in_2.2 -test: 2.2_cleanup_quota diff --git a/gpcontrib/diskquota/upgrade_test/sql/1.0_catalog.sql b/gpcontrib/diskquota/upgrade_test/sql/1.0_catalog.sql deleted file mode 100644 index e376725ef83..00000000000 --- a/gpcontrib/diskquota/upgrade_test/sql/1.0_catalog.sql +++ /dev/null @@ -1,80 +0,0 @@ -CREATE FUNCTION typeid_to_name(oid[]) RETURNS name[] AS ' - WITH io AS ( - SELECT x.i AS index, x.o AS 
type_id FROM ( - SELECT generate_series(1, array_length($1, 1)) AS i, unnest($1) AS o - ) AS x - ) SELECT array_agg(typname order by io.index) FROM io, pg_type t WHERE io.type_id = t.oid; -' LANGUAGE sql STABLE; - --- types -SELECT - t1.typname, - array_agg(t2.typname order by a.atttypid) typname -FROM - pg_namespace n, - pg_class c, - pg_type t1, - pg_type t2, - pg_attribute a -WHERE - n.nspname = 'diskquota' - AND c.oid = t1.typrelid - AND n.oid = t1.typnamespace - AND a.attrelid = c.oid - AND t2.oid = a.atttypid -GROUP BY - t1.typname -ORDER BY - t1.typname; --- types end - --- tables -SELECT - relname, - typeid_to_name(ARRAY[c.reltype]::oid[]) AS reltype, - typeid_to_name(ARRAY[c.reloftype]::oid[]) AS reloftype -FROM - pg_class c, - pg_namespace n -WHERE - c.relnamespace = n.oid - AND n.nspname = 'diskquota' - and c.relkind != 'v' -ORDER BY - relname; --- tables end - --- UDF -SELECT - proname, - typeid_to_name(ARRAY[prorettype]::oid[]) AS prorettype, - typeid_to_name(proargtypes) AS proargtypes, - typeid_to_name(proallargtypes) AS proallargtypes, - proargmodes, - prosrc, - probin, - proacl -FROM - pg_namespace n, - pg_proc p -WHERE - n.nspname = 'diskquota' - AND n.oid = p.pronamespace -ORDER BY - proname; --- UDF end - --- views -SELECT - schemaname, - viewname, - definition -FROM - pg_views -WHERE - schemaname = 'diskquota' -ORDER BY - schemaname, viewname; --- views end - -DROP FUNCTION typeid_to_name(oid[]); diff --git a/gpcontrib/diskquota/upgrade_test/sql/1.0_cleanup_quota.sql b/gpcontrib/diskquota/upgrade_test/sql/1.0_cleanup_quota.sql deleted file mode 100644 index 3935d709fd9..00000000000 --- a/gpcontrib/diskquota/upgrade_test/sql/1.0_cleanup_quota.sql +++ /dev/null @@ -1 +0,0 @@ -drop extension diskquota; diff --git a/gpcontrib/diskquota/upgrade_test/sql/1.0_install.sql b/gpcontrib/diskquota/upgrade_test/sql/1.0_install.sql deleted file mode 100644 index 95f758a89ed..00000000000 --- a/gpcontrib/diskquota/upgrade_test/sql/1.0_install.sql +++ /dev/null @@ -1,17 +0,0 @@ --- cleanup previous diskquota installation -\! gpconfig -c shared_preload_libraries -v '' > /dev/null -\! gpstop -raf > /dev/null -\! dropdb --if-exists diskquota - --- TODO reset all diskquota GUC -\! gpstop -raf > /dev/null - --- setup basic environment -\! createdb diskquota - -\! gpconfig -c shared_preload_libraries -v 'diskquota.so' > /dev/null -\! gpstop -raf > /dev/null - --- TODO setup GUC -\! gpconfig -c diskquota.naptime -v '1' > /dev/null -\! gpstop -raf > /dev/null diff --git a/gpcontrib/diskquota/upgrade_test/sql/1.0_migrate_to_version_1.0.sql b/gpcontrib/diskquota/upgrade_test/sql/1.0_migrate_to_version_1.0.sql deleted file mode 100644 index 6d9763ca66b..00000000000 --- a/gpcontrib/diskquota/upgrade_test/sql/1.0_migrate_to_version_1.0.sql +++ /dev/null @@ -1,10 +0,0 @@ -\! gpconfig -c shared_preload_libraries -v 'diskquota.so' > /dev/null -\! gpstop -raf > /dev/null - -\! gpconfig -s 'shared_preload_libraries' - -\c -alter extension diskquota update to '1.0'; --- downgrading to 1.0 needs a reboot; the version check does not exist in 1.0 --- worker status is undefined right after the downgrade -\! gpstop -arf > /dev/null diff --git a/gpcontrib/diskquota/upgrade_test/sql/1.0_not_work_using_2.x_binary.sql b/gpcontrib/diskquota/upgrade_test/sql/1.0_not_work_using_2.x_binary.sql deleted file mode 100644 index cc4420dd082..00000000000 --- a/gpcontrib/diskquota/upgrade_test/sql/1.0_not_work_using_2.x_binary.sql +++ /dev/null @@ -1,21 +0,0 @@ --- cleanup previous diskquota installation -\!
gpconfig -c shared_preload_libraries -v '' -\! gpstop -raf > /dev/null -drop database if exists diskquota; - --- TODO reset all diskquota GUC -\! gpstop -raf > /dev/null - --- setup basic environment -create database diskquota; - -\! gpconfig -c shared_preload_libraries -v 'diskquota-1.0.so' -\! gpconfig -c diskquota.naptime -v '1' -\! gpstop -raf > /dev/null - -create extension diskquota version '1.0'; -- for now 1.0 is installed - -\! gpconfig -c shared_preload_libraries -v 'diskquota-2.0.so' -\! gpstop -raf > /dev/null - --- FIXME check diskquota should prompt user to do upgrade diff --git a/gpcontrib/diskquota/upgrade_test/sql/1.0_set_quota.sql b/gpcontrib/diskquota/upgrade_test/sql/1.0_set_quota.sql deleted file mode 100644 index cf1516347a2..00000000000 --- a/gpcontrib/diskquota/upgrade_test/sql/1.0_set_quota.sql +++ /dev/null @@ -1,25 +0,0 @@ -\!gpconfig -s 'shared_preload_libraries' - -create extension diskquota with version '1.0'; -\!sleep 5 - --- schema quota -create schema s1; -select diskquota.set_schema_quota('s1', '1 MB'); -create table s1.a(i int) distributed by (i); -insert into s1.a select generate_series(1, 10000000); -- ok, but should fail after upgrade - --- role quota -create schema srole; -create role u1 nologin; -create table srole.b (t text) distributed by (t); -alter table srole.b owner to u1; -select diskquota.set_role_quota('u1', '1 MB'); -insert into srole.b select generate_series(1,100000); -- ok, but should fail after upgrade - -\!sleep 5 - --- leaked resource: --- role u1 --- table s1.a, srole.b --- schema s1, srole diff --git a/gpcontrib/diskquota/upgrade_test/sql/1.0_test_in_2.0_quota_create_in_1.0.sql b/gpcontrib/diskquota/upgrade_test/sql/1.0_test_in_2.0_quota_create_in_1.0.sql deleted file mode 100644 index c6aeb2f478f..00000000000 --- a/gpcontrib/diskquota/upgrade_test/sql/1.0_test_in_2.0_quota_create_in_1.0.sql +++ /dev/null @@ -1,11 +0,0 @@ --- need to run 1.0_set_quota before running this test --- FIXME add version check here - -\!sleep 5 - -insert into s1.a select generate_series(1, 100); -- fail -insert into srole.b select generate_series(1, 100); -- fail - -drop table s1.a, srole.b; -drop schema s1, srole; -drop role u1; diff --git a/gpcontrib/diskquota/upgrade_test/sql/2.0_catalog.sql b/gpcontrib/diskquota/upgrade_test/sql/2.0_catalog.sql deleted file mode 100644 index ebf5f00aa56..00000000000 --- a/gpcontrib/diskquota/upgrade_test/sql/2.0_catalog.sql +++ /dev/null @@ -1,81 +0,0 @@ -CREATE FUNCTION typeid_to_name(oid[]) RETURNS name[] AS ' - WITH io AS ( - SELECT x.i AS index, x.o AS type_id FROM ( - SELECT generate_series(1, array_length($1, 1)) AS i, unnest($1) AS o - ) AS x - ) SELECT array_agg(typname order by io.index) FROM io, pg_type t WHERE io.type_id = t.oid; -' LANGUAGE sql STABLE; - --- types -SELECT - t1.typname, - array_agg(t2.typname order by a.atttypid) typname -FROM - pg_namespace n, - pg_class c, - pg_type t1, - pg_type t2, - pg_attribute a -WHERE - n.nspname = 'diskquota' - AND c.oid = t1.typrelid - AND n.oid = t1.typnamespace - AND a.attrelid = c.oid - AND t2.oid = a.atttypid -GROUP BY - t1.typname -ORDER BY - t1.typname; --- types end - --- tables -SELECT - relname, - typeid_to_name(ARRAY[c.reltype]::oid[]) as reltype, - typeid_to_name(ARRAY[c.reloftype]::oid[]) as reloftype -FROM - pg_class c, - pg_namespace n -WHERE - c.relnamespace = n.oid - AND n.nspname = 'diskquota' - and c.relkind != 'v' -ORDER BY - relname; --- tables end - --- UDF -SELECT - proname, - typeid_to_name(ARRAY[prorettype]::oid[]) as prorettype, -
typeid_to_name(proargtypes) as proargtypes, - typeid_to_name(proallargtypes) as proallargtypes, - proargmodes, - prosrc, - probin, - proacl -FROM - pg_namespace n, - pg_proc p -WHERE - n.nspname = 'diskquota' - AND n.oid = p.pronamespace - AND p.proname != 'update_diskquota_db_list' -- update_diskquota_db_list in 1.0 cannot be dropped, this is acceptable -ORDER BY - proname; --- UDF end - --- views -SELECT - schemaname, - viewname, - definition -FROM - pg_views -WHERE - schemaname = 'diskquota' -ORDER by - schemaname, viewname; --- views end - -DROP FUNCTION typeid_to_name (oid[]); diff --git a/gpcontrib/diskquota/upgrade_test/sql/2.0_cleanup_quota.sql b/gpcontrib/diskquota/upgrade_test/sql/2.0_cleanup_quota.sql deleted file mode 100644 index 3935d709fd9..00000000000 --- a/gpcontrib/diskquota/upgrade_test/sql/2.0_cleanup_quota.sql +++ /dev/null @@ -1 +0,0 @@ -drop extension diskquota; diff --git a/gpcontrib/diskquota/upgrade_test/sql/2.0_install.sql b/gpcontrib/diskquota/upgrade_test/sql/2.0_install.sql deleted file mode 100644 index b51150f6fde..00000000000 --- a/gpcontrib/diskquota/upgrade_test/sql/2.0_install.sql +++ /dev/null @@ -1,17 +0,0 @@ --- cleanup previous diskquota installation -\! gpconfig -c shared_preload_libraries -v '' > /dev/null -\! gpstop -raf > /dev/null -\! dropdb --if-exists diskquota - --- TODO reset all diskquota GUC -\! gpstop -raf > /dev/null - --- setup basic environment -\! createdb diskquota - -\! gpconfig -c shared_preload_libraries -v 'diskquota-2.0.so' > /dev/null -\! gpstop -raf > /dev/null - --- TODO setup GUC -\! gpconfig -c diskquota.naptime -v '1' > /dev/null -\! gpstop -raf > /dev/null diff --git a/gpcontrib/diskquota/upgrade_test/sql/2.0_migrate_to_version_2.0.sql b/gpcontrib/diskquota/upgrade_test/sql/2.0_migrate_to_version_2.0.sql deleted file mode 100644 index f001f664f60..00000000000 --- a/gpcontrib/diskquota/upgrade_test/sql/2.0_migrate_to_version_2.0.sql +++ /dev/null @@ -1,8 +0,0 @@ -\! gpconfig -c shared_preload_libraries -v 'diskquota-2.0.so' > /dev/null -\! gpstop -raf > /dev/null - -\! gpconfig -s 'shared_preload_libraries' - -\c -alter extension diskquota update to '2.0'; -\! sleep 5 diff --git a/gpcontrib/diskquota/upgrade_test/sql/2.0_not_work_using_1.x_binary.sql b/gpcontrib/diskquota/upgrade_test/sql/2.0_not_work_using_1.x_binary.sql deleted file mode 100644 index b8f3d099673..00000000000 --- a/gpcontrib/diskquota/upgrade_test/sql/2.0_not_work_using_1.x_binary.sql +++ /dev/null @@ -1,23 +0,0 @@ --- cleanup previous diskquota installation -\! gpconfig -c shared_preload_libraries -v '' > /dev/null -\! gpstop -raf > /dev/null -\! dropdb --if-exists diskquota - --- TODO reset all diskquota GUC -\! gpstop -raf > /dev/null - --- setup basic environment -\! createdb diskquota - -\! gpconfig -c shared_preload_libraries -v 'diskquota-2.0.so' -\! gpstop -raf > /dev/null -\! gpconfig -c diskquota.naptime -v '1' -\! gpstop -raf > /dev/null - -\c -create extension diskquota version '2.0'; -- for now 2.0 is installed - -\! gpconfig -c shared_preload_libraries -v 'diskquota-1.0.so' -\!
gpstop -raf > /dev/null - --- FIXME check diskquota should prompt user to do downgrade diff --git a/gpcontrib/diskquota/upgrade_test/sql/2.0_set_quota.sql b/gpcontrib/diskquota/upgrade_test/sql/2.0_set_quota.sql deleted file mode 100644 index 12a47e7b2bb..00000000000 --- a/gpcontrib/diskquota/upgrade_test/sql/2.0_set_quota.sql +++ /dev/null @@ -1,44 +0,0 @@ -\!gpconfig -s 'shared_preload_libraries' - -create extension diskquota with version '2.0'; -\!sleep 5 - --- schema quota -create schema s1; -select diskquota.set_schema_quota('s1', '1 MB'); -create table s1.a(i int) distributed by (i); -insert into s1.a select generate_series(1, 10000000); -- ok. - --- role quota -create schema srole; -create role u1 nologin; -create table srole.b (t text) distributed by (t); -alter table srole.b owner to u1; -select diskquota.set_role_quota('u1', '1 MB'); -insert into srole.b select generate_series(1,100000); -- ok. - --- schema tablespace quota -\! mkdir -p /tmp/schemaspc -create schema spcs1; -create tablespace schemaspc location '/tmp/schemaspc'; -select diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc','1 MB'); -create table spcs1.a(i int) tablespace schemaspc distributed by (i); -insert into spcs1.a select generate_series(1,100000); -- ok. - --- role tablespace quota -\! mkdir -p /tmp/rolespc -create tablespace rolespc location '/tmp/rolespc'; -create role rolespcu1 nologin; -create schema rolespcrole; -create table rolespcrole.b (t text) tablespace rolespc distributed by (t); -alter table rolespcrole.b owner to rolespcu1; -select diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '1 MB'); -insert into rolespcrole.b select generate_series(1,100000); -- ok. - -\!sleep 5 - --- leaked resource: --- role u1, rolespcu1 --- table s1.a, srole.b spcs1.a, rolespcrole.b --- schema s1, srole, spcs1, rolespcrole --- tablespace schemaspc, rolespc diff --git a/gpcontrib/diskquota/upgrade_test/sql/2.0_test_in_1.0_quota_create_in_2.0.sql b/gpcontrib/diskquota/upgrade_test/sql/2.0_test_in_1.0_quota_create_in_2.0.sql deleted file mode 100644 index 575beedafe2..00000000000 --- a/gpcontrib/diskquota/upgrade_test/sql/2.0_test_in_1.0_quota_create_in_2.0.sql +++ /dev/null @@ -1,16 +0,0 @@ --- need to run 2.0_set_quota before running this test --- FIXME add version check here - -\! sleep 5 - -insert into s1.a select generate_series(1, 10000000); -- fail. -insert into srole.b select generate_series(1, 100000); -- fail. - -insert into rolespcrole.b select generate_series(1, 100000); -- ok. -insert into spcs1.a select generate_series(1, 100000); -- ok. - -drop table s1.a, srole.b, spcs1.a, rolespcrole.b; -drop schema s1, srole, spcs1, rolespcrole; -drop tablespace rolespc; -drop tablespace schemaspc; -drop role u1, rolespcu1; diff --git a/gpcontrib/diskquota/upgrade_test/sql/2.0_test_in_2.1_quota_create_in_2.0.sql b/gpcontrib/diskquota/upgrade_test/sql/2.0_test_in_2.1_quota_create_in_2.0.sql deleted file mode 100644 index c2d9dbe33ea..00000000000 --- a/gpcontrib/diskquota/upgrade_test/sql/2.0_test_in_2.1_quota_create_in_2.0.sql +++ /dev/null @@ -1,17 +0,0 @@ --- need to run 2.0_set_quota before running this test --- FIXME add version check here - -\!sleep 5 - -insert into s1.a select generate_series(1, 10000000); -- fail. -insert into srole.b select generate_series(1, 100000); -- fail. - -insert into rolespcrole.b select generate_series(1, 100000); -- fail. -insert into spcs1.a select generate_series(1, 100000); -- fail.
- -drop table s1.a, srole.b, spcs1.a, rolespcrole.b; -drop schema s1, srole, spcs1, rolespcrole; -drop tablespace rolespc; -drop tablespace schemaspc; -drop role u1, rolespcu1; - diff --git a/gpcontrib/diskquota/upgrade_test/sql/2.1_catalog.sql b/gpcontrib/diskquota/upgrade_test/sql/2.1_catalog.sql deleted file mode 100644 index ebf5f00aa56..00000000000 --- a/gpcontrib/diskquota/upgrade_test/sql/2.1_catalog.sql +++ /dev/null @@ -1,81 +0,0 @@ -CREATE FUNCTION typeid_to_name(oid[]) RETURNS name[] AS ' - WITH io AS ( - SELECT x.i AS index, x.o AS type_id FROM ( - SELECT generate_series(1, array_length($1, 1)) AS i, unnest($1) AS o - ) AS x - ) SELECT array_agg(typname order by io.index) FROM io, pg_type t WHERE io.type_id = t.oid; -' LANGUAGE sql STABLE; - --- types -SELECT - t1.typname, - array_agg(t2.typname order by a.atttypid) typname -FROM - pg_namespace n, - pg_class c, - pg_type t1, - pg_type t2, - pg_attribute a -WHERE - n.nspname = 'diskquota' - AND c.oid = t1.typrelid - AND n.oid = t1.typnamespace - AND a.attrelid = c.oid - AND t2.oid = a.atttypid -GROUP BY - t1.typname -ORDER BY - t1.typname; --- types end - --- tables -SELECT - relname, - typeid_to_name(ARRAY[c.reltype]::oid[]) as reltype, - typeid_to_name(ARRAY[c.reloftype]::oid[]) as reloftype -FROM - pg_class c, - pg_namespace n -WHERE - c.relnamespace = n.oid - AND n.nspname = 'diskquota' - and c.relkind != 'v' -ORDER BY - relname; --- tables end - --- UDF -SELECT - proname, - typeid_to_name(ARRAY[prorettype]::oid[]) as prorettype, - typeid_to_name(proargtypes) as proargtypes, - typeid_to_name(proallargtypes) as proallargtypes, - proargmodes, - prosrc, - probin, - proacl -FROM - pg_namespace n, - pg_proc p -WHERE - n.nspname = 'diskquota' - AND n.oid = p.pronamespace - AND p.proname != 'update_diskquota_db_list' -- update_diskquota_db_list in 1.0 cannot be dropped, this is acceptable -ORDER BY - proname; --- UDF end - --- views -SELECT - schemaname, - viewname, - definition -FROM - pg_views -WHERE - schemaname = 'diskquota' -ORDER by - schemaname, viewname; --- views end - -DROP FUNCTION typeid_to_name (oid[]); diff --git a/gpcontrib/diskquota/upgrade_test/sql/2.1_cleanup_quota.sql b/gpcontrib/diskquota/upgrade_test/sql/2.1_cleanup_quota.sql deleted file mode 100644 index 3935d709fd9..00000000000 --- a/gpcontrib/diskquota/upgrade_test/sql/2.1_cleanup_quota.sql +++ /dev/null @@ -1 +0,0 @@ -drop extension diskquota; diff --git a/gpcontrib/diskquota/upgrade_test/sql/2.1_install.sql b/gpcontrib/diskquota/upgrade_test/sql/2.1_install.sql deleted file mode 100644 index 645c6b34eae..00000000000 --- a/gpcontrib/diskquota/upgrade_test/sql/2.1_install.sql +++ /dev/null @@ -1,17 +0,0 @@ --- cleanup previous diskquota installation -\! gpconfig -c shared_preload_libraries -v '' > /dev/null -\! gpstop -raf > /dev/null -\! dropdb --if-exists diskquota - --- TODO reset all diskquota GUC -\! gpstop -raf > /dev/null - --- setup basic environment -\! createdb diskquota - -\! gpconfig -c shared_preload_libraries -v 'diskquota-2.1.so' > /dev/null -\! gpstop -raf > /dev/null - --- TODO setup GUC -\! gpconfig -c diskquota.naptime -v '1' > /dev/null -\! gpstop -raf > /dev/null diff --git a/gpcontrib/diskquota/upgrade_test/sql/2.1_migrate_to_version_2.1.sql b/gpcontrib/diskquota/upgrade_test/sql/2.1_migrate_to_version_2.1.sql deleted file mode 100644 index d9c9bc96d6a..00000000000 --- a/gpcontrib/diskquota/upgrade_test/sql/2.1_migrate_to_version_2.1.sql +++ /dev/null @@ -1,8 +0,0 @@ -\!
gpconfig -c shared_preload_libraries -v 'diskquota-2.1.so' > /dev/null -\! gpstop -raf > /dev/null - -\! gpconfig -s 'shared_preload_libraries' - -\c -alter extension diskquota update to '2.1'; -\! sleep 5 diff --git a/gpcontrib/diskquota/upgrade_test/sql/2.1_set_quota.sql b/gpcontrib/diskquota/upgrade_test/sql/2.1_set_quota.sql deleted file mode 100644 index 9711d953732..00000000000 --- a/gpcontrib/diskquota/upgrade_test/sql/2.1_set_quota.sql +++ /dev/null @@ -1,44 +0,0 @@ -\!gpconfig -s 'shared_preload_libraries' - -create extension diskquota with version '2.1'; -\!sleep 5 - --- schema quota -create schema s1; -select diskquota.set_schema_quota('s1', '1 MB'); -create table s1.a(i int) distributed by (i); -insert into s1.a select generate_series(1, 10000000); -- ok. - --- role quota -create schema srole; -create role u1 nologin; -create table srole.b (t text) distributed by (t); -alter table srole.b owner to u1; -select diskquota.set_role_quota('u1', '1 MB'); -insert into srole.b select generate_series(1,100000); -- ok. - --- schema tablespace quota -\! mkdir -p /tmp/schemaspc -create schema spcs1; -create tablespace schemaspc location '/tmp/schemaspc'; -select diskquota.set_schema_tablespace_quota('spcs1', 'schemaspc','1 MB'); -create table spcs1.a(i int) tablespace schemaspc distributed by (i); -insert into spcs1.a select generate_series(1,100000); -- ok. - --- role tablespace quota -\! mkdir -p /tmp/rolespc -create tablespace rolespc location '/tmp/rolespc'; -create role rolespcu1 nologin; -create schema rolespcrole; -create table rolespcrole.b (t text) tablespace rolespc distributed by (t); -alter table rolespcrole.b owner to rolespcu1; -select diskquota.set_role_tablespace_quota('rolespcu1', 'rolespc', '1 MB'); -insert into rolespcrole.b select generate_series(1,100000); -- ok. - -\!sleep 5 - --- leaked resource: --- role u1, rolespcu1 --- table s1.a, srole.b spcs1.a, rolespcrole.b --- schema s1, srole, spcs1, rolespcrole --- tablespace schemaspc, rolespc diff --git a/gpcontrib/diskquota/upgrade_test/sql/2.1_test_in_2.0_quota_create_in_2.1.sql b/gpcontrib/diskquota/upgrade_test/sql/2.1_test_in_2.0_quota_create_in_2.1.sql deleted file mode 100644 index 974df545602..00000000000 --- a/gpcontrib/diskquota/upgrade_test/sql/2.1_test_in_2.0_quota_create_in_2.1.sql +++ /dev/null @@ -1,16 +0,0 @@ --- need to run 2.1_set_quota before running this test --- FIXME add version check here - -\! sleep 5 - -insert into s1.a select generate_series(1, 10000000); -- fail. -insert into srole.b select generate_series(1, 100000); -- fail. - -insert into rolespcrole.b select generate_series(1, 100000); -- fail. -insert into spcs1.a select generate_series(1, 100000); -- fail. - -drop table s1.a, srole.b, spcs1.a, rolespcrole.b; -drop schema s1, srole, spcs1, rolespcrole; -drop tablespace rolespc; -drop tablespace schemaspc; -drop role u1, rolespcu1; diff --git a/gpcontrib/diskquota/upgrade_test/sql/2.1_test_in_2.2_quota_create_in_2.1.sql b/gpcontrib/diskquota/upgrade_test/sql/2.1_test_in_2.2_quota_create_in_2.1.sql deleted file mode 100644 index c2d9dbe33ea..00000000000 --- a/gpcontrib/diskquota/upgrade_test/sql/2.1_test_in_2.2_quota_create_in_2.1.sql +++ /dev/null @@ -1,17 +0,0 @@ --- need to run 2.1_set_quota before running this test --- FIXME add version check here - -\!sleep 5 - -insert into s1.a select generate_series(1, 10000000); -- fail. -insert into srole.b select generate_series(1, 100000); -- fail. - -insert into rolespcrole.b select generate_series(1, 100000); -- fail.
-insert into spcs1.a select generate_series(1, 100000); -- fail. - -drop table s1.a, srole.b, spcs1.a, rolespcrole.b; -drop schema s1, srole, spcs1, rolespcrole; -drop tablespace rolespc; -drop tablespace schemaspc; -drop role u1, rolespcu1; - diff --git a/gpcontrib/diskquota/upgrade_test/sql/dummy.sql b/gpcontrib/diskquota/upgrade_test/sql/dummy.sql deleted file mode 100644 index e69de29bb2d..00000000000 From de212a41891ae47d9af474d0232b2f152d79f794 Mon Sep 17 00:00:00 2001 From: Dianjin Wang Date: Fri, 12 Dec 2025 14:50:12 +0800 Subject: [PATCH 330/330] Doc: Add diskquota license for ASF compliance Add diskquota extension license information to comply with Apache release requirements. Changes: - Add diskquota entry to top-level LICENSE file under Greenplum section - Create licenses/LICENSE-diskquota.txt with PostgreSQL License text - Add gpcontrib/diskquota/** to pom.xml excludes for apache-rat checks The diskquota extension, originally developed by Pivotal Software and VMware, is licensed under the PostgreSQL License. --- LICENSE | 8 +++++++- licenses/LICENSE-diskquota.txt | 31 +++++++++++++++++++++++++++++++ pom.xml | 2 ++ 3 files changed, 40 insertions(+), 1 deletion(-) create mode 100644 licenses/LICENSE-diskquota.txt diff --git a/LICENSE b/LICENSE index 603400aa4cd..28796e982e1 100644 --- a/LICENSE +++ b/LICENSE @@ -311,12 +311,18 @@ The Greenplum Database software includes: gpcontrib/orafce/* see licenses/LICENSE-orafce.txt - ---------------------------- +---------------------------- BSD 3 Clause License gpcontrib/gpcloud/test/googletest see licenses/LICENSE-googletest.txt +---------------------------- + PostgreSQL License + + gpcontrib/diskquota/* + see licenses/LICENSE-diskquota.txt + ================================================================================ Apache Cloudberry includes codes from diff --git a/licenses/LICENSE-diskquota.txt b/licenses/LICENSE-diskquota.txt new file mode 100644 index 00000000000..6e94d88cbc9 --- /dev/null +++ b/licenses/LICENSE-diskquota.txt @@ -0,0 +1,31 @@ +Copyright (c) 2004-2020 Pivotal Software, Inc. +Copyright (c) 2020-Present VMware, Inc. or its affiliates + +diskquota is licensed under the PostgreSQL license, the same license +as PostgreSQL. It contains parts of PostgreSQL source code. A copy of +the license is below: + +-------------- +PostgreSQL Database Management System +(formerly known as Postgres, then as Postgres95) + +Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group + +Portions Copyright (c) 1994, The Regents of the University of California + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose, without fee, and without a written agreement +is hereby granted, provided that the above copyright notice and this +paragraph and the following two paragraphs appear in all copies. + +IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR +DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING +LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS +DOCUMENTATION, EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES, +INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY +AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS +ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS TO +PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
diff --git a/pom.xml b/pom.xml index 6d33d7e9de5..75fdaf6619e 100644 --- a/pom.xml +++ b/pom.xml @@ -153,6 +153,8 @@ code or new licensing patterns. gpcontrib/gp_exttable_fdw/data/** gpcontrib/gp_exttable_fdw/gp_exttable_fdw.control + gpcontrib/diskquota/** + getversion .git-blame-ignore-revs .dir-locals.el